Mirror of https://github.com/rancher/k3k.git (synced 2026-04-10 04:36:53 +00:00)

Compare commits: 81 commits, v1.0.2-rc1 ... chart-1.1.
Commits (SHA1):

efbd179a77, 746fd3c36c, efc28292ee, 0395befc54, d5f3c3e304, e21b1b6ba6,
d7ab4320c7, 96d812a3cb, 1808926d44, 3ec41c3717, 8add97934e, ba819618e3,
c4cd768789, 183c5a0a5a, 20b42b0dcf, ef2bb0339a, 93e8ab6d8f, 18889ba6b7,
491e43057c, 8403214d58, 624fee98d7, f0375c26bb, 25e910ccaf, 3ec7434ce3,
f4cd57b9f5, 0dbd930292, 9554628fc5, 78e805889d, 34ef69ba50, 97a6a61859,
056b36e8b5, c34565da4d, 7b0f695248, 675ece9edc, 733fb345cc, 0b214e0769,
512339440b, 9d38388c80, e6f0cb414c, 4928ca8925, e89a790fc9, 7641a1c9c5,
d975171920, fcb05793b1, 83b4415f02, cd72bcbc15, 9836f8376d, dba054786e,
c94f7c7a30, 1a16527750, e7df4ed7f0, 9fae02fcbf, f341f7f5e8, ca50a6b231,
004e177ac1, 0164c785ab, c1b7da4c72, ff0b03af02, 62a76a8202, 9e841cc75c,
bc79a2e6a9, 3681614a3e, f04d88bd3f, 4b293cef42, 1e0aa0ad37, e28fa84ae7,
511be5aa4e, cd6c962bcf, c0418267c9, eaa20c16e7, 0cea0c9e14, d12f3ea757,
9ea81c861b, 20c5441030, a3a4c931a0, fcc7191ab3, ff6862e511, 20305e03b7,
5f42eafd2a, ccc3d1651c, 0185998aa0
.github/renovate.json (vendored, new file, 11 lines)

@@ -0,0 +1,11 @@
{
  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
  "extends": [
    "github>rancher/renovate-config#release"
  ],
  "baseBranchPatterns": [
    "main",
    "release/v1.0"
  ],
  "prHourlyLimit": 2
}
.github/workflows/build.yml (vendored, 24 lines changed)

@@ -2,9 +2,9 @@ name: Build
on:
  push:
-    branches:
-      - main
+    branches: [main]
  pull_request:
    types: [opened, synchronize, reopened]

permissions:
  contents: read

@@ -19,18 +19,18 @@ jobs:
    steps:
      - name: Checkout code
-       uses: actions/checkout@v4
+       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6

      - name: Set up Go
-       uses: actions/setup-go@v5
+       uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6
        with:
          go-version-file: go.mod

      - name: Set up QEMU
-       uses: docker/setup-qemu-action@v3
+       uses: docker/setup-qemu-action@ce360397dd3f832beb865e1373c09c0e9f86d70a # v4

      - name: Run GoReleaser
-       uses: goreleaser/goreleaser-action@v6
+       uses: goreleaser/goreleaser-action@ec59f474b9834571250b370d4735c50f8e2d1e29 # v7
        with:
          distribution: goreleaser
          version: v2

@@ -40,7 +40,7 @@ jobs:
          REGISTRY: ""

      - name: Run Trivy vulnerability scanner (k3kcli)
-       uses: aquasecurity/trivy-action@0.28.0
+       uses: aquasecurity/trivy-action@57a97c7e7821a5776cebc9bb87c984fa69cba8f1 # v0.35.0
        with:
          ignore-unfixed: true
          severity: 'MEDIUM,HIGH,CRITICAL'

@@ -50,13 +50,13 @@ jobs:
          output: 'trivy-results-k3kcli.sarif'

      - name: Upload Trivy scan results to GitHub Security tab (k3kcli)
-       uses: github/codeql-action/upload-sarif@v3
+       uses: github/codeql-action/upload-sarif@5c8a8a642e79153f5d047b10ec1cba1d1cc65699 # v3
        with:
          sarif_file: trivy-results-k3kcli.sarif
          category: k3kcli

      - name: Run Trivy vulnerability scanner (k3k)
-       uses: aquasecurity/trivy-action@0.28.0
+       uses: aquasecurity/trivy-action@57a97c7e7821a5776cebc9bb87c984fa69cba8f1 # v0.35.0
        with:
          ignore-unfixed: true
          severity: 'MEDIUM,HIGH,CRITICAL'

@@ -66,13 +66,13 @@ jobs:
          output: 'trivy-results-k3k.sarif'

      - name: Upload Trivy scan results to GitHub Security tab (k3k)
-       uses: github/codeql-action/upload-sarif@v3
+       uses: github/codeql-action/upload-sarif@5c8a8a642e79153f5d047b10ec1cba1d1cc65699 # v3
        with:
          sarif_file: trivy-results-k3k.sarif
          category: k3k

      - name: Run Trivy vulnerability scanner (k3k-kubelet)
-       uses: aquasecurity/trivy-action@0.28.0
+       uses: aquasecurity/trivy-action@57a97c7e7821a5776cebc9bb87c984fa69cba8f1 # v0.35.0
        with:
          ignore-unfixed: true
          severity: 'MEDIUM,HIGH,CRITICAL'

@@ -82,7 +82,7 @@ jobs:
          output: 'trivy-results-k3k-kubelet.sarif'

      - name: Upload Trivy scan results to GitHub Security tab (k3k-kubelet)
-       uses: github/codeql-action/upload-sarif@v3
+       uses: github/codeql-action/upload-sarif@5c8a8a642e79153f5d047b10ec1cba1d1cc65699 # v3
        with:
          sarif_file: trivy-results-k3k-kubelet.sarif
          category: k3k-kubelet
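The workflow above replaces floating action tags with actions pinned to full commit SHAs. As an illustrative aside (not part of the repository), one way to sanity-check that a pinned SHA matches the tag noted in the trailing comment is to list the tag refs of the action repository:

```bash
# Example: list which commits the actions/checkout v6 tags point to.
git ls-remote https://github.com/actions/checkout "refs/tags/v6*"
```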
.github/workflows/chart.yml (vendored, 21 lines changed)

@@ -6,12 +6,16 @@ on:
permissions:
  contents: write

+env:
+  HELM_VERSION: v4.1.3
+  HELM_BIN_HASH_AMD64: 02ce9722d541238f81459938b84cf47df2fdf1187493b4bfb2346754d82a4700

jobs:
  chart-release:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
-       uses: actions/checkout@v4
+       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
        with:
          fetch-depth: 0

@@ -19,14 +23,17 @@ jobs:
        run: |
          git config user.name "$GITHUB_ACTOR"
          git config user.email "$GITHUB_ACTOR@users.noreply.github.com"

-     - name: Install Helm
-       uses: azure/setup-helm@v4
-       env:
-         GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
+     - name: Install helm
+       run: |
+         curl -sSfL -o helm.tar.gz https://get.helm.sh/helm-${{ env.HELM_VERSION }}-linux-amd64.tar.gz
+         echo "${{ env.HELM_BIN_HASH_AMD64 }} helm.tar.gz" | sha256sum --check
+         tar -xvzf helm.tar.gz --strip-components=1 -C /tmp/
+         sudo mv /tmp/helm /usr/local/bin
+         sudo chmod +x /usr/local/bin/helm

      - name: Run chart-releaser
-       uses: helm/chart-releaser-action@v1.6.0
+       uses: helm/chart-releaser-action@cae68fefc6b5f367a0275617c9f83181ba54714f # v1.7.0
        with:
          config: .cr.yaml
        env:
.github/workflows/fossa.yml (vendored, 8 lines changed)

@@ -2,7 +2,7 @@ name: FOSSA Scanning
on:
  push:
-   branches: ["main", "master", "release/**"]
+   branches: ["main", "release/**"]
  workflow_dispatch:

permissions:

@@ -15,18 +15,18 @@ jobs:
    timeout-minutes: 30
    steps:
      - name: Checkout
-       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6
+       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6

      # The FOSSA token is shared between all repos in Rancher's GH org. It can be
      # used directly and there is no need to request specific access to EIO.
      - name: Read FOSSA token
-       uses: rancher-eio/read-vault-secrets@main
+       uses: rancher-eio/read-vault-secrets@0da85151ad1f19ed7986c41587e45aac1ace74b6 # v3
        with:
          secrets: |
            secret/data/github/org/rancher/fossa/push token | FOSSA_API_KEY_PUSH_ONLY

      - name: FOSSA scan
-       uses: fossas/fossa-action@main
+       uses: fossas/fossa-action@c414b9ad82eaad041e47a7cf62a4f02411f427a0 # v1.8.0
        with:
          api-key: ${{ env.FOSSA_API_KEY_PUSH_ONLY }}
          # Only runs the scan and do not provide/returns any results back to the
.github/workflows/release-delete.yml (vendored, 2 lines changed)

@@ -24,7 +24,7 @@ jobs:
        run: echo "::error::Missing tag from input" && exit 1

      - name: Checkout code
-       uses: actions/checkout@v4
+       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6

      - name: Check if release is draft
        run: |
.github/workflows/release.yml (vendored, 19 lines changed)

@@ -21,7 +21,7 @@ jobs:
    steps:
      - name: Checkout code
-       uses: actions/checkout@v4
+       uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
        with:
          fetch-depth: 0
          fetch-tags: true

@@ -31,15 +31,22 @@ jobs:
        run: git checkout ${{ inputs.commit }}

      - name: Set up Go
-       uses: actions/setup-go@v5
+       uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6
        with:
          go-version-file: go.mod

      - name: Set up QEMU
-       uses: docker/setup-qemu-action@v3
+       uses: docker/setup-qemu-action@ce360397dd3f832beb865e1373c09c0e9f86d70a # v4
+       with:
+         image: tonistiigi/binfmt:qemu-v10.0.4-56

+     - name: Set up Docker Buildx
+       uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4
+       with:
+         version: v0.30.1

      - name: "Read secrets"
-       uses: rancher-eio/read-vault-secrets@main
+       uses: rancher-eio/read-vault-secrets@0da85151ad1f19ed7986c41587e45aac1ace74b6 # v3
        if: github.repository_owner == 'rancher'
        with:
          secrets: |

@@ -55,7 +62,7 @@ jobs:
          echo "DOCKER_PASSWORD=${{ github.token }}" >> $GITHUB_ENV

      - name: Login to container registry
-       uses: docker/login-action@v3
+       uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # v4
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ env.DOCKER_USERNAME }}

@@ -78,7 +85,7 @@ jobs:
          echo "CURRENT_TAG=${CURRENT_TAG}" >> "$GITHUB_OUTPUT"

      - name: Run GoReleaser
-       uses: goreleaser/goreleaser-action@v6
+       uses: goreleaser/goreleaser-action@ec59f474b9834571250b370d4735c50f8e2d1e29 # v7
        with:
          distribution: goreleaser
          version: v2
.github/workflows/renovate-vault.yml (vendored, new file, 63 lines)

@@ -0,0 +1,63 @@
name: Renovate
on:
  workflow_dispatch:
    inputs:
      logLevel:
        description: "Override default log level"
        required: false
        default: info
        type: choice
        options:
          - info
          - debug
      overrideSchedule:
        description: "Override all schedules"
        required: false
        default: "false"
        type: choice
        options:
          - "false"
          - "true"
      configMigration:
        description: "Toggle PRs for config migration"
        required: false
        default: "true"
        type: choice
        options:
          - "false"
          - "true"
      renovateConfig:
        description: "Define a custom renovate config file"
        required: false
        default: ".github/renovate.json"
        type: string
      minimumReleaseAge:
        description: "Override minimumReleaseAge for a one-time run (e.g., '0 days' to disable delay)"
        required: false
        default: "null"
        type: string
      extendsPreset:
        description: "Override renovate extends preset (default: 'github>rancher/renovate-config#release')."
        required: false
        default: "github>rancher/renovate-config#release"
        type: string

  schedule:
    - cron: '30 4,6 * * 1-5'

permissions:
  contents: read
  id-token: write

jobs:
  call-workflow:
    uses: rancher/renovate-config/.github/workflows/renovate-vault.yml@240174f0ae1994c3f6e94b8b062ea4aceed4a182 # release
    with:
      configMigration: ${{ inputs.configMigration || 'true' }}
      logLevel: ${{ inputs.logLevel || 'info' }}
      overrideSchedule: ${{ github.event.inputs.overrideSchedule == 'true' && '{''schedule'':null}' || '' }}
      renovateConfig: ${{ inputs.renovateConfig || '.github/renovate.json' }}
      minimumReleaseAge: ${{ inputs.minimumReleaseAge || 'null' }}
      extendsPreset: ${{ inputs.extendsPreset || 'github>rancher/renovate-config#release' }}
    secrets:
      override-token: "${{ secrets.RENOVATE_FORK_GH_TOKEN || '' }}"
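As an illustrative sketch (not part of the repository), the workflow above can be dispatched by hand with custom inputs via the GitHub CLI, assuming gh is installed and authenticated against the repo; the input names come from the workflow_dispatch block:

```bash
gh workflow run renovate-vault.yml -f logLevel=debug -f overrideSchedule=true
```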
.github/workflows/test-conformance-shared.yaml (vendored, 128 lines changed)
@@ -4,44 +4,91 @@ on:
|
||||
schedule:
|
||||
- cron: "0 1 * * *"
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
k3k_version:
|
||||
description: 'K3k version to test (e.g. v1.0.2). Leave empty to build from source.'
|
||||
required: false
|
||||
type: string
|
||||
k8s_version:
|
||||
description: 'Kubernetes version to test'
|
||||
required: false
|
||||
type: choice
|
||||
options:
|
||||
- ""
|
||||
- "v1.34.6"
|
||||
- "v1.35.3"
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
env:
|
||||
K8S_VERSIONS: "v1.34.6,v1.35.3"
|
||||
HELM_VERSION: v4.1.3
|
||||
HELM_BIN_HASH_AMD64: 02ce9722d541238f81459938b84cf47df2fdf1187493b4bfb2346754d82a4700
|
||||
K3D_VERSION: v5.8.3
|
||||
K3D_BIN_HASH_AMD64: dbaa79a76ace7f4ca230a1ff41dc7d8a5036a8ad0309e9c54f9bf3836dbe853e
|
||||
|
||||
jobs:
|
||||
setup:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
k8s_versions: ${{ steps.set-matrix.outputs.k8s_versions }}
|
||||
steps:
|
||||
- id: set-matrix
|
||||
run: |
|
||||
if [[ -z "${{ inputs.k8s_version }}" ]]; then
|
||||
JSON_ARRAY=$(jq -nc '"${{ env.K8S_VERSIONS }}" | split(",")')
|
||||
echo "k8s_versions=${JSON_ARRAY}" >> "$GITHUB_OUTPUT"
|
||||
else
|
||||
echo "k8s_versions=[\"${{ inputs.k8s_version }}\"]" >> "$GITHUB_OUTPUT"
|
||||
fi
|
||||
|
||||
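As an aside, the set-matrix step above can be reproduced locally to see what the job matrix ends up containing. A minimal sketch, assuming jq is installed; the workflow inlines the env value into the jq program, while the --arg form below is just an equivalent way to pass it:

```bash
K8S_VERSIONS="v1.34.6,v1.35.3"
jq -nc --arg v "$K8S_VERSIONS" '$v | split(",")'
# prints: ["v1.34.6","v1.35.3"]
```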
conformance:
|
||||
needs: setup
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
type:
|
||||
- parallel
|
||||
- serial
|
||||
k8s_version: ${{ fromJSON(needs.setup.outputs.k8s_versions) }}
|
||||
|
||||
env:
|
||||
KUBERNETES_VERSION: ${{ matrix.k8s_version }}
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
|
||||
with:
|
||||
fetch-depth: 0
|
||||
fetch-tags: true
|
||||
|
||||
- uses: actions/setup-go@v5
|
||||
- uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
|
||||
- name: Install helm
|
||||
uses: azure/setup-helm@v4.3.0
|
||||
|
||||
run: |
|
||||
curl -sSfL -o helm.tar.gz https://get.helm.sh/helm-${{ env.HELM_VERSION }}-linux-amd64.tar.gz
|
||||
echo "${{ env.HELM_BIN_HASH_AMD64 }} helm.tar.gz" | sha256sum --check
|
||||
tar -xvzf helm.tar.gz --strip-components=1 -C /tmp/
|
||||
sudo mv /tmp/helm /usr/local/bin
|
||||
sudo chmod +x /usr/local/bin/helm
|
||||
|
||||
- name: Install hydrophone
|
||||
run: go install sigs.k8s.io/hydrophone@latest
|
||||
run: go install sigs.k8s.io/hydrophone@3de3e886a2f6f09635d8b981c195490af1584d97 #v0.7.0
|
||||
|
||||
- name: Install k3d # taken from github.com/rancher/rancher/.github/workflows/integration-tests.yaml
|
||||
run: |
|
||||
curl -sSfL -o k3d "https://github.com/k3d-io/k3d/releases/download/${{ env.K3D_VERSION }}/k3d-linux-amd64"
|
||||
echo "${{ env.K3D_BIN_HASH_AMD64 }} k3d" | sha256sum --check
|
||||
sudo mv k3d /usr/local/bin
|
||||
sudo chmod +x /usr/local/bin/k3d
|
||||
|
||||
- name: Install k3d and kubectl
|
||||
run: |
|
||||
wget -q -O - https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | bash
|
||||
k3d version
|
||||
|
||||
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
|
||||
curl -LO "https://dl.k8s.io/release/${{ env.KUBERNETES_VERSION }}/bin/linux/amd64/kubectl"
|
||||
curl -LO "https://dl.k8s.io/release/${{ env.KUBERNETES_VERSION }}/bin/linux/amd64/kubectl.sha256"
|
||||
echo "$(cat kubectl.sha256) kubectl" | sha256sum --check
|
||||
|
||||
- name: Setup Kubernetes (k3d)
|
||||
env:
|
||||
@@ -53,13 +100,15 @@ jobs:
|
||||
k3d registry create ${REPO_NAME} --port ${REPO_PORT}
|
||||
|
||||
k3d cluster create k3k --servers 2 \
|
||||
--image rancher/k3s:${{ env.KUBERNETES_VERSION }}-k3s1 \
|
||||
-p "30000-30010:30000-30010@server:0" \
|
||||
--registry-use k3d-${REPO_NAME}:${REPO_PORT}
|
||||
|
||||
kubectl cluster-info
|
||||
kubectl get nodes
|
||||
|
||||
- name: Setup K3k
|
||||
- name: Setup K3k (from source)
|
||||
if: inputs.k3k_version == ''
|
||||
env:
|
||||
REPO: k3k-registry:12345
|
||||
run: |
|
||||
@@ -77,9 +126,29 @@ jobs:
|
||||
k3d image import ${REPO}/k3k-kubelet:${VERSION} -c k3k --verbose
|
||||
|
||||
make install
|
||||
|
||||
- name: Setup K3k (from release)
|
||||
if: inputs.k3k_version != ''
|
||||
env:
|
||||
KUBECONFIG: /etc/rancher/k3s/k3s.yaml
|
||||
run: |
|
||||
K3K_VERSION="${{ inputs.k3k_version }}"
|
||||
CHART_VERSION="${K3K_VERSION#v}"
|
||||
|
||||
helm repo add k3k https://rancher.github.io/k3k
|
||||
helm repo update
|
||||
helm install --namespace k3k-system --create-namespace --version "${CHART_VERSION}" k3k k3k/k3k
|
||||
|
||||
echo "Wait for K3k controller to be available"
|
||||
kubectl wait -n k3k-system pod --for condition=Ready -l "app.kubernetes.io/name=k3k" --timeout=5m
|
||||
wget -qO k3kcli "https://github.com/rancher/k3k/releases/download/${{ inputs.k3k_version }}/k3kcli-linux-amd64"
|
||||
sudo mv k3kcli /usr/local/bin/k3kcli
|
||||
sudo chmod +x /usr/local/bin/k3kcli
|
||||
|
||||
- name: Wait for K3k controller
|
||||
env:
|
||||
KUBECONFIG: /etc/rancher/k3s/k3s.yaml
|
||||
run: |
|
||||
echo "Wait for K3k controller deployment to be available"
|
||||
kubectl wait -n k3k-system deployment -l "app.kubernetes.io/name=k3k" --for=condition=Available --timeout=5m
|
||||
|
||||
- name: Check k3kcli
|
||||
run: k3kcli -v
|
||||
@@ -114,33 +183,30 @@ jobs:
|
||||
kubectl get nodes
|
||||
kubectl get pods -A
|
||||
|
||||
- name: Run conformance tests (parallel)
|
||||
if: matrix.type == 'parallel'
|
||||
- name: Run conformance tests
|
||||
run: |
|
||||
# Run conformance tests in parallel mode (skipping serial)
|
||||
hydrophone --conformance --parallel 4 --skip='\[Serial\]' \
|
||||
hydrophone --conformance --parallel 4 \
|
||||
--kubeconfig ${{ github.workspace }}/k3k-mycluster-mycluster-kubeconfig.yaml \
|
||||
--output-dir /tmp
|
||||
|
||||
- name: Run conformance tests (serial)
|
||||
if: matrix.type == 'serial'
|
||||
run: |
|
||||
# Run serial conformance tests
|
||||
hydrophone --focus='\[Serial\].*\[Conformance\]' \
|
||||
--kubeconfig ${{ github.workspace }}/k3k-mycluster-mycluster-kubeconfig.yaml \
|
||||
--output-dir /tmp
|
||||
|
||||
- name: Archive conformance logs
|
||||
uses: actions/upload-artifact@v4
|
||||
- name: Archive logs
|
||||
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
|
||||
if: always()
|
||||
with:
|
||||
name: conformance-${{ matrix.type }}-logs
|
||||
name: conformance-${{ matrix.k8s_version }}-logs
|
||||
path: /tmp/e2e.log
|
||||
|
||||
- name: Archive results
|
||||
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
|
||||
if: always()
|
||||
with:
|
||||
name: conformance-${{ matrix.k8s_version }}-results
|
||||
path: /tmp/junit_01.xml
|
||||
|
||||
- name: Job Summary
|
||||
if: always()
|
||||
run: |
|
||||
echo '## 📊 Conformance Tests Results (${{ matrix.type }})' >> $GITHUB_STEP_SUMMARY
|
||||
echo '## 📊 Conformance Tests Results (${{ matrix.k8s_version }})' >> $GITHUB_STEP_SUMMARY
|
||||
echo '| Passed | Failed | Pending | Skipped |' >> $GITHUB_STEP_SUMMARY
|
||||
echo '|---|---|---|---|' >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
|
||||
.github/workflows/test-conformance-virtual.yaml (vendored, 121 lines changed)
@@ -4,49 +4,89 @@ on:
|
||||
schedule:
|
||||
- cron: "0 1 * * *"
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
k3k_version:
|
||||
description: 'K3k version to test (e.g. v1.0.2). Leave empty to build from source.'
|
||||
required: false
|
||||
type: string
|
||||
k8s_version:
|
||||
description: 'Kubernetes version to test'
|
||||
required: false
|
||||
type: choice
|
||||
options:
|
||||
- ""
|
||||
- "v1.34.6"
|
||||
- "v1.35.3"
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
env:
|
||||
K8S_VERSIONS: "v1.34.6,v1.35.3"
|
||||
HELM_VERSION: v4.1.3
|
||||
HELM_BIN_HASH_AMD64: 02ce9722d541238f81459938b84cf47df2fdf1187493b4bfb2346754d82a4700
|
||||
|
||||
jobs:
|
||||
setup:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
k8s_versions: ${{ steps.set-matrix.outputs.k8s_versions }}
|
||||
steps:
|
||||
- id: set-matrix
|
||||
run: |
|
||||
if [[ -z "${{ inputs.k8s_version }}" ]]; then
|
||||
JSON_ARRAY=$(jq -nc '"${{ env.K8S_VERSIONS }}" | split(",")')
|
||||
echo "k8s_versions=${JSON_ARRAY}" >> "$GITHUB_OUTPUT"
|
||||
else
|
||||
echo "k8s_versions=[\"${{ inputs.k8s_version }}\"]" >> "$GITHUB_OUTPUT"
|
||||
fi
|
||||
|
||||
conformance:
|
||||
needs: setup
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
type:
|
||||
- parallel
|
||||
- serial
|
||||
k8s_version: ${{ fromJSON(needs.setup.outputs.k8s_versions) }}
|
||||
|
||||
env:
|
||||
KUBERNETES_VERSION: ${{ matrix.k8s_version }}
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
|
||||
with:
|
||||
fetch-depth: 0
|
||||
fetch-tags: true
|
||||
|
||||
- uses: actions/setup-go@v5
|
||||
- uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
|
||||
- name: Install helm
|
||||
uses: azure/setup-helm@v4.3.0
|
||||
|
||||
run: |
|
||||
curl -sSfL -o helm.tar.gz https://get.helm.sh/helm-${{ env.HELM_VERSION }}-linux-amd64.tar.gz
|
||||
echo "${{ env.HELM_BIN_HASH_AMD64 }} helm.tar.gz" | sha256sum --check
|
||||
tar -xvzf helm.tar.gz --strip-components=1 -C /tmp/
|
||||
sudo mv /tmp/helm /usr/local/bin
|
||||
sudo chmod +x /usr/local/bin/helm
|
||||
|
||||
- name: Install hydrophone
|
||||
run: go install sigs.k8s.io/hydrophone@latest
|
||||
run: go install sigs.k8s.io/hydrophone@3de3e886a2f6f09635d8b981c195490af1584d97 #v0.7.0
|
||||
|
||||
- name: Install k3s
|
||||
env:
|
||||
KUBECONFIG: /etc/rancher/k3s/k3s.yaml
|
||||
K3S_HOST_VERSION: v1.32.1+k3s1
|
||||
K3S_HOST_VERSION: ${{ env.KUBERNETES_VERSION }}+k3s1
|
||||
run: |
|
||||
curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=${K3S_HOST_VERSION} INSTALL_K3S_EXEC="--write-kubeconfig-mode=777" sh -s -
|
||||
|
||||
kubectl cluster-info
|
||||
kubectl get nodes
|
||||
|
||||
- name: Build, package and setup K3k
|
||||
- name: Setup K3k (from source)
|
||||
if: inputs.k3k_version == ''
|
||||
env:
|
||||
KUBECONFIG: /etc/rancher/k3s/k3s.yaml
|
||||
run: |
|
||||
@@ -60,9 +100,29 @@ jobs:
|
||||
|
||||
# add k3kcli to $PATH
|
||||
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
|
||||
|
||||
- name: Setup K3k (from release)
|
||||
if: inputs.k3k_version != ''
|
||||
env:
|
||||
KUBECONFIG: /etc/rancher/k3s/k3s.yaml
|
||||
run: |
|
||||
K3K_VERSION="${{ inputs.k3k_version }}"
|
||||
CHART_VERSION="${K3K_VERSION#v}"
|
||||
|
||||
helm repo add k3k https://rancher.github.io/k3k
|
||||
helm repo update
|
||||
helm install --namespace k3k-system --create-namespace --version "${CHART_VERSION}" k3k k3k/k3k
|
||||
|
||||
echo "Wait for K3k controller to be available"
|
||||
kubectl wait -n k3k-system pod --for condition=Ready -l "app.kubernetes.io/name=k3k" --timeout=5m
|
||||
wget -qO k3kcli "https://github.com/rancher/k3k/releases/download/${{ inputs.k3k_version }}/k3kcli-linux-amd64"
|
||||
sudo mv k3kcli /usr/local/bin/k3kcli
|
||||
sudo chmod +x /usr/local/bin/k3kcli
|
||||
|
||||
- name: Wait for K3k controller
|
||||
env:
|
||||
KUBECONFIG: /etc/rancher/k3s/k3s.yaml
|
||||
run: |
|
||||
echo "Wait for K3k controller deployment to be available"
|
||||
kubectl wait -n k3k-system deployment -l "app.kubernetes.io/name=k3k" --for=condition=Available --timeout=5m
|
||||
|
||||
- name: Check k3kcli
|
||||
run: k3kcli -v
|
||||
@@ -79,23 +139,13 @@ jobs:
|
||||
kubectl get nodes
|
||||
kubectl get pods -A
|
||||
|
||||
- name: Run conformance tests (parallel)
|
||||
if: matrix.type == 'parallel'
|
||||
- name: Run conformance tests
|
||||
run: |
|
||||
# Run conformance tests in parallel mode (skipping serial)
|
||||
hydrophone --conformance --parallel 4 --skip='\[Serial\]' \
|
||||
hydrophone --conformance --parallel 4 \
|
||||
--kubeconfig ${{ github.workspace }}/k3k-mycluster-mycluster-kubeconfig.yaml \
|
||||
--output-dir /tmp
|
||||
|
||||
- name: Run conformance tests (serial)
|
||||
if: matrix.type == 'serial'
|
||||
run: |
|
||||
# Run serial conformance tests
|
||||
hydrophone --focus='\[Serial\].*\[Conformance\]' \
|
||||
--kubeconfig ${{ github.workspace }}/k3k-mycluster-mycluster-kubeconfig.yaml \
|
||||
--output-dir /tmp
|
||||
|
||||
- name: Export logs
|
||||
- name: Collect logs
|
||||
if: always()
|
||||
env:
|
||||
KUBECONFIG: /etc/rancher/k3s/k3s.yaml
|
||||
@@ -104,30 +154,37 @@ jobs:
|
||||
kubectl logs -n k3k-system -l "app.kubernetes.io/name=k3k" --tail=-1 > /tmp/k3k.log
|
||||
|
||||
- name: Archive K3s logs
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
|
||||
if: always()
|
||||
with:
|
||||
name: k3s-${{ matrix.type }}-logs
|
||||
name: k3s-${{ matrix.k8s_version }}-logs
|
||||
path: /tmp/k3s.log
|
||||
|
||||
- name: Archive K3k logs
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
|
||||
if: always()
|
||||
with:
|
||||
name: k3k-${{ matrix.type }}-logs
|
||||
name: k3k-${{ matrix.k8s_version }}-logs
|
||||
path: /tmp/k3k.log
|
||||
|
||||
- name: Archive conformance logs
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
|
||||
if: always()
|
||||
with:
|
||||
name: conformance-${{ matrix.type }}-logs
|
||||
name: conformance-${{ matrix.k8s_version }}-logs
|
||||
path: /tmp/e2e.log
|
||||
|
||||
- name: Archive results
|
||||
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
|
||||
if: always()
|
||||
with:
|
||||
name: conformance-${{ matrix.k8s_version }}-results
|
||||
path: /tmp/junit_01.xml
|
||||
|
||||
- name: Job Summary
|
||||
if: always()
|
||||
run: |
|
||||
echo '## 📊 Conformance Tests Results (${{ matrix.type }})' >> $GITHUB_STEP_SUMMARY
|
||||
echo '## 📊 Conformance Tests Results (${{ matrix.k8s_version }})' >> $GITHUB_STEP_SUMMARY
|
||||
echo '| Passed | Failed | Pending | Skipped |' >> $GITHUB_STEP_SUMMARY
|
||||
echo '|---|---|---|---|' >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
|
||||
.github/workflows/test-e2e.yaml (vendored, 80 lines changed)
@@ -2,47 +2,34 @@ name: Tests E2E
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
pull_request:
|
||||
types: [opened, synchronize, reopened]
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
env:
|
||||
KUBERNETES_VERSION: v1.35.3
|
||||
|
||||
jobs:
|
||||
validate:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
|
||||
- name: Install Pandoc
|
||||
run: sudo apt-get install pandoc
|
||||
|
||||
- name: Validate
|
||||
run: make validate
|
||||
|
||||
tests-e2e:
|
||||
runs-on: ubuntu-latest
|
||||
needs: validate
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
|
||||
with:
|
||||
fetch-depth: 0
|
||||
fetch-tags: true
|
||||
|
||||
- uses: actions/setup-go@v5
|
||||
- uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
|
||||
- name: Install Ginkgo
|
||||
run: go install github.com/onsi/ginkgo/v2/ginkgo
|
||||
run: go install github.com/onsi/ginkgo/v2/ginkgo@5d1d628ac86668c8f944c8c491c3d1ab86b3bed4 #v2.28.1
|
||||
|
||||
- name: Setup environment
|
||||
run: |
|
||||
@@ -52,11 +39,11 @@ jobs:
|
||||
echo "GOCOVERDIR=${{ github.workspace }}/covdata" >> $GITHUB_ENV
|
||||
echo "REPO=ttl.sh/$(uuidgen)" >> $GITHUB_ENV
|
||||
echo "VERSION=1h" >> $GITHUB_ENV
|
||||
echo "K3S_HOST_VERSION=v1.32.1+k3s1 >> $GITHUB_ENV"
|
||||
echo "K3S_HOST_VERSION=${{ env.KUBERNETES_VERSION }}+k3s1" >> $GITHUB_ENV
|
||||
|
||||
- name: Install k3s
|
||||
run: |
|
||||
curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=${{ env.K3S_HOST_VERSION }} INSTALL_K3S_EXEC="--write-kubeconfig-mode=777" sh -s -
|
||||
curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=${K3S_HOST_VERSION} INSTALL_K3S_EXEC="--write-kubeconfig-mode=777" sh -s -
|
||||
|
||||
- name: Build and package and push dev images
|
||||
env:
|
||||
@@ -80,49 +67,56 @@ jobs:
|
||||
run: go tool covdata textfmt -i=${GOCOVERDIR} -o ${GOCOVERDIR}/cover.out
|
||||
|
||||
- name: Upload coverage reports to Codecov (controller)
|
||||
uses: codecov/codecov-action@v5
|
||||
uses: codecov/codecov-action@57e3a136b779b570ffcdbf80b3bdc90e7fab3de2 # v6
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
files: ${GOCOVERDIR}/cover.out
|
||||
flags: controller
|
||||
|
||||
- name: Upload coverage reports to Codecov (e2e)
|
||||
uses: codecov/codecov-action@v5
|
||||
uses: codecov/codecov-action@57e3a136b779b570ffcdbf80b3bdc90e7fab3de2 # v6
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
files: ./cover.out
|
||||
flags: e2e
|
||||
|
||||
- name: Export logs
|
||||
if: always()
|
||||
env:
|
||||
KUBECONFIG: /etc/rancher/k3s/k3s.yaml
|
||||
run: |
|
||||
journalctl -u k3s -o cat --no-pager > /tmp/k3s.log
|
||||
kubectl logs -n k3k-system -l "app.kubernetes.io/name=k3k" --tail=-1 > /tmp/k3k.log
|
||||
|
||||
- name: Archive k3s logs
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
|
||||
if: always()
|
||||
with:
|
||||
name: e2e-k3s-logs
|
||||
path: /tmp/k3s.log
|
||||
|
||||
- name: Archive k3k logs
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
|
||||
if: always()
|
||||
with:
|
||||
name: e2e-k3k-logs
|
||||
path: /tmp/k3k.log
|
||||
tests-e2e-slow:
|
||||
runs-on: ubuntu-latest
|
||||
needs: validate
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
|
||||
with:
|
||||
fetch-depth: 0
|
||||
fetch-tags: true
|
||||
|
||||
- uses: actions/setup-go@v5
|
||||
- uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
|
||||
- name: Install Ginkgo
|
||||
run: go install github.com/onsi/ginkgo/v2/ginkgo
|
||||
run: go install github.com/onsi/ginkgo/v2/ginkgo@5d1d628ac86668c8f944c8c491c3d1ab86b3bed4 #v2.28.1
|
||||
|
||||
- name: Setup environment
|
||||
run: |
|
||||
@@ -132,11 +126,11 @@ jobs:
|
||||
echo "GOCOVERDIR=${{ github.workspace }}/covdata" >> $GITHUB_ENV
|
||||
echo "REPO=ttl.sh/$(uuidgen)" >> $GITHUB_ENV
|
||||
echo "VERSION=1h" >> $GITHUB_ENV
|
||||
echo "K3S_HOST_VERSION=v1.32.1+k3s1 >> $GITHUB_ENV"
|
||||
echo "K3S_HOST_VERSION=${{ env.KUBERNETES_VERSION }}+k3s1" >> $GITHUB_ENV
|
||||
|
||||
- name: Install k3s
|
||||
run: |
|
||||
curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=${{ env.K3S_HOST_VERSION }} INSTALL_K3S_EXEC="--write-kubeconfig-mode=777" sh -s -
|
||||
curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=${K3S_HOST_VERSION} INSTALL_K3S_EXEC="--write-kubeconfig-mode=777" sh -s -
|
||||
|
||||
- name: Build and package and push dev images
|
||||
env:
|
||||
@@ -160,29 +154,37 @@ jobs:
|
||||
run: go tool covdata textfmt -i=${GOCOVERDIR} -o ${GOCOVERDIR}/cover.out
|
||||
|
||||
- name: Upload coverage reports to Codecov (controller)
|
||||
uses: codecov/codecov-action@v5
|
||||
uses: codecov/codecov-action@57e3a136b779b570ffcdbf80b3bdc90e7fab3de2 # v6
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
files: ${GOCOVERDIR}/cover.out
|
||||
flags: controller
|
||||
|
||||
- name: Upload coverage reports to Codecov (e2e)
|
||||
uses: codecov/codecov-action@v5
|
||||
uses: codecov/codecov-action@57e3a136b779b570ffcdbf80b3bdc90e7fab3de2 # v6
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
files: ./cover.out
|
||||
flags: e2e
|
||||
|
||||
- name: Export logs
|
||||
if: always()
|
||||
env:
|
||||
KUBECONFIG: /etc/rancher/k3s/k3s.yaml
|
||||
run: |
|
||||
journalctl -u k3s -o cat --no-pager > /tmp/k3s.log
|
||||
kubectl logs -n k3k-system -l "app.kubernetes.io/name=k3k" --tail=-1 > /tmp/k3k.log
|
||||
|
||||
- name: Archive k3s logs
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
|
||||
if: always()
|
||||
with:
|
||||
name: e2e-k3s-logs
|
||||
name: e2e-slow-k3s-logs
|
||||
path: /tmp/k3s.log
|
||||
|
||||
- name: Archive k3k logs
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
|
||||
if: always()
|
||||
with:
|
||||
name: e2e-k3k-logs
|
||||
name: e2e-slow-k3k-logs
|
||||
path: /tmp/k3k.log
|
||||
.github/workflows/test.yaml (vendored, 61 lines changed)
@@ -2,56 +2,26 @@ name: Tests
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
pull_request:
|
||||
types: [opened, synchronize, reopened]
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
env:
|
||||
KUBERNETES_VERSION: v1.35.3
|
||||
|
||||
jobs:
|
||||
lint:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
|
||||
- name: golangci-lint
|
||||
uses: golangci/golangci-lint-action@v8
|
||||
with:
|
||||
args: --timeout=5m
|
||||
version: v2.3.0
|
||||
|
||||
validate:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
|
||||
- name: Install Pandoc
|
||||
run: sudo apt-get install pandoc
|
||||
|
||||
- name: Validate
|
||||
run: make validate
|
||||
|
||||
tests:
|
||||
runs-on: ubuntu-latest
|
||||
needs: validate
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
|
||||
|
||||
- uses: actions/setup-go@v5
|
||||
- uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
|
||||
@@ -59,7 +29,7 @@ jobs:
|
||||
run: make test-unit
|
||||
|
||||
- name: Upload coverage reports to Codecov
|
||||
uses: codecov/codecov-action@v5
|
||||
uses: codecov/codecov-action@57e3a136b779b570ffcdbf80b3bdc90e7fab3de2 # v6
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
files: ./cover.out
|
||||
@@ -67,21 +37,20 @@ jobs:
|
||||
|
||||
tests-cli:
|
||||
runs-on: ubuntu-latest
|
||||
needs: validate
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
|
||||
with:
|
||||
fetch-depth: 0
|
||||
fetch-tags: true
|
||||
|
||||
- uses: actions/setup-go@v5
|
||||
- uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
|
||||
- name: Install Ginkgo
|
||||
run: go install github.com/onsi/ginkgo/v2/ginkgo
|
||||
run: go install github.com/onsi/ginkgo/v2/ginkgo@5d1d628ac86668c8f944c8c491c3d1ab86b3bed4 #v2.28.1
|
||||
|
||||
- name: Setup environment
|
||||
run: |
|
||||
@@ -89,7 +58,7 @@ jobs:
|
||||
|
||||
echo "COVERAGE=true" >> $GITHUB_ENV
|
||||
echo "GOCOVERDIR=${{ github.workspace }}/covdata" >> $GITHUB_ENV
|
||||
echo "K3S_HOST_VERSION=v1.32.1+k3s1 >> $GITHUB_ENV"
|
||||
echo "K3S_HOST_VERSION=${{ env.KUBERNETES_VERSION }}+k3s1" >> $GITHUB_ENV
|
||||
|
||||
- name: Build and package
|
||||
run: |
|
||||
@@ -112,21 +81,21 @@ jobs:
|
||||
run: go tool covdata textfmt -i=${{ github.workspace }}/covdata -o ${{ github.workspace }}/covdata/cover.out
|
||||
|
||||
- name: Upload coverage reports to Codecov
|
||||
uses: codecov/codecov-action@v5
|
||||
uses: codecov/codecov-action@57e3a136b779b570ffcdbf80b3bdc90e7fab3de2 # v6
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
files: ${{ github.workspace }}/covdata/cover.out
|
||||
flags: cli
|
||||
|
||||
- name: Archive k3s logs
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
|
||||
if: always()
|
||||
with:
|
||||
name: cli-k3s-logs
|
||||
path: /tmp/k3s.log
|
||||
|
||||
- name: Archive k3k logs
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
|
||||
if: always()
|
||||
with:
|
||||
name: cli-k3k-logs
|
||||
|
||||
.github/workflows/validate.yml (vendored, new file, 41 lines)

@@ -0,0 +1,41 @@
name: Validate

on:
  push:
    branches: [main]
  pull_request:
    types: [opened, synchronize, reopened]
  workflow_dispatch:

permissions:
  contents: read

jobs:
  validate:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6

      - name: Set up Go
        uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6
        with:
          go-version-file: go.mod
          cache: true

      - name: Install Pandoc
        run: sudo apt-get install pandoc

      - name: Run linters
        uses: golangci/golangci-lint-action@1e7e51e771db61008b38414a730f564565cf7c20 # v9.2.0
        with:
          version: v2.11.4
          args: -v
          only-new-issues: true
          skip-cache: false

      - name: Run formatters
        run: golangci-lint -v fmt ./...

      - name: Validate
        run: make validate
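For a rough local approximation of what this workflow checks before pushing (a sketch; it assumes golangci-lint v2.11.4 and pandoc are installed, since make validate regenerates docs):

```bash
golangci-lint run -v
golangci-lint -v fmt ./...
make validate
```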
Makefile (37 lines changed)

@@ -5,8 +5,8 @@ VERSION ?= $(shell git describe --tags --always --dirty --match="v[0-9]*")

## Dependencies

-GOLANGCI_LINT_VERSION := v2.3.0
-GINKGO_VERSION ?= v2.21.0
+GOLANGCI_LINT_VERSION := v2.11.4
+GINKGO_VERSION ?= v2.28.1
GINKGO_FLAGS ?= -v -r --coverprofile=cover.out --coverpkg=./...
ENVTEST_VERSION ?= v0.0.0-20250505003155-b6c5897febe5
ENVTEST_K8S_VERSION := 1.31.0

@@ -60,24 +60,32 @@ test: ## Run all the tests
	$(GINKGO) $(GINKGO_FLAGS) --label-filter=$(label-filter)

.PHONY: test-unit
-test-unit: ## Run the unit tests (skips the e2e)
+test-unit: ## Run the unit tests (skips the e2e and integration tests)
	$(GINKGO) $(GINKGO_FLAGS) --skip-file=tests/*

.PHONY: test-controller
test-controller: ## Run the controller tests (pkg/controller)
	$(GINKGO) $(GINKGO_FLAGS) pkg/controller

.PHONY: test-kubelet
test-kubelet: ## Run the k3k-kubelet controller tests (tests/integration/k3k-kubelet)
	$(GINKGO) $(GINKGO_FLAGS) tests/integration/k3k-kubelet

.PHONY: test-kubelet-controller
test-kubelet-controller: ## Run the controller tests (pkg/controller)
	$(GINKGO) $(GINKGO_FLAGS) k3k-kubelet/controller

.PHONY: test-policy
test-policy: ## Run the policy controller tests (tests/integration/policy)
	$(GINKGO) $(GINKGO_FLAGS) tests/integration/policy

.PHONY: test-cluster
test-cluster: ## Run the cluster controller tests (tests/integration/cluster)
	$(GINKGO) $(GINKGO_FLAGS) tests/integration/cluster

.PHONY: test-integration
test-integration: ## Run the controller tests that use envtest (tests/integration)
	$(GINKGO) $(GINKGO_FLAGS) tests/integration

.PHONY: test-e2e
test-e2e: ## Run the e2e tests
-	$(GINKGO) $(GINKGO_FLAGS) --label-filter="$(E2E_LABEL_FILTER)" tests
+	$(GINKGO) $(GINKGO_FLAGS) --flake-attempts=3 --label-filter="$(E2E_LABEL_FILTER)" tests/e2e

.PHONY: test-cli
test-cli: ## Run the cli tests
-	$(GINKGO) $(GINKGO_FLAGS) --label-filter=cli --flake-attempts=3 tests
+	$(GINKGO) $(GINKGO_FLAGS) --flake-attempts=3 tests/cli

.PHONY: generate
generate: ## Generate the CRDs specs

@@ -111,14 +119,17 @@ lint: ## Find any linting issues in the project
	$(GOLANGCI_LINT) run --timeout=5m

.PHONY: fmt
-fmt: ## Find any linting issues in the project
+fmt: ## Format source files in the project
ifndef CI
	$(GOLANGCI_LINT) fmt ./...
endif

.PHONY: validate
validate: generate docs fmt ## Validate the project checking for any dependency or doc mismatch
	$(GINKGO) unfocus
	go mod tidy
	git status --porcelain
	go mod verify
	git status --porcelain
	git --no-pager diff --exit-code

.PHONY: install
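To make the indirection concrete: with the default GINKGO_FLAGS shown above, `make test-e2e` expands to roughly the following (a sketch; GINKGO points at the project's ginkgo binary and E2E_LABEL_FILTER is defined elsewhere in the Makefile):

```bash
ginkgo -v -r --coverprofile=cover.out --coverpkg=./... \
  --flake-attempts=3 --label-filter="$E2E_LABEL_FILTER" tests/e2e
```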
@@ -67,7 +67,7 @@ To install it, simply download the latest available version for your architectur
For example, you can download the Linux amd64 version with:

```
-wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v1.0.1/k3kcli-linux-amd64 && \
+wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v1.0.2/k3kcli-linux-amd64 && \
chmod +x k3kcli && \
sudo mv k3kcli /usr/local/bin
```

@@ -75,7 +75,7 @@ wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v1.0.1/k3kcli-l
You should now be able to run:
```bash
-> % k3kcli --version
-k3kcli version v1.0.1
+k3kcli version v1.0.2
```
@@ -2,5 +2,5 @@ apiVersion: v2
name: k3k
description: A Helm chart for K3K
type: application
-version: 1.0.1
-appVersion: v1.0.1
+version: 1.1.0-rc3
+appVersion: v1.1.0-rc3
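Assuming this chart version is published to the same Helm repository the conformance workflows use (https://rancher.github.io/k3k), installing it directly would look roughly like:

```bash
helm repo add k3k https://rancher.github.io/k3k
helm repo update
helm install --namespace k3k-system --create-namespace --version 1.1.0-rc3 k3k k3k/k3k
```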
(Two file diffs suppressed because they are too large.)
@@ -34,7 +34,6 @@ spec:
        - --agent-virtual-image={{- include "agent.virtual.registry" .}}{{ .Values.agent.virtual.image.repository }}
        - --agent-virtual-image-pull-policy={{ .Values.agent.virtual.image.pullPolicy }}
        - --kubelet-port-range={{ .Values.agent.shared.kubeletPortRange }}
-       - --webhook-port-range={{ .Values.agent.shared.webhookPortRange }}
        {{- range $key, $value := include "image.pullSecrets" (concat .Values.agent.imagePullSecrets .Values.global.imagePullSecrets) | fromYamlArray }}
        - --agent-image-pull-secret
        - {{ .name }}

@@ -55,7 +54,4 @@ spec:
        - containerPort: 8080
          name: https
          protocol: TCP
-       - containerPort: 9443
-         name: https-webhook
-         protocol: TCP
      serviceAccountName: {{ include "k3k.serviceAccountName" . }}
@@ -23,9 +23,11 @@ rules:
    resources:
      - "nodes"
      - "nodes/proxy"
      - "namespaces"
    verbs:
      - "get"
      - "list"
      - "watch"
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
@@ -1,15 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
-  name: k3k-webhook
-  labels:
-    {{- include "k3k.labels" . | nindent 4 }}
-  namespace: {{ .Release.Namespace }}
-spec:
-  ports:
-    - port: 443
-      protocol: TCP
-      name: https-webhook
-      targetPort: 9443
-  selector:
-    {{- include "k3k.selectorLabels" . | nindent 6 }}
@@ -56,7 +56,7 @@ controller:
server:
  imagePullSecrets: []
  image:
    registry:
    registry:
    repository: "rancher/k3s"
    pullPolicy: ""

@@ -74,8 +74,6 @@ agent:

    # Specifies the port range that will be used for k3k-kubelet api if mirrorHostNodes is enabled
    kubeletPortRange: "50000-51000"
-   # Specifies the port range that will be used for webhook if mirrorHostNodes is enabled
-   webhookPortRange: "51001-52000"

  # configuration related to agent in virtual mode
  virtual:
@@ -12,6 +12,7 @@ func NewClusterCmd(appCtx *AppContext) *cobra.Command {
	cmd.AddCommand(
		NewClusterCreateCmd(appCtx),
+		NewClusterUpdateCmd(appCtx),
		NewClusterDeleteCmd(appCtx),
		NewClusterListCmd(appCtx),
	)
@@ -13,12 +13,13 @@ import (
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"k8s.io/utils/ptr"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
||||
@@ -117,7 +118,10 @@ func createAction(appCtx *AppContext, config *CreateConfig) func(cmd *cobra.Comm
|
||||
|
||||
logrus.Infof("Creating cluster '%s' in namespace '%s'", name, namespace)
|
||||
|
||||
cluster := newCluster(name, namespace, config)
|
||||
cluster, err := newCluster(name, namespace, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cluster.Spec.Expose = &v1beta1.ExposeConfig{
|
||||
NodePort: &v1beta1.NodePortConfig{},
|
||||
@@ -148,9 +152,9 @@ func createAction(appCtx *AppContext, config *CreateConfig) func(cmd *cobra.Comm
|
||||
return fmt.Errorf("failed to wait for cluster to be reconciled: %w", err)
|
||||
}
|
||||
|
||||
clusterDetails, err := printClusterDetails(cluster)
|
||||
clusterDetails, err := getClusterDetails(cluster)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to print cluster details: %w", err)
|
||||
return fmt.Errorf("failed to get cluster details: %w", err)
|
||||
}
|
||||
|
||||
logrus.Info(clusterDetails)
|
||||
@@ -185,7 +189,17 @@ func createAction(appCtx *AppContext, config *CreateConfig) func(cmd *cobra.Comm
|
||||
}
|
||||
}
|
||||
|
||||
func newCluster(name, namespace string, config *CreateConfig) *v1beta1.Cluster {
|
||||
func newCluster(name, namespace string, config *CreateConfig) (*v1beta1.Cluster, error) {
|
||||
var storageRequestSize *resource.Quantity
|
||||
if config.storageRequestSize != "" {
|
||||
parsed, err := resource.ParseQuantity(config.storageRequestSize)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
storageRequestSize = ptr.To(parsed)
|
||||
}
|
||||
|
||||
cluster := &v1beta1.Cluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
@@ -211,7 +225,7 @@ func newCluster(name, namespace string, config *CreateConfig) *v1beta1.Cluster {
|
||||
Persistence: v1beta1.PersistenceConfig{
|
||||
Type: v1beta1.PersistenceMode(config.persistenceType),
|
||||
StorageClassName: ptr.To(config.storageClassName),
|
||||
StorageRequestSize: config.storageRequestSize,
|
||||
StorageRequestSize: storageRequestSize,
|
||||
},
|
||||
MirrorHostNodes: config.mirrorHostNodes,
|
||||
},
|
||||
@@ -221,7 +235,7 @@ func newCluster(name, namespace string, config *CreateConfig) *v1beta1.Cluster {
|
||||
}
|
||||
|
||||
if config.token != "" {
|
||||
cluster.Spec.TokenSecretRef = &v1.SecretReference{
|
||||
cluster.Spec.TokenSecretRef = &corev1.SecretReference{
|
||||
Name: k3kcluster.TokenSecretName(name),
|
||||
Namespace: namespace,
|
||||
}
|
||||
@@ -253,11 +267,11 @@ func newCluster(name, namespace string, config *CreateConfig) *v1beta1.Cluster {
|
||||
}
|
||||
}
|
||||
|
||||
return cluster
|
||||
return cluster, nil
|
||||
}
|
||||
|
||||
func env(envSlice []string) []v1.EnvVar {
|
||||
var envVars []v1.EnvVar
|
||||
func env(envSlice []string) []corev1.EnvVar {
|
||||
var envVars []corev1.EnvVar
|
||||
|
||||
for _, env := range envSlice {
|
||||
keyValue := strings.Split(env, "=")
|
||||
@@ -265,7 +279,7 @@ func env(envSlice []string) []v1.EnvVar {
|
||||
logrus.Fatalf("incorrect value for environment variable %s", env)
|
||||
}
|
||||
|
||||
envVars = append(envVars, v1.EnvVar{
|
||||
envVars = append(envVars, corev1.EnvVar{
|
||||
Name: keyValue[0],
|
||||
Value: keyValue[1],
|
||||
})
|
||||
@@ -352,8 +366,8 @@ func CreateCustomCertsSecrets(ctx context.Context, name, namespace, customCertsP
|
||||
return nil
|
||||
}
|
||||
|
||||
func caCertSecret(certName, clusterName, clusterNamespace string, cert, key []byte) *v1.Secret {
|
||||
return &v1.Secret{
|
||||
func caCertSecret(certName, clusterName, clusterNamespace string, cert, key []byte) *corev1.Secret {
|
||||
return &corev1.Secret{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Secret",
|
||||
APIVersion: "v1",
|
||||
@@ -362,10 +376,10 @@ func caCertSecret(certName, clusterName, clusterNamespace string, cert, key []by
|
||||
Name: controller.SafeConcatNameWithPrefix(clusterName, certName),
|
||||
Namespace: clusterNamespace,
|
||||
},
|
||||
Type: v1.SecretTypeTLS,
|
||||
Type: corev1.SecretTypeTLS,
|
||||
Data: map[string][]byte{
|
||||
v1.TLSCertKey: cert,
|
||||
v1.TLSPrivateKeyKey: key,
|
||||
corev1.TLSCertKey: cert,
|
||||
corev1.TLSPrivateKeyKey: key,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -399,9 +413,13 @@ const clusterDetailsTemplate = `Cluster details:
|
||||
Persistence:
|
||||
Type: {{.Persistence.Type}}{{ if .Persistence.StorageClassName }}
|
||||
StorageClass: {{ .Persistence.StorageClassName }}{{ end }}{{ if .Persistence.StorageRequestSize }}
|
||||
Size: {{ .Persistence.StorageRequestSize }}{{ end }}`
|
||||
Size: {{ .Persistence.StorageRequestSize }}{{ end }}{{ if .Labels }}
|
||||
Labels: {{ range $key, $value := .Labels }}
|
||||
{{$key}}: {{$value}}{{ end }}{{ end }}{{ if .Annotations }}
|
||||
Annotations: {{ range $key, $value := .Annotations }}
|
||||
{{$key}}: {{$value}}{{ end }}{{ end }}`
|
||||
|
||||
func printClusterDetails(cluster *v1beta1.Cluster) (string, error) {
|
||||
func getClusterDetails(cluster *v1beta1.Cluster) (string, error) {
|
||||
type templateData struct {
|
||||
Mode v1beta1.ClusterMode
|
||||
Servers int32
|
||||
@@ -413,6 +431,8 @@ func printClusterDetails(cluster *v1beta1.Cluster) (string, error) {
|
||||
StorageClassName string
|
||||
StorageRequestSize string
|
||||
}
|
||||
Labels map[string]string
|
||||
Annotations map[string]string
|
||||
}
|
||||
|
||||
data := templateData{
|
||||
@@ -421,11 +441,16 @@ func printClusterDetails(cluster *v1beta1.Cluster) (string, error) {
|
||||
Agents: ptr.Deref(cluster.Spec.Agents, 0),
|
||||
Version: cluster.Spec.Version,
|
||||
HostVersion: cluster.Status.HostVersion,
|
||||
Annotations: cluster.Annotations,
|
||||
Labels: cluster.Labels,
|
||||
}
|
||||
|
||||
data.Persistence.Type = cluster.Spec.Persistence.Type
|
||||
data.Persistence.StorageClassName = ptr.Deref(cluster.Spec.Persistence.StorageClassName, "")
|
||||
data.Persistence.StorageRequestSize = cluster.Spec.Persistence.StorageRequestSize
|
||||
|
||||
if srs := cluster.Spec.Persistence.StorageRequestSize; srs != nil {
|
||||
data.Persistence.StorageRequestSize = srs.String()
|
||||
}
|
||||
|
||||
tmpl, err := template.New("clusterDetails").Parse(clusterDetailsTemplate)
|
||||
if err != nil {
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
	"testing"

	"github.com/stretchr/testify/assert"
+	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/utils/ptr"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"

@@ -66,7 +67,7 @@ func Test_printClusterDetails(t *testing.T) {
			Persistence: v1beta1.PersistenceConfig{
				Type:               v1beta1.DynamicPersistenceMode,
				StorageClassName:   ptr.To("local-path"),
-				StorageRequestSize: "3gb",
+				StorageRequestSize: ptr.To(resource.MustParse("3G")),
			},
		},
		Status: v1beta1.ClusterStatus{

@@ -81,13 +82,13 @@ func Test_printClusterDetails(t *testing.T) {
Persistence:
  Type: dynamic
  StorageClass: local-path
-  Size: 3gb`,
+  Size: 3G`,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
-			clusterDetails, err := printClusterDetails(tt.cluster)
+			clusterDetails, err := getClusterDetails(tt.cluster)
			assert.NoError(t, err)
			assert.Equal(t, tt.want, clusterDetails)
		})
@@ -16,7 +16,6 @@ import (
	"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
	k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
-	"github.com/rancher/k3k/pkg/controller/cluster/agent"
)

var keepData bool

@@ -62,11 +61,6 @@ func delete(appCtx *AppContext) func(cmd *cobra.Command, args []string) error {
		if err := RemoveOwnerReferenceFromSecret(ctx, k3kcluster.TokenSecretName(cluster.Name), client, cluster); err != nil {
			return err
		}
-
-		// skip removing webhook secret
-		if err := RemoveOwnerReferenceFromSecret(ctx, agent.WebhookSecretName(cluster.Name), client, cluster); err != nil {
-			return err
-		}
	} else {
		matchingLabels := ctrlclient.MatchingLabels(map[string]string{"cluster": cluster.Name, "role": "server"})
		listOpts := ctrlclient.ListOptions{Namespace: cluster.Namespace}
cli/cmds/cluster_update.go (new file, 198 lines)

@@ -0,0 +1,198 @@
package cmds

import (
	"bufio"
	"errors"
	"fmt"
	"os"
	"strings"

	"github.com/blang/semver/v4"
	"github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/utils/ptr"

	apierrors "k8s.io/apimachinery/pkg/api/errors"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
	k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
)

type UpdateConfig struct {
	servers     int32
	agents      int32
	labels      []string
	annotations []string
	version     string
	noConfirm   bool
}

func NewClusterUpdateCmd(appCtx *AppContext) *cobra.Command {
	updateConfig := &UpdateConfig{}

	cmd := &cobra.Command{
		Use:     "update",
		Short:   "Update existing cluster",
		Example: "k3kcli cluster update [command options] NAME",
		RunE:    updateAction(appCtx, updateConfig),
		Args:    cobra.ExactArgs(1),
	}

	CobraFlagNamespace(appCtx, cmd.Flags())
	updateFlags(cmd, updateConfig)

	return cmd
}

func updateFlags(cmd *cobra.Command, cfg *UpdateConfig) {
	cmd.Flags().Int32Var(&cfg.servers, "servers", 1, "number of servers")
	cmd.Flags().Int32Var(&cfg.agents, "agents", 0, "number of agents")
	cmd.Flags().StringArrayVar(&cfg.labels, "labels", []string{}, "Labels to add to the cluster object (e.g. key=value)")
	cmd.Flags().StringArrayVar(&cfg.annotations, "annotations", []string{}, "Annotations to add to the cluster object (e.g. key=value)")
	cmd.Flags().StringVar(&cfg.version, "version", "", "k3s version")
	cmd.Flags().BoolVarP(&cfg.noConfirm, "no-confirm", "y", false, "Skip interactive approval before applying update")
}
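Putting the command and flags above together, an invocation would look roughly like the following (illustrative values only; the namespace flag is registered by CobraFlagNamespace and its exact name is assumed here):

```bash
k3kcli cluster update mycluster --namespace k3k-mycluster --servers 3 --agents 2 --labels env=dev -y
```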
||||
func updateAction(appCtx *AppContext, config *UpdateConfig) func(cmd *cobra.Command, args []string) error {
|
||||
return func(cmd *cobra.Command, args []string) error {
|
||||
ctx := cmd.Context()
|
||||
|
||||
client := appCtx.Client
|
||||
name := args[0]
|
||||
|
||||
if name == k3kcluster.ClusterInvalidName {
|
||||
return errors.New("invalid cluster name")
|
||||
}
|
||||
|
||||
namespace := appCtx.Namespace(name)
|
||||
|
||||
var virtualCluster v1beta1.Cluster
|
||||
|
||||
clusterKey := types.NamespacedName{Name: name, Namespace: appCtx.namespace}
|
||||
if err := appCtx.Client.Get(ctx, clusterKey, &virtualCluster); err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
return fmt.Errorf("cluster %s not found in namespace %s", name, appCtx.namespace)
|
||||
}
|
||||
|
||||
return fmt.Errorf("failed to fetch cluster: %w", err)
|
||||
}
|
||||
|
||||
var changes []change
|
||||
|
||||
if cmd.Flags().Changed("version") && config.version != virtualCluster.Spec.Version {
|
||||
currentVersion := virtualCluster.Spec.Version
|
||||
if currentVersion == "" {
|
||||
currentVersion = virtualCluster.Status.HostVersion
|
||||
}
|
||||
|
||||
currentVersionSemver, err := semver.ParseTolerant(currentVersion)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse current cluster version %w", err)
|
||||
}
|
||||
|
||||
newVersionSemver, err := semver.ParseTolerant(config.version)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse new cluster version %w", err)
|
||||
}
|
||||
|
||||
if newVersionSemver.LT(currentVersionSemver) {
|
||||
return fmt.Errorf("downgrading cluster version is not supported")
|
||||
}
|
||||
|
||||
changes = append(changes, change{"Version", currentVersion, config.version})
|
||||
virtualCluster.Spec.Version = config.version
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed("servers") {
|
||||
var oldServers int32
|
||||
if virtualCluster.Spec.Servers != nil {
|
||||
oldServers = *virtualCluster.Spec.Servers
|
||||
}
|
||||
|
||||
if oldServers != config.servers {
|
||||
changes = append(changes, change{"Servers", fmt.Sprintf("%d", oldServers), fmt.Sprintf("%d", config.servers)})
|
||||
virtualCluster.Spec.Servers = ptr.To(config.servers)
|
||||
}
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed("agents") {
|
||||
var oldAgents int32
|
||||
if virtualCluster.Spec.Agents != nil {
|
||||
oldAgents = *virtualCluster.Spec.Agents
|
||||
}
|
||||
|
||||
if oldAgents != config.agents {
|
||||
changes = append(changes, change{"Agents", fmt.Sprintf("%d", oldAgents), fmt.Sprintf("%d", config.agents)})
|
||||
virtualCluster.Spec.Agents = ptr.To(config.agents)
|
||||
}
|
||||
}
|
||||
|
||||
var labelChanges []change
|
||||
|
||||
if cmd.Flags().Changed("labels") {
|
||||
oldLabels := labels.Merge(nil, virtualCluster.Labels)
|
||||
virtualCluster.Labels = labels.Merge(virtualCluster.Labels, parseKeyValuePairs(config.labels, "label"))
|
||||
labelChanges = diffMaps(oldLabels, virtualCluster.Labels)
|
||||
}
|
||||
|
||||
var annotationChanges []change
|
||||
|
||||
if cmd.Flags().Changed("annotations") {
|
||||
oldAnnotations := labels.Merge(nil, virtualCluster.Annotations)
|
||||
virtualCluster.Annotations = labels.Merge(virtualCluster.Annotations, parseKeyValuePairs(config.annotations, "annotation"))
|
||||
annotationChanges = diffMaps(oldAnnotations, virtualCluster.Annotations)
|
||||
}
|
||||
|
||||
if len(changes) == 0 && len(labelChanges) == 0 && len(annotationChanges) == 0 {
|
||||
logrus.Info("No changes detected, skipping update")
|
||||
return nil
|
||||
}
|
||||
|
||||
logrus.Infof("Updating cluster '%s' in namespace '%s'", name, namespace)
|
||||
|
||||
printDiff(changes)
|
||||
printMapDiff("Labels", labelChanges)
|
||||
printMapDiff("Annotations", annotationChanges)
|
||||
|
||||
if !config.noConfirm {
|
||||
if !confirmClusterUpdate(&virtualCluster) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
if err := client.Update(ctx, &virtualCluster); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logrus.Info("Cluster updated successfully")
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func confirmClusterUpdate(cluster *v1beta1.Cluster) bool {
|
||||
clusterDetails, err := getClusterDetails(cluster)
|
||||
if err != nil {
|
||||
logrus.Fatalf("unable to get cluster details: %v", err)
|
||||
}
|
||||
|
||||
fmt.Printf("\nNew %s\n", clusterDetails)
|
||||
|
||||
fmt.Printf("\nDo you want to update the cluster? [y/N]: ")
|
||||
|
||||
scanner := bufio.NewScanner(os.Stdin)
|
||||
|
||||
if !scanner.Scan() {
|
||||
if err := scanner.Err(); err != nil {
|
||||
logrus.Errorf("Error reading input: %v", err)
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
fmt.Printf("\n")
|
||||
|
||||
return strings.ToLower(strings.TrimSpace(scanner.Text())) == "y"
|
||||
}
|
||||
53
cli/cmds/diff_printer.go
Normal file
@@ -0,0 +1,53 @@
package cmds

import "fmt"

type change struct {
	field    string
	oldValue string
	newValue string
}

func printDiff(changes []change) {
	for _, c := range changes {
		if c.oldValue == c.newValue {
			continue
		}

		fmt.Printf("%s: %s -> %s\n", c.field, c.oldValue, c.newValue)
	}
}

func printMapDiff(title string, changes []change) {
	if len(changes) == 0 {
		return
	}

	fmt.Printf("%s:\n", title)

	for _, c := range changes {
		switch c.oldValue {
		case "":
			fmt.Printf("  %s=%s (new)\n", c.field, c.newValue)
		default:
			fmt.Printf("  %s=%s -> %s=%s\n", c.field, c.oldValue, c.field, c.newValue)
		}
	}
}

func diffMaps(oldMap, newMap map[string]string) []change {
	var changes []change

	// Check for new and changed keys
	for k, newVal := range newMap {
		if oldVal, exists := oldMap[k]; exists {
			if oldVal != newVal {
				changes = append(changes, change{k, oldVal, newVal})
			}
		} else {
			changes = append(changes, change{k, "", newVal})
		}
	}

	return changes
}
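The helpers above are straightforward to compose. As an illustration only (not part of this change set), a minimal test-style sketch in the same `cmds` package could exercise them like this; the label values are made up:

```go
package cmds

import "testing"

// Minimal usage sketch for diffMaps and printMapDiff. Map iteration order is
// not deterministic, so the printed line order may vary between runs.
func TestDiffMapsSketch(t *testing.T) {
	oldLabels := map[string]string{"env": "dev"}
	newLabels := map[string]string{"env": "prod", "team": "core"}

	changes := diffMaps(oldLabels, newLabels)
	if len(changes) != 2 {
		t.Fatalf("expected 2 changes, got %d", len(changes))
	}

	// Prints, in some order:
	// Labels:
	//   env=dev -> env=prod
	//   team=core (new)
	printMapDiff("Labels", changes)
}
```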
@@ -133,6 +133,40 @@ k3kcli cluster list [command options]
|
||||
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
|
||||
----
|
||||
|
||||
== k3kcli cluster update
|
||||
|
||||
Update existing cluster
|
||||
|
||||
----
|
||||
k3kcli cluster update [flags]
|
||||
----
|
||||
|
||||
=== Examples
|
||||
|
||||
----
|
||||
k3kcli cluster update [command options] NAME
|
||||
----
|
||||
|
||||
=== Options
|
||||
|
||||
----
|
||||
--agents int32 number of agents
|
||||
--annotations stringArray Annotations to add to the cluster object (e.g. key=value)
|
||||
-h, --help help for update
|
||||
--labels stringArray Labels to add to the cluster object (e.g. key=value)
|
||||
-n, --namespace string namespace of the k3k cluster
|
||||
-y, --no-confirm Skip interactive approval before applying update
|
||||
--servers int32 number of servers (default 1)
|
||||
--version string k3s version
|
||||
----
|
||||
|
||||
=== Options inherited from parent commands
|
||||
|
||||
----
|
||||
--debug Turn on debug logs
|
||||
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
|
||||
----
|
||||
|
||||
== k3kcli kubeconfig
|
||||
|
||||
Manage kubeconfig for clusters.
|
||||
|
||||
@@ -21,4 +21,5 @@ K3k cluster command.
|
||||
* [k3kcli cluster create](k3kcli_cluster_create.md) - Create a new cluster.
|
||||
* [k3kcli cluster delete](k3kcli_cluster_delete.md) - Delete an existing cluster.
|
||||
* [k3kcli cluster list](k3kcli_cluster_list.md) - List all existing clusters.
|
||||
* [k3kcli cluster update](k3kcli_cluster_update.md) - Update existing cluster
|
||||
|
||||
|
||||
38
docs/cli/k3kcli_cluster_update.md
Normal file
@@ -0,0 +1,38 @@
## k3kcli cluster update

Update existing cluster

```
k3kcli cluster update [flags]
```

### Examples

```
k3kcli cluster update [command options] NAME
```

### Options

```
      --agents int32              number of agents
      --annotations stringArray   Annotations to add to the cluster object (e.g. key=value)
  -h, --help                      help for update
      --labels stringArray        Labels to add to the cluster object (e.g. key=value)
  -n, --namespace string          namespace of the k3k cluster
  -y, --no-confirm                Skip interactive approval before applying update
      --servers int32             number of servers (default 1)
      --version string            k3s version
```

### Options inherited from parent commands

```
      --debug               Turn on debug logs
      --kubeconfig string   kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
```

### SEE ALSO

* [k3kcli cluster](k3kcli_cluster.md) - K3k cluster command.
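As an illustration only (the cluster name, namespace, and version below are assumptions, not values taken from the repository), a typical invocation combining these flags might look like:

```
k3kcli cluster update \
  --namespace k3k-mycluster \
  --servers 3 \
  --labels env=prod \
  --version v1.33.1-k3s1 \
  mycluster
```

Without `-y/--no-confirm`, the command prints the detected changes and asks for confirmation before applying them.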
@@ -1,7 +1,6 @@
|
||||
processor:
|
||||
# RE2 regular expressions describing type fields that should be excluded from the generated documentation.
|
||||
ignoreFields:
|
||||
- "status$"
|
||||
- "TypeMeta$"
|
||||
|
||||
render:
|
||||
|
||||
@@ -41,6 +41,38 @@ _Appears In:_
|
||||
|===
|
||||
|
||||
|
||||
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-appliedpolicy"]
|
||||
=== AppliedPolicy
|
||||
|
||||
|
||||
|
||||
AppliedPolicy defines the observed state of an applied policy.
|
||||
|
||||
|
||||
|
||||
_Appears In:_
|
||||
|
||||
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterstatus[$$ClusterStatus$$]
|
||||
|
||||
[cols="25a,55a,10a,10a", options="header"]
|
||||
|===
|
||||
| Field | Description | Default | Validation
|
||||
| *`name`* __string__ | name is the name of the VirtualClusterPolicy currently applied to this cluster. + | | MinLength: 1 +
|
||||
|
||||
| *`priorityClass`* __string__ | priorityClass is the priority class enforced by the active VirtualClusterPolicy. + | |
|
||||
| *`nodeSelector`* __object (keys:string, values:string)__ | nodeSelector is a node selector enforced by the active VirtualClusterPolicy. + | |
|
||||
| *`serverAffinity`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#affinity-v1-core[$$Affinity$$]__ | serverAffinity is the affinity rules for server pods enforced by the active VirtualClusterPolicy. +
|
||||
This includes both node affinity and pod affinity/anti-affinity rules. + | |
|
||||
| *`agentAffinity`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#affinity-v1-core[$$Affinity$$]__ | agentAffinity is the affinity rules for agent pods enforced by the active VirtualClusterPolicy. +
|
||||
This includes both node affinity and pod affinity/anti-affinity rules. + | |
|
||||
| *`sync`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-syncconfig[$$SyncConfig$$]__ | sync is the SyncConfig enforced by the active VirtualClusterPolicy. + | |
|
||||
| *`runtimeClassName`* __string__ | RuntimeClassName specifies alternative runtime class for the +
agent and server pods of the cluster in virtual or shared mode. + | |
| *`securityContext`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#securitycontext-v1-core[$$SecurityContext$$]__ | SecurityContext specifies custom SecurityContext to be added +
to the agent and server pods of the cluster in virtual or shared mode. + | |
|
||||
|===
|
||||
|
||||
|
||||
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-cluster"]
|
||||
=== Cluster
|
||||
|
||||
@@ -64,6 +96,7 @@ _Appears In:_
|
||||
| *`metadata`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#objectmeta-v1-meta[$$ObjectMeta$$]__ | Refer to Kubernetes API documentation for fields of `metadata`.
|
||||
| |
|
||||
| *`spec`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterspec[$$ClusterSpec$$]__ | Spec defines the desired state of the Cluster. + | { } |
|
||||
| *`status`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterstatus[$$ClusterStatus$$]__ | Status reflects the observed state of the Cluster. + | { } |
|
||||
|===
|
||||
|
||||
|
||||
@@ -178,13 +211,53 @@ Example: ["--node-name=my-agent-node"] + | |
|
||||
| *`addons`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-addon[$$Addon$$] array__ | Addons specifies secrets containing raw YAML to deploy on cluster startup. + | |
|
||||
| *`serverLimit`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core[$$ResourceList$$]__ | ServerLimit specifies resource limits for server nodes. + | |
|
||||
| *`workerLimit`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core[$$ResourceList$$]__ | WorkerLimit specifies resource limits for agent nodes. + | |
|
||||
| *`serverAffinity`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#affinity-v1-core[$$Affinity$$]__ | ServerAffinity specifies the affinity rules for server pods. +
|
||||
This includes both node affinity and pod affinity/anti-affinity rules. + | |
|
||||
| *`agentAffinity`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#affinity-v1-core[$$Affinity$$]__ | AgentAffinity specifies the affinity rules for agent pods. +
|
||||
This includes both node affinity and pod affinity/anti-affinity rules. + | |
|
||||
| *`mirrorHostNodes`* __boolean__ | MirrorHostNodes controls whether node objects from the host cluster +
|
||||
are mirrored into the virtual cluster. + | |
|
||||
| *`customCAs`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-customcas[$$CustomCAs$$]__ | CustomCAs specifies the cert/key pairs for custom CA certificates. + | |
|
||||
| *`sync`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-syncconfig[$$SyncConfig$$]__ | Sync specifies the resources types that will be synced from virtual cluster to host cluster. + | { } |
|
||||
| *`secretMounts`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-secretmount[$$SecretMount$$] array__ | SecretMounts specifies a list of secrets to mount into server and agent pods. +
|
||||
Each entry defines a secret and its mount path within the pods. + | |
|
||||
| *`securityContext`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#securitycontext-v1-core[$$SecurityContext$$]__ | SecurityContext specifies custom SecurityContext to be added +
|
||||
to the agent and server pods of the cluster in virtual or shared mode. +
|
||||
This option will override the SecurityContext set by default for virtual mode. + | |
|
||||
| *`runtimeClassName`* __string__ | RuntimeClassName specifies alternative runtime class for the +
|
||||
agent and server pods of the cluster in virtual or shared mode. + | |
|
||||
|===
|
||||
|
||||
|
||||
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterstatus"]
|
||||
=== ClusterStatus
|
||||
|
||||
|
||||
|
||||
ClusterStatus reflects the observed state of a Cluster.
|
||||
|
||||
|
||||
|
||||
_Appears In:_
|
||||
|
||||
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-cluster[$$Cluster$$]
|
||||
|
||||
[cols="25a,55a,10a,10a", options="header"]
|
||||
|===
|
||||
| Field | Description | Default | Validation
|
||||
| *`hostVersion`* __string__ | HostVersion is the Kubernetes version of the host node. + | |
|
||||
| *`clusterCIDR`* __string__ | ClusterCIDR is the CIDR range for pod IPs. + | |
|
||||
| *`serviceCIDR`* __string__ | ServiceCIDR is the CIDR range for service IPs. + | |
|
||||
| *`clusterDNS`* __string__ | ClusterDNS is the IP address for the CoreDNS service. + | |
|
||||
| *`tlsSANs`* __string array__ | TLSSANs specifies subject alternative names for the K3s server certificate. + | |
|
||||
| *`policyName`* __string__ | PolicyName specifies the virtual cluster policy name bound to the virtual cluster. + | |
|
||||
| *`policy`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-appliedpolicy[$$AppliedPolicy$$]__ | policy represents the status of the policy applied to this cluster. +
|
||||
This field is set by the VirtualClusterPolicy controller. + | |
|
||||
| *`kubeletPort`* __integer__ | KubeletPort specifies the port used by k3k-kubelet in shared mode. + | |
|
||||
| *`conditions`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#condition-v1-meta[$$Condition$$] array__ | Conditions are the individual conditions for the cluster set. + | |
|
||||
| *`phase`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterphase[$$ClusterPhase$$]__ | Phase is a high-level summary of the cluster's current lifecycle state. + | Unknown | Enum: [Pending Provisioning Ready Failed Terminating Unknown] +
|
||||
|
||||
|===
|
||||
|
||||
|
||||
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-configmapsyncconfig"]
|
||||
@@ -192,7 +265,7 @@ are mirrored into the virtual cluster. + | |
|
||||
|
||||
|
||||
|
||||
ConfigMapSyncConfig specifies the sync options for services.
|
||||
ConfigMapSyncConfig specifies the sync options for ConfigMaps.
|
||||
|
||||
|
||||
|
||||
@@ -226,10 +299,9 @@ _Appears In:_
|
||||
[cols="25a,55a,10a,10a", options="header"]
|
||||
|===
|
||||
| Field | Description | Default | Validation
|
||||
| *`secretName`* __string__ | SecretName specifies the name of an existing secret to use. +
|
||||
The controller expects specific keys inside based on the credential type: +
|
||||
- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'. +
|
||||
- For ServiceAccountTokenKey: 'tls.key'. + | |
|
||||
| *`secretName`* __string__ | The secret must contain specific keys based on the credential type: +
|
||||
- For TLS certificate pairs (e.g., ServerCA): `tls.crt` and `tls.key`. +
|
||||
- For the ServiceAccountToken signing key: `tls.key`. + | |
|
||||
|===
|
||||
|
||||
|
||||
@@ -328,7 +400,7 @@ _Appears In:_
|
||||
|
||||
|
||||
|
||||
IngressSyncConfig specifies the sync options for services.
|
||||
IngressSyncConfig specifies the sync options for Ingresses.
|
||||
|
||||
|
||||
|
||||
@@ -342,6 +414,8 @@ _Appears In:_
|
||||
| *`enabled`* __boolean__ | Enabled is an on/off switch for syncing resources. + | false |
|
||||
| *`selector`* __object (keys:string, values:string)__ | Selector specifies set of labels of the resources that will be synced, if empty +
|
||||
then all resources of the given type will be synced. + | |
|
||||
| *`disableTLSSecretTranslation`* __boolean__ | DisableTLSSecretTranslation is an on/off switch for translating TLS secrets +
|
||||
from virtual cluster to host cluster + | false |
|
||||
|===
|
||||
|
||||
|
||||
@@ -414,7 +488,7 @@ _Appears In:_
|
||||
| *`type`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-persistencemode[$$PersistenceMode$$]__ | Type specifies the persistence mode. + | dynamic |
|
||||
| *`storageClassName`* __string__ | StorageClassName is the name of the StorageClass to use for the PVC. +
|
||||
This field is only relevant in "dynamic" mode. + | |
|
||||
| *`storageRequestSize`* __string__ | StorageRequestSize is the requested size for the PVC. +
|
||||
| *`storageRequestSize`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#quantity-resource-api[$$Quantity$$]__ | StorageRequestSize is the requested size for the PVC. +
|
||||
This field is only relevant in "dynamic" mode. + | 2G |
|
||||
|===
|
||||
|
||||
@@ -439,7 +513,7 @@ _Appears In:_
|
||||
|
||||
|
||||
|
||||
PersistentVolumeClaimSyncConfig specifies the sync options for services.
|
||||
PersistentVolumeClaimSyncConfig specifies the sync options for PersistentVolumeClaims.
|
||||
|
||||
|
||||
|
||||
@@ -477,7 +551,7 @@ _Appears In:_
|
||||
|
||||
|
||||
|
||||
PriorityClassSyncConfig specifies the sync options for services.
|
||||
PriorityClassSyncConfig specifies the sync options for PriorityClasses.
|
||||
|
||||
|
||||
|
||||
@@ -494,12 +568,57 @@ then all resources of the given type will be synced. + | |
|
||||
|===
|
||||
|
||||
|
||||
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-secretmount"]
|
||||
=== SecretMount
|
||||
|
||||
|
||||
|
||||
SecretMount defines a secret to be mounted into server or agent pods,
|
||||
allowing for custom configurations, certificates, or other sensitive data.
|
||||
|
||||
|
||||
|
||||
_Appears In:_
|
||||
|
||||
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterspec[$$ClusterSpec$$]
|
||||
|
||||
[cols="25a,55a,10a,10a", options="header"]
|
||||
|===
|
||||
| Field | Description | Default | Validation
|
||||
| *`secretName`* __string__ | secretName is the name of the secret in the pod's namespace to use. +
|
||||
More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + | |
|
||||
| *`items`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#keytopath-v1-core[$$KeyToPath$$] array__ | items If unspecified, each key-value pair in the Data field of the referenced +
|
||||
Secret will be projected into the volume as a file whose name is the +
|
||||
key and content is the value. If specified, the listed keys will be +
|
||||
projected into the specified paths, and unlisted keys will not be +
|
||||
present. If a key is specified which is not present in the Secret, +
|
||||
the volume setup will error unless it is marked optional. Paths must be +
|
||||
relative and may not contain the '..' path or start with '..'. + | |
|
||||
| *`defaultMode`* __integer__ | defaultMode is Optional: mode bits used to set permissions on created files by default. +
|
||||
Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. +
|
||||
YAML accepts both octal and decimal values, JSON requires decimal values +
|
||||
for mode bits. Defaults to 0644. +
|
||||
Directories within the path are not affected by this setting. +
|
||||
This might be in conflict with other options that affect the file +
|
||||
mode, like fsGroup, and the result can be other mode bits set. + | |
|
||||
| *`optional`* __boolean__ | optional field specify whether the Secret or its keys must be defined + | |
|
||||
| *`mountPath`* __string__ | MountPath is the path within server and agent pods where the +
|
||||
secret contents will be mounted. + | |
|
||||
| *`subPath`* __string__ | SubPath is an optional path within the secret to mount instead of the root. +
|
||||
When specified, only the specified key from the secret will be mounted as a file +
|
||||
at MountPath, keeping the parent directory writable. + | |
|
||||
| *`role`* __string__ | Role is the type of the k3k pod that will be used to mount the secret. +
|
||||
This can be 'server', 'agent', or 'all' (for both). + | | Enum: [server agent all] +
|
||||
|
||||
|===
|
||||
|
||||
|
||||
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-secretsyncconfig"]
|
||||
=== SecretSyncConfig
|
||||
|
||||
|
||||
|
||||
SecretSyncConfig specifies the sync options for services.
|
||||
SecretSyncConfig specifies the sync options for Secrets.
|
||||
|
||||
|
||||
|
||||
@@ -521,7 +640,7 @@ then all resources of the given type will be synced. + | |
|
||||
|
||||
|
||||
|
||||
ServiceSyncConfig specifies the sync options for services.
|
||||
ServiceSyncConfig specifies the sync options for Services.
|
||||
|
||||
|
||||
|
||||
@@ -538,6 +657,28 @@ then all resources of the given type will be synced. + | |
|
||||
|===
|
||||
|
||||
|
||||
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-storageclasssyncconfig"]
|
||||
=== StorageClassSyncConfig
|
||||
|
||||
|
||||
|
||||
StorageClassSyncConfig specifies the sync options for StorageClasses.
|
||||
|
||||
|
||||
|
||||
_Appears In:_
|
||||
|
||||
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-syncconfig[$$SyncConfig$$]
|
||||
|
||||
[cols="25a,55a,10a,10a", options="header"]
|
||||
|===
|
||||
| Field | Description | Default | Validation
|
||||
| *`enabled`* __boolean__ | Enabled is an on/off switch for syncing resources. + | false |
|
||||
| *`selector`* __object (keys:string, values:string)__ | Selector specifies set of labels of the resources that will be synced, if empty +
|
||||
then all resources of the given type will be synced. + | |
|
||||
|===
|
||||
|
||||
|
||||
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-syncconfig"]
|
||||
=== SyncConfig
|
||||
|
||||
@@ -549,6 +690,7 @@ SyncConfig will contain the resources that should be synced from virtual cluster
|
||||
|
||||
_Appears In:_
|
||||
|
||||
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-appliedpolicy[$$AppliedPolicy$$]
|
||||
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterspec[$$ClusterSpec$$]
|
||||
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-virtualclusterpolicyspec[$$VirtualClusterPolicySpec$$]
|
||||
|
||||
@@ -561,6 +703,7 @@ _Appears In:_
|
||||
| *`ingresses`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-ingresssyncconfig[$$IngressSyncConfig$$]__ | Ingresses resources sync configuration. + | { enabled:false } |
|
||||
| *`persistentVolumeClaims`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-persistentvolumeclaimsyncconfig[$$PersistentVolumeClaimSyncConfig$$]__ | PersistentVolumeClaims resources sync configuration. + | { enabled:true } |
|
||||
| *`priorityClasses`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-priorityclasssyncconfig[$$PriorityClassSyncConfig$$]__ | PriorityClasses resources sync configuration. + | { enabled:false } |
|
||||
| *`storageClasses`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-storageclasssyncconfig[$$StorageClassSyncConfig$$]__ | StorageClasses resources sync configuration. + | { enabled:false } |
|
||||
|===
|
||||
|
||||
|
||||
@@ -586,6 +729,7 @@ _Appears In:_
|
||||
| *`metadata`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#objectmeta-v1-meta[$$ObjectMeta$$]__ | Refer to Kubernetes API documentation for fields of `metadata`.
|
||||
| |
|
||||
| *`spec`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-virtualclusterpolicyspec[$$VirtualClusterPolicySpec$$]__ | Spec defines the desired state of the VirtualClusterPolicy. + | { } |
|
||||
| *`status`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-virtualclusterpolicystatus[$$VirtualClusterPolicyStatus$$]__ | Status reflects the observed state of the VirtualClusterPolicy. + | |
|
||||
|===
|
||||
|
||||
|
||||
@@ -632,14 +776,43 @@ _Appears In:_
|
||||
to set defaults and constraints (min/max) + | |
|
||||
| *`defaultNodeSelector`* __object (keys:string, values:string)__ | DefaultNodeSelector specifies the node selector that applies to all clusters (server + agent) in the target Namespace. + | |
|
||||
| *`defaultPriorityClass`* __string__ | DefaultPriorityClass specifies the priorityClassName applied to all pods of all clusters in the target Namespace. + | |
|
||||
| *`defaultServerAffinity`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#affinity-v1-core[$$Affinity$$]__ | DefaultServerAffinity specifies the affinity rules applied to server pods of all clusters in the target Namespace. +
|
||||
This includes both node affinity and pod affinity/anti-affinity rules. + | |
|
||||
| *`defaultAgentAffinity`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#affinity-v1-core[$$Affinity$$]__ | DefaultAgentAffinity specifies the affinity rules applied to agent pods of all clusters in the target Namespace. +
|
||||
This includes both node affinity and pod affinity/anti-affinity rules. + | |
|
||||
| *`allowedMode`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clustermode[$$ClusterMode$$]__ | AllowedMode specifies the allowed cluster provisioning mode. Defaults to "shared". + | shared | Enum: [shared virtual] +
|
||||
|
||||
| *`disableNetworkPolicy`* __boolean__ | DisableNetworkPolicy indicates whether to disable the creation of a default network policy for cluster isolation. + | |
|
||||
| *`podSecurityAdmissionLevel`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-podsecurityadmissionlevel[$$PodSecurityAdmissionLevel$$]__ | PodSecurityAdmissionLevel specifies the pod security admission level applied to the pods in the namespace. + | | Enum: [privileged baseline restricted] +
|
||||
|
||||
| *`sync`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-syncconfig[$$SyncConfig$$]__ | Sync specifies the resources types that will be synced from virtual cluster to host cluster. + | { } |
|
||||
| *`runtimeClassName`* __string__ | RuntimeClassName specifies alternative runtime class for the +
agent and server pods of the cluster in virtual or shared mode. + | |
| *`securityContext`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#securitycontext-v1-core[$$SecurityContext$$]__ | SecurityContext specifies custom SecurityContext to be added +
to the agent and server pods of the cluster in virtual or shared mode. + | |
|
||||
|===
|
||||
|
||||
|
||||
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-virtualclusterpolicystatus"]
|
||||
=== VirtualClusterPolicyStatus
|
||||
|
||||
|
||||
|
||||
VirtualClusterPolicyStatus reflects the observed state of a VirtualClusterPolicy.
|
||||
|
||||
|
||||
|
||||
_Appears In:_
|
||||
|
||||
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-virtualclusterpolicy[$$VirtualClusterPolicy$$]
|
||||
|
||||
[cols="25a,55a,10a,10a", options="header"]
|
||||
|===
|
||||
| Field | Description | Default | Validation
|
||||
| *`observedGeneration`* __integer__ | ObservedGeneration was the generation at the time the status was updated. + | |
|
||||
| *`lastUpdateTime`* __string__ | LastUpdate is the timestamp when the status was last updated. + | |
|
||||
| *`summary`* __string__ | Summary is a summary of the status. + | |
|
||||
| *`conditions`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#condition-v1-meta[$$Condition$$] array__ | Conditions are the individual conditions for the cluster set. + | |
|
||||
|===
|
||||
|
||||
|
||||
|
||||
@@ -32,6 +32,29 @@ _Appears in:_
|
||||
| `secretRef` _string_ | SecretRef is the name of the Secret. | | |
|
||||
|
||||
|
||||
#### AppliedPolicy
|
||||
|
||||
|
||||
|
||||
AppliedPolicy defines the observed state of an applied policy.
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [ClusterStatus](#clusterstatus)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `name` _string_ | name is the name of the VirtualClusterPolicy currently applied to this cluster. | | MinLength: 1 <br /> |
|
||||
| `priorityClass` _string_ | priorityClass is the priority class enforced by the active VirtualClusterPolicy. | | |
|
||||
| `nodeSelector` _object (keys:string, values:string)_ | nodeSelector is a node selector enforced by the active VirtualClusterPolicy. | | |
|
||||
| `serverAffinity` _[Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#affinity-v1-core)_ | serverAffinity is the affinity rules for server pods enforced by the active VirtualClusterPolicy.<br />This includes both node affinity and pod affinity/anti-affinity rules. | | |
|
||||
| `agentAffinity` _[Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#affinity-v1-core)_ | agentAffinity is the affinity rules for agent pods enforced by the active VirtualClusterPolicy.<br />This includes both node affinity and pod affinity/anti-affinity rules. | | |
|
||||
| `sync` _[SyncConfig](#syncconfig)_ | sync is the SyncConfig enforced by the active VirtualClusterPolicy. | | |
|
||||
| `runtimeClassName` _string_ | RuntimeClassName specifies alternative runtime class for the<br />agent and server pods of the cluster in virtual or shared mode. | | |
| `securityContext` _[SecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#securitycontext-v1-core)_ | SecurityContext specifies custom SecurityContext to be added<br />to the agent and server pods of the cluster in virtual or shared mode. | | |
|
||||
|
||||
|
||||
#### Cluster
|
||||
|
||||
|
||||
@@ -51,6 +74,7 @@ _Appears in:_
|
||||
| `kind` _string_ | `Cluster` | | |
|
||||
| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
|
||||
| `spec` _[ClusterSpec](#clusterspec)_ | Spec defines the desired state of the Cluster. | \{ \} | |
|
||||
| `status` _[ClusterStatus](#clusterstatus)_ | Status reflects the observed state of the Cluster. | \{ \} | |
|
||||
|
||||
|
||||
#### ClusterList
|
||||
@@ -132,18 +156,46 @@ _Appears in:_
|
||||
| `addons` _[Addon](#addon) array_ | Addons specifies secrets containing raw YAML to deploy on cluster startup. | | |
|
||||
| `serverLimit` _[ResourceList](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core)_ | ServerLimit specifies resource limits for server nodes. | | |
|
||||
| `workerLimit` _[ResourceList](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core)_ | WorkerLimit specifies resource limits for agent nodes. | | |
|
||||
| `serverAffinity` _[Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#affinity-v1-core)_ | ServerAffinity specifies the affinity rules for server pods.<br />This includes both node affinity and pod affinity/anti-affinity rules. | | |
|
||||
| `agentAffinity` _[Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#affinity-v1-core)_ | AgentAffinity specifies the affinity rules for agent pods.<br />This includes both node affinity and pod affinity/anti-affinity rules. | | |
|
||||
| `mirrorHostNodes` _boolean_ | MirrorHostNodes controls whether node objects from the host cluster<br />are mirrored into the virtual cluster. | | |
|
||||
| `customCAs` _[CustomCAs](#customcas)_ | CustomCAs specifies the cert/key pairs for custom CA certificates. | | |
|
||||
| `sync` _[SyncConfig](#syncconfig)_ | Sync specifies the resources types that will be synced from virtual cluster to host cluster. | \{ \} | |
|
||||
| `secretMounts` _[SecretMount](#secretmount) array_ | SecretMounts specifies a list of secrets to mount into server and agent pods.<br />Each entry defines a secret and its mount path within the pods. | | |
|
||||
| `securityContext` _[SecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#securitycontext-v1-core)_ | SecurityContext specifies custom SecurityContext to be added<br />to the agent and server pods of the cluster in virtual or shared mode.<br />This option will override the SecurityContext set by default for virtual mode. | | |
|
||||
| `runtimeClassName` _string_ | RuntimeClassName specifies alternative runtime class for the<br />agent and server pods of the cluster in virtual or shared mode. | | |
|
||||
|
||||
|
||||
#### ClusterStatus
|
||||
|
||||
|
||||
|
||||
ClusterStatus reflects the observed state of a Cluster.
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [Cluster](#cluster)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `hostVersion` _string_ | HostVersion is the Kubernetes version of the host node. | | |
|
||||
| `clusterCIDR` _string_ | ClusterCIDR is the CIDR range for pod IPs. | | |
|
||||
| `serviceCIDR` _string_ | ServiceCIDR is the CIDR range for service IPs. | | |
|
||||
| `clusterDNS` _string_ | ClusterDNS is the IP address for the CoreDNS service. | | |
|
||||
| `tlsSANs` _string array_ | TLSSANs specifies subject alternative names for the K3s server certificate. | | |
|
||||
| `policyName` _string_ | PolicyName specifies the virtual cluster policy name bound to the virtual cluster. | | |
|
||||
| `policy` _[AppliedPolicy](#appliedpolicy)_ | policy represents the status of the policy applied to this cluster.<br />This field is set by the VirtualClusterPolicy controller. | | |
|
||||
| `kubeletPort` _integer_ | KubeletPort specifies the port used by k3k-kubelet in shared mode. | | |
|
||||
| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#condition-v1-meta) array_ | Conditions are the individual conditions for the cluster set. | | |
|
||||
| `phase` _[ClusterPhase](#clusterphase)_ | Phase is a high-level summary of the cluster's current lifecycle state. | Unknown | Enum: [Pending Provisioning Ready Failed Terminating Unknown] <br /> |
|
||||
|
||||
|
||||
#### ConfigMapSyncConfig
|
||||
|
||||
|
||||
|
||||
ConfigMapSyncConfig specifies the sync options for services.
|
||||
ConfigMapSyncConfig specifies the sync options for ConfigMaps.
|
||||
|
||||
|
||||
|
||||
@@ -170,7 +222,7 @@ _Appears in:_
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `secretName` _string_ | SecretName specifies the name of an existing secret to use.<br />The controller expects specific keys inside based on the credential type:<br />- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.<br />- For ServiceAccountTokenKey: 'tls.key'. | | |
|
||||
| `secretName` _string_ | The secret must contain specific keys based on the credential type:<br />- For TLS certificate pairs (e.g., ServerCA): `tls.crt` and `tls.key`.<br />- For the ServiceAccountToken signing key: `tls.key`. | | |
|
||||
|
||||
|
||||
#### CredentialSources
|
||||
@@ -251,7 +303,7 @@ _Appears in:_
|
||||
|
||||
|
||||
|
||||
IngressSyncConfig specifies the sync options for services.
|
||||
IngressSyncConfig specifies the sync options for Ingresses.
|
||||
|
||||
|
||||
|
||||
@@ -262,6 +314,7 @@ _Appears in:_
|
||||
| --- | --- | --- | --- |
|
||||
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | false | |
|
||||
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
|
||||
| `disableTLSSecretTranslation` _boolean_ | DisableTLSSecretTranslation is an on/off switch for translating TLS secrets<br />from virtual cluster to host cluster | false | |
|
||||
|
||||
|
||||
#### LoadBalancerConfig
|
||||
@@ -313,7 +366,7 @@ _Appears in:_
|
||||
| --- | --- | --- | --- |
|
||||
| `type` _[PersistenceMode](#persistencemode)_ | Type specifies the persistence mode. | dynamic | |
|
||||
| `storageClassName` _string_ | StorageClassName is the name of the StorageClass to use for the PVC.<br />This field is only relevant in "dynamic" mode. | | |
|
||||
| `storageRequestSize` _string_ | StorageRequestSize is the requested size for the PVC.<br />This field is only relevant in "dynamic" mode. | 2G | |
|
||||
| `storageRequestSize` _[Quantity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#quantity-resource-api)_ | StorageRequestSize is the requested size for the PVC.<br />This field is only relevant in "dynamic" mode. | 2G | |
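For orientation, a hedged sketch of how these fields might appear in a Cluster manifest, assuming they are exposed under `spec.persistence` (the storage class name is an assumption):

```yaml
spec:
  persistence:
    type: dynamic                  # one of the PersistenceMode values
    storageClassName: local-path   # assumed; only relevant in "dynamic" mode
    storageRequestSize: 2G         # parsed as a resource Quantity
```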
|
||||
|
||||
|
||||
#### PersistenceMode
|
||||
@@ -333,7 +386,7 @@ _Appears in:_
|
||||
|
||||
|
||||
|
||||
PersistentVolumeClaimSyncConfig specifies the sync options for services.
|
||||
PersistentVolumeClaimSyncConfig specifies the sync options for PersistentVolumeClaims.
|
||||
|
||||
|
||||
|
||||
@@ -364,7 +417,7 @@ _Appears in:_
|
||||
|
||||
|
||||
|
||||
PriorityClassSyncConfig specifies the sync options for services.
|
||||
PriorityClassSyncConfig specifies the sync options for PriorityClasses.
|
||||
|
||||
|
||||
|
||||
@@ -377,11 +430,34 @@ _Appears in:_
|
||||
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
|
||||
|
||||
|
||||
#### SecretMount
|
||||
|
||||
|
||||
|
||||
SecretMount defines a secret to be mounted into server or agent pods,
|
||||
allowing for custom configurations, certificates, or other sensitive data.
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [ClusterSpec](#clusterspec)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `secretName` _string_ | secretName is the name of the secret in the pod's namespace to use.<br />More info: https://kubernetes.io/docs/concepts/storage/volumes#secret | | |
|
||||
| `items` _[KeyToPath](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#keytopath-v1-core) array_ | items If unspecified, each key-value pair in the Data field of the referenced<br />Secret will be projected into the volume as a file whose name is the<br />key and content is the value. If specified, the listed keys will be<br />projected into the specified paths, and unlisted keys will not be<br />present. If a key is specified which is not present in the Secret,<br />the volume setup will error unless it is marked optional. Paths must be<br />relative and may not contain the '..' path or start with '..'. | | |
|
||||
| `defaultMode` _integer_ | defaultMode is Optional: mode bits used to set permissions on created files by default.<br />Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.<br />YAML accepts both octal and decimal values, JSON requires decimal values<br />for mode bits. Defaults to 0644.<br />Directories within the path are not affected by this setting.<br />This might be in conflict with other options that affect the file<br />mode, like fsGroup, and the result can be other mode bits set. | | |
|
||||
| `optional` _boolean_ | optional field specify whether the Secret or its keys must be defined | | |
|
||||
| `mountPath` _string_ | MountPath is the path within server and agent pods where the<br />secret contents will be mounted. | | |
|
||||
| `subPath` _string_ | SubPath is an optional path within the secret to mount instead of the root.<br />When specified, only the specified key from the secret will be mounted as a file<br />at MountPath, keeping the parent directory writable. | | |
|
||||
| `role` _string_ | Role is the type of the k3k pod that will be used to mount the secret.<br />This can be 'server', 'agent', or 'all' (for both). | | Enum: [server agent all] <br /> |
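To make these fields concrete, a hypothetical `secretMounts` entry on a Cluster spec could look like the following; the secret name and mount path are illustrative only:

```yaml
spec:
  secretMounts:
    - secretName: my-custom-ca        # existing secret in the pod's namespace (assumed)
      mountPath: /etc/ssl/custom-ca   # where the secret contents are mounted in the pods
      optional: false
      role: server                    # one of: server, agent, all
```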
|
||||
|
||||
|
||||
#### SecretSyncConfig
|
||||
|
||||
|
||||
|
||||
SecretSyncConfig specifies the sync options for services.
|
||||
SecretSyncConfig specifies the sync options for Secrets.
|
||||
|
||||
|
||||
|
||||
@@ -398,7 +474,7 @@ _Appears in:_
|
||||
|
||||
|
||||
|
||||
ServiceSyncConfig specifies the sync options for services.
|
||||
ServiceSyncConfig specifies the sync options for Services.
|
||||
|
||||
|
||||
|
||||
@@ -411,6 +487,23 @@ _Appears in:_
|
||||
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
|
||||
|
||||
|
||||
#### StorageClassSyncConfig
|
||||
|
||||
|
||||
|
||||
StorageClassSyncConfig specifies the sync options for StorageClasses.
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [SyncConfig](#syncconfig)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | false | |
|
||||
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
|
||||
|
||||
|
||||
#### SyncConfig
|
||||
|
||||
|
||||
@@ -420,6 +513,7 @@ SyncConfig will contain the resources that should be synced from virtual cluster
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [AppliedPolicy](#appliedpolicy)
|
||||
- [ClusterSpec](#clusterspec)
|
||||
- [VirtualClusterPolicySpec](#virtualclusterpolicyspec)
|
||||
|
||||
@@ -431,6 +525,7 @@ _Appears in:_
|
||||
| `ingresses` _[IngressSyncConfig](#ingresssyncconfig)_ | Ingresses resources sync configuration. | \{ enabled:false \} | |
|
||||
| `persistentVolumeClaims` _[PersistentVolumeClaimSyncConfig](#persistentvolumeclaimsyncconfig)_ | PersistentVolumeClaims resources sync configuration. | \{ enabled:true \} | |
|
||||
| `priorityClasses` _[PriorityClassSyncConfig](#priorityclasssyncconfig)_ | PriorityClasses resources sync configuration. | \{ enabled:false \} | |
|
||||
| `storageClasses` _[StorageClassSyncConfig](#storageclasssyncconfig)_ | StorageClasses resources sync configuration. | \{ enabled:false \} | |
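For orientation, a hypothetical `sync` block using only the fields documented above might look like this (the selector label is an assumption):

```yaml
spec:
  sync:
    ingresses:
      enabled: true
      selector:
        k3k.io/sync: "true"   # assumed label; only matching Ingresses are synced
    storageClasses:
      enabled: false
```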
|
||||
|
||||
|
||||
#### VirtualClusterPolicy
|
||||
@@ -451,6 +546,7 @@ _Appears in:_
|
||||
| `kind` _string_ | `VirtualClusterPolicy` | | |
|
||||
| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
|
||||
| `spec` _[VirtualClusterPolicySpec](#virtualclusterpolicyspec)_ | Spec defines the desired state of the VirtualClusterPolicy. | \{ \} | |
|
||||
| `status` _[VirtualClusterPolicyStatus](#virtualclusterpolicystatus)_ | Status reflects the observed state of the VirtualClusterPolicy. | | |
|
||||
|
||||
|
||||
#### VirtualClusterPolicyList
|
||||
@@ -488,11 +584,32 @@ _Appears in:_
|
||||
| `limit` _[LimitRangeSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#limitrangespec-v1-core)_ | Limit specifies the LimitRange that will be applied to all pods within the VirtualClusterPolicy<br />to set defaults and constraints (min/max) | | |
|
||||
| `defaultNodeSelector` _object (keys:string, values:string)_ | DefaultNodeSelector specifies the node selector that applies to all clusters (server + agent) in the target Namespace. | | |
|
||||
| `defaultPriorityClass` _string_ | DefaultPriorityClass specifies the priorityClassName applied to all pods of all clusters in the target Namespace. | | |
|
||||
| `defaultServerAffinity` _[Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#affinity-v1-core)_ | DefaultServerAffinity specifies the affinity rules applied to server pods of all clusters in the target Namespace.<br />This includes both node affinity and pod affinity/anti-affinity rules. | | |
|
||||
| `defaultAgentAffinity` _[Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#affinity-v1-core)_ | DefaultAgentAffinity specifies the affinity rules applied to agent pods of all clusters in the target Namespace.<br />This includes both node affinity and pod affinity/anti-affinity rules. | | |
|
||||
| `allowedMode` _[ClusterMode](#clustermode)_ | AllowedMode specifies the allowed cluster provisioning mode. Defaults to "shared". | shared | Enum: [shared virtual] <br /> |
|
||||
| `disableNetworkPolicy` _boolean_ | DisableNetworkPolicy indicates whether to disable the creation of a default network policy for cluster isolation. | | |
|
||||
| `podSecurityAdmissionLevel` _[PodSecurityAdmissionLevel](#podsecurityadmissionlevel)_ | PodSecurityAdmissionLevel specifies the pod security admission level applied to the pods in the namespace. | | Enum: [privileged baseline restricted] <br /> |
|
||||
| `sync` _[SyncConfig](#syncconfig)_ | Sync specifies the resources types that will be synced from virtual cluster to host cluster. | \{ \} | |
|
||||
| `runtimeClassName` _string_ | RuntimeClassName specifies alternative runtime class for the<br />agent and server pods of the cluster in virtual or shared mode. | | |
| `securityContext` _[SecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#securitycontext-v1-core)_ | SecurityContext specifies custom SecurityContext to be added<br />to the agent and server pods of the cluster in virtual or shared mode. | | |
|
||||
|
||||
|
||||
#### VirtualClusterPolicyStatus
|
||||
|
||||
|
||||
|
||||
VirtualClusterPolicyStatus reflects the observed state of a VirtualClusterPolicy.
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [VirtualClusterPolicy](#virtualclusterpolicy)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `observedGeneration` _integer_ | ObservedGeneration was the generation at the time the status was updated. | | |
|
||||
| `lastUpdateTime` _string_ | LastUpdate is the timestamp when the status was last updated. | | |
|
||||
| `summary` _string_ | Summary is a summary of the status. | | |
|
||||
| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#condition-v1-meta) array_ | Conditions are the individual conditions for the cluster set. | | |
|
||||
|
||||
|
||||
|
||||
@@ -11,6 +11,18 @@ To start developing K3k you will need:
|
||||
- A running Kubernetes cluster
|
||||
|
||||
|
||||
> [!IMPORTANT]
|
||||
>
|
||||
> Virtual clusters in shared mode need to have a configured storage provider, unless the `--persistence-type ephemeral` flag is used.
|
||||
>
|
||||
> To install the [`local-path-provisioner`](https://github.com/rancher/local-path-provisioner) and set it as the default storage class you can run:
|
||||
>
|
||||
> ```
|
||||
> kubectl apply -f https://raw.githubusercontent.com/rancher/local-path-provisioner/v0.0.34/deploy/local-path-storage.yaml
|
||||
> kubectl patch storageclass local-path -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
|
||||
> ```
|
||||
|
||||
|
||||
### TLDR
|
||||
|
||||
```shell
|
||||
@@ -39,13 +51,19 @@ To see all the available Make commands you can run `make help`, i.e:
|
||||
package Package the k3k and k3k-kubelet Docker images
|
||||
push Push the K3k images to the registry
|
||||
test Run all the tests
|
||||
test-unit Run the unit tests (skips the e2e)
|
||||
test-controller Run the controller tests (pkg/controller)
|
||||
test-kubelet-controller Run the controller tests (pkg/controller)
|
||||
test-unit Run the unit tests (skips the e2e and integration tests)
|
||||
test-kubelet Run the k3k-kubelet controller tests (tests/integration/k3k-kubelet)
|
||||
test-policy Run the policy controller tests (tests/integration/policy)
|
||||
test-cluster Run the cluster controller tests (tests/integration/cluster)
|
||||
test-integration Run the controller tests (pkg/controller)
|
||||
test-e2e Run the e2e tests
|
||||
test-cli Run the cli tests
|
||||
generate Generate the CRDs specs
|
||||
docs Build the CRDs and CLI docs
|
||||
docs-crds Build the CRDs docs
|
||||
docs-cli Build the CLI docs
|
||||
lint Find any linting issues in the project
|
||||
fmt Format source files in the project
|
||||
validate Validate the project checking for any dependency or doc mismatch
|
||||
install Install K3k with Helm on the targeted Kubernetes cluster
|
||||
help Show this help.
|
||||
@@ -80,7 +98,20 @@ Once you have your images available you can install K3k with the `make install`
|
||||
|
||||
## Tests
|
||||
|
||||
To run the tests you can just run `make test`, or one of the other available "sub-tests" targets (`test-unit`, `test-controller`, `test-e2e`).
|
||||
To run the tests you can just run `make test`, or one of the other available "sub-tests" targets (`test-unit`, `test-controller`, `test-e2e`, `test-cli`).
|
||||
|
||||
When running the tests the namespaces used are cleaned up. If you want to keep them for debugging you can set the `KEEP_NAMESPACES` variable, e.g.:
|
||||
|
||||
```
|
||||
KEEP_NAMESPACES=true make test-e2e
|
||||
```
|
||||
|
||||
The e2e and cli tests run against the cluster configured in your KUBECONFIG environment variable. Running the tests with the `K3K_DOCKER_INSTALL` environment variable set will use `testcontainers` instead:
|
||||
|
||||
```
|
||||
K3K_DOCKER_INSTALL=true make test-e2e
|
||||
```
|
||||
|
||||
|
||||
We use [Ginkgo](https://onsi.github.io/ginkgo/), and [`envtest`](https://book.kubebuilder.io/reference/envtest) for testing the controllers.
|
||||
|
||||
@@ -153,3 +184,7 @@ Last thing to do is to get the kubeconfig to connect to the virtual cluster we'v
|
||||
```bash
|
||||
k3kcli kubeconfig generate --name mycluster --namespace k3k-mycluster --kubeconfig-server localhost:30001
|
||||
```
|
||||
|
||||
|
||||
> [!IMPORTANT]
|
||||
> Because of technical limitations, it is not possible to create virtual clusters in `virtual` mode with K3d or any other dockerized environment (Kind, Minikube).
|
||||
|
||||
@@ -167,7 +167,7 @@ kind: Cluster
|
||||
metadata:
|
||||
name: k3kcluster-custom-k8s
|
||||
spec:
|
||||
version: "v1.33.1-k3s1"
|
||||
version: "v1.35.2-k3s1"
|
||||
```
|
||||
|
||||
This sets the virtual cluster's Kubernetes version explicitly.
|
||||
@@ -178,7 +178,7 @@ This sets the virtual cluster's Kubernetes version explicitly.
|
||||
|
||||
```sh
|
||||
k3kcli cluster create \
|
||||
--version v1.33.1-k3s1 \
|
||||
--version v1.35.2-k3s1 \
|
||||
k3kcluster-custom-k8s
|
||||
```
|
||||
|
||||
|
||||
225
go.mod
@@ -1,58 +1,53 @@
|
||||
module github.com/rancher/k3k
|
||||
|
||||
go 1.24.10
|
||||
go 1.25.0
|
||||
|
||||
replace (
|
||||
github.com/google/cel-go => github.com/google/cel-go v0.20.1
|
||||
github.com/prometheus/client_golang => github.com/prometheus/client_golang v1.16.0
|
||||
github.com/prometheus/client_model => github.com/prometheus/client_model v0.6.1
|
||||
github.com/prometheus/common => github.com/prometheus/common v0.64.0
|
||||
golang.org/x/term => golang.org/x/term v0.15.0
|
||||
)
|
||||
toolchain go1.25.9
|
||||
|
||||
require (
|
||||
github.com/go-logr/logr v1.4.2
|
||||
github.com/blang/semver/v4 v4.0.0
|
||||
github.com/go-logr/logr v1.4.3
|
||||
github.com/go-logr/zapr v1.3.0
|
||||
github.com/google/go-cmp v0.7.0
|
||||
github.com/onsi/ginkgo/v2 v2.21.0
|
||||
github.com/onsi/gomega v1.36.0
|
||||
github.com/onsi/ginkgo/v2 v2.28.1
|
||||
github.com/onsi/gomega v1.39.1
|
||||
github.com/rancher/dynamiclistener v1.27.5
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/spf13/cobra v1.10.1
|
||||
github.com/sirupsen/logrus v1.9.4
|
||||
github.com/spf13/cobra v1.10.2
|
||||
github.com/spf13/pflag v1.0.10
|
||||
github.com/spf13/viper v1.21.0
|
||||
github.com/stretchr/testify v1.11.1
|
||||
github.com/testcontainers/testcontainers-go v0.35.0
|
||||
github.com/testcontainers/testcontainers-go/modules/k3s v0.35.0
|
||||
github.com/virtual-kubelet/virtual-kubelet v1.11.1-0.20250530103808-c9f64e872803
|
||||
go.etcd.io/etcd/api/v3 v3.5.16
|
||||
go.etcd.io/etcd/client/v3 v3.5.16
|
||||
go.uber.org/zap v1.27.0
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
helm.sh/helm/v3 v3.14.4
|
||||
k8s.io/api v0.31.13
|
||||
k8s.io/apiextensions-apiserver v0.31.13
|
||||
k8s.io/apimachinery v0.31.13
|
||||
k8s.io/apiserver v0.31.13
|
||||
k8s.io/cli-runtime v0.31.13
|
||||
k8s.io/client-go v0.31.13
|
||||
k8s.io/component-base v0.31.13
|
||||
k8s.io/component-helpers v0.31.13
|
||||
k8s.io/kubectl v0.31.13
|
||||
k8s.io/kubelet v0.31.13
|
||||
k8s.io/kubernetes v1.31.13
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
|
||||
sigs.k8s.io/controller-runtime v0.19.4
|
||||
github.com/testcontainers/testcontainers-go v0.41.0
|
||||
github.com/testcontainers/testcontainers-go/modules/k3s v0.41.0
|
||||
github.com/virtual-kubelet/virtual-kubelet v1.12.0
|
||||
go.etcd.io/etcd/api/v3 v3.6.10
|
||||
go.etcd.io/etcd/client/v3 v3.6.10
|
||||
go.uber.org/zap v1.27.1
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
helm.sh/helm/v3 v3.20.1
|
||||
k8s.io/api v0.35.3
|
||||
k8s.io/apiextensions-apiserver v0.35.3
|
||||
k8s.io/apimachinery v0.35.3
|
||||
k8s.io/apiserver v0.35.3
|
||||
k8s.io/cli-runtime v0.35.3
|
||||
k8s.io/client-go v0.35.3
|
||||
k8s.io/component-base v0.35.3
|
||||
k8s.io/component-helpers v0.35.3
|
||||
k8s.io/kubectl v0.35.3
|
||||
k8s.io/kubelet v0.35.3
|
||||
k8s.io/kubernetes v1.35.3
|
||||
k8s.io/utils v0.0.0-20260319190234-28399d86e0b5
|
||||
sigs.k8s.io/controller-runtime v0.23.3
|
||||
)
|
||||
|
||||
require (
|
||||
dario.cat/mergo v1.0.1 // indirect
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
|
||||
github.com/BurntSushi/toml v1.4.0 // indirect
|
||||
cel.dev/expr v0.25.1 // indirect
|
||||
dario.cat/mergo v1.0.2 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
|
||||
github.com/BurntSushi/toml v1.6.0 // indirect
|
||||
github.com/MakeNowJust/heredoc v1.0.0 // indirect
|
||||
github.com/Masterminds/goutils v1.1.1 // indirect
|
||||
github.com/Masterminds/semver/v3 v3.3.0 // indirect
|
||||
github.com/Masterminds/semver/v3 v3.4.0 // indirect
|
||||
github.com/Masterminds/sprig/v3 v3.3.0 // indirect
|
||||
github.com/Masterminds/squirrel v1.5.4 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.2 // indirect
|
||||
@@ -60,36 +55,33 @@ require (
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/chai2010/gettext-go v1.0.2 // indirect
|
||||
github.com/containerd/containerd v1.7.24 // indirect
|
||||
github.com/containerd/errdefs v0.3.0 // indirect
|
||||
github.com/containerd/containerd v1.7.30 // indirect
|
||||
github.com/containerd/errdefs v1.0.0 // indirect
|
||||
github.com/containerd/errdefs/pkg v0.3.0 // indirect
|
||||
github.com/containerd/log v0.1.0 // indirect
|
||||
github.com/containerd/platforms v0.2.1 // indirect
|
||||
github.com/coreos/go-semver v0.3.1 // indirect
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
|
||||
github.com/cpuguy83/dockercfg v0.3.2 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
|
||||
github.com/cyphar/filepath-securejoin v0.3.6 // indirect
|
||||
github.com/cyphar/filepath-securejoin v0.6.1 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/distribution/reference v0.6.0 // indirect
|
||||
github.com/docker/cli v25.0.1+incompatible // indirect
|
||||
github.com/docker/distribution v2.8.3+incompatible // indirect
|
||||
github.com/docker/docker v27.1.1+incompatible // indirect
|
||||
github.com/docker/docker-credential-helpers v0.7.0 // indirect
|
||||
github.com/docker/go-connections v0.5.0 // indirect
|
||||
github.com/docker/go-metrics v0.0.1 // indirect
|
||||
github.com/docker/docker v28.5.2+incompatible // indirect
|
||||
github.com/docker/go-connections v0.6.0 // indirect
|
||||
github.com/docker/go-units v0.5.0 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
|
||||
github.com/evanphx/json-patch v5.9.0+incompatible // indirect
|
||||
github.com/evanphx/json-patch/v5 v5.9.0 // indirect
|
||||
github.com/ebitengine/purego v0.10.0 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.12.2 // indirect
|
||||
github.com/evanphx/json-patch v5.9.11+incompatible // indirect
|
||||
github.com/evanphx/json-patch/v5 v5.9.11 // indirect
|
||||
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
|
||||
github.com/fatih/color v1.13.0 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/fsnotify/fsnotify v1.9.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
|
||||
github.com/go-errors/errors v1.4.2 // indirect
|
||||
github.com/go-gorp/gorp/v3 v3.1.0 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
@@ -104,33 +96,31 @@ require (
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/google/btree v1.1.3 // indirect
|
||||
github.com/google/cel-go v0.22.0 // indirect
|
||||
github.com/google/gnostic-models v0.6.8 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
github.com/google/cel-go v0.26.0 // indirect
|
||||
github.com/google/gnostic-models v0.7.0 // indirect
|
||||
github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/gorilla/mux v1.8.1 // indirect
|
||||
github.com/gorilla/websocket v1.5.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
|
||||
github.com/gosuri/uitable v0.0.4 // indirect
|
||||
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||
github.com/huandu/xstrings v1.5.0 // indirect
|
||||
github.com/imdario/mergo v0.3.13 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/jmoiron/sqlx v1.4.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/compress v1.17.9 // indirect
|
||||
github.com/klauspost/compress v1.18.2 // indirect
|
||||
github.com/kylelemons/godebug v1.1.0 // indirect
|
||||
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
|
||||
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
|
||||
github.com/lib/pq v1.10.9 // indirect
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
|
||||
github.com/magiconair/properties v1.8.7 // indirect
|
||||
github.com/magiconair/properties v1.8.10 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-isatty v0.0.17 // indirect
|
||||
@@ -139,91 +129,90 @@ require (
|
||||
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
|
||||
github.com/mitchellh/reflectwalk v1.0.2 // indirect
|
||||
github.com/moby/docker-image-spec v1.3.1 // indirect
|
||||
github.com/moby/locker v1.0.1 // indirect
|
||||
github.com/moby/go-archive v0.2.0 // indirect
|
||||
github.com/moby/patternmatcher v0.6.0 // indirect
|
||||
github.com/moby/spdystream v0.5.0 // indirect
|
||||
github.com/moby/sys/mountinfo v0.7.2 // indirect
|
||||
github.com/moby/sys/sequential v0.5.0 // indirect
|
||||
github.com/moby/sys/user v0.3.0 // indirect
|
||||
github.com/moby/sys/sequential v0.6.0 // indirect
|
||||
github.com/moby/sys/user v0.4.0 // indirect
|
||||
github.com/moby/sys/userns v0.1.0 // indirect
|
||||
github.com/moby/term v0.5.0 // indirect
|
||||
github.com/moby/term v0.5.2 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
|
||||
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
|
||||
github.com/morikuni/aec v1.0.0 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.1 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
|
||||
github.com/prometheus/client_golang v1.20.5 // indirect
|
||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
|
||||
github.com/prometheus/client_golang v1.23.2 // indirect
|
||||
github.com/prometheus/client_model v0.6.2
|
||||
github.com/prometheus/common v0.64.0 // indirect
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/rubenv/sql-migrate v1.7.1 // indirect
|
||||
github.com/prometheus/common v0.67.4 // indirect
|
||||
github.com/prometheus/procfs v0.16.1 // indirect
|
||||
github.com/rubenv/sql-migrate v1.8.1 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/sagikazarmark/locafero v0.11.0 // indirect
|
||||
github.com/shirou/gopsutil/v3 v3.23.12 // indirect
|
||||
github.com/shoenig/go-m1cpu v0.1.6 // indirect
|
||||
github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 // indirect
|
||||
github.com/shirou/gopsutil/v4 v4.26.2 // indirect
|
||||
github.com/shopspring/decimal v1.4.0 // indirect
|
||||
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect
|
||||
github.com/spf13/afero v1.15.0 // indirect
|
||||
github.com/spf13/cast v1.10.0 // indirect
|
||||
github.com/stoewer/go-strcase v1.3.0 // indirect
|
||||
github.com/subosito/gotenv v1.6.0 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.12 // indirect
|
||||
github.com/tklauser/numcpus v0.6.1 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.16 // indirect
|
||||
github.com/tklauser/numcpus v0.11.0 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
|
||||
github.com/xlab/treeprint v1.2.0 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.3 // indirect
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.16 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.6.10 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect
|
||||
go.opentelemetry.io/otel v1.33.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.33.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.33.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.33.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect
|
||||
go.opentelemetry.io/otel v1.43.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.43.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.43.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.43.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.5.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.3 // indirect
|
||||
go.yaml.in/yaml/v3 v3.0.4 // indirect
|
||||
golang.org/x/crypto v0.40.0 // indirect
|
||||
golang.org/x/crypto v0.48.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
|
||||
golang.org/x/net v0.42.0 // indirect
|
||||
golang.org/x/oauth2 v0.30.0 // indirect
|
||||
golang.org/x/sync v0.16.0 // indirect
|
||||
golang.org/x/sys v0.34.0 // indirect
|
||||
golang.org/x/term v0.33.0 // indirect
|
||||
golang.org/x/text v0.28.0 // indirect
|
||||
golang.org/x/time v0.9.0 // indirect
|
||||
golang.org/x/tools v0.35.0 // indirect
|
||||
golang.org/x/mod v0.33.0 // indirect
|
||||
golang.org/x/net v0.51.0 // indirect
|
||||
golang.org/x/oauth2 v0.34.0 // indirect
|
||||
golang.org/x/sync v0.20.0 // indirect
|
||||
golang.org/x/sys v0.42.0 // indirect
|
||||
golang.org/x/term v0.40.0 // indirect
|
||||
golang.org/x/text v0.35.0 // indirect
|
||||
golang.org/x/time v0.14.0 // indirect
|
||||
golang.org/x/tools v0.42.0 // indirect
|
||||
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 // indirect
|
||||
google.golang.org/grpc v1.67.3 // indirect
|
||||
google.golang.org/protobuf v1.36.6 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 // indirect
|
||||
google.golang.org/grpc v1.79.3 // indirect
|
||||
google.golang.org/protobuf v1.36.10 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/controller-manager v0.35.1 // indirect
|
||||
k8s.io/klog/v2 v2.130.1
|
||||
k8s.io/kms v0.31.13 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
|
||||
oras.land/oras-go v1.2.5 // indirect
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
|
||||
sigs.k8s.io/kustomize/api v0.18.0 // indirect
|
||||
sigs.k8s.io/kustomize/kyaml v0.18.1 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.3 // indirect
|
||||
sigs.k8s.io/yaml v1.4.0 // indirect
|
||||
k8s.io/kms v0.35.3 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect
|
||||
oras.land/oras-go/v2 v2.6.0 // indirect
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect
|
||||
sigs.k8s.io/kustomize/api v0.20.1 // indirect
|
||||
sigs.k8s.io/kustomize/kyaml v0.20.1 // indirect
|
||||
sigs.k8s.io/randfill v1.0.0 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482 // indirect
|
||||
sigs.k8s.io/yaml v1.6.0 // indirect
|
||||
)
|
||||
|
||||
@@ -14,7 +14,6 @@ type config struct {
|
||||
HostKubeconfig string `mapstructure:"hostKubeconfig"`
|
||||
VirtKubeconfig string `mapstructure:"virtKubeconfig"`
|
||||
KubeletPort int `mapstructure:"kubeletPort"`
|
||||
WebhookPort int `mapstructure:"webhookPort"`
|
||||
ServerIP string `mapstructure:"serverIP"`
|
||||
Version string `mapstructure:"version"`
|
||||
MirrorHostNodes bool `mapstructure:"mirrorHostNodes"`
|
||||
|
||||
@@ -100,7 +100,7 @@ func (c *ConfigMapSyncer) Reconcile(ctx context.Context, req reconcile.Request)
|
||||
|
||||
syncedConfigMap := c.translateConfigMap(&virtualConfigMap)
|
||||
|
||||
if err := controllerutil.SetControllerReference(&cluster, syncedConfigMap, c.HostClient.Scheme()); err != nil {
|
||||
if err := controllerutil.SetOwnerReference(&cluster, syncedConfigMap, c.HostClient.Scheme()); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
|
||||
@@ -92,13 +92,22 @@ func (r *IngressReconciler) Reconcile(ctx context.Context, req reconcile.Request
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
appliedSync := cluster.Spec.Sync.DeepCopy()
|
||||
|
||||
// If a policy is applied to the virtual cluster we need to use its SyncConfig, if available
|
||||
if cluster.Status.Policy != nil && cluster.Status.Policy.Sync != nil {
|
||||
appliedSync = cluster.Status.Policy.Sync
|
||||
}
|
||||
|
||||
syncConfig := appliedSync.Ingresses
|
||||
|
||||
if err := r.VirtualClient.Get(ctx, req.NamespacedName, &virtIngress); err != nil {
|
||||
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
syncedIngress := r.ingress(&virtIngress)
|
||||
syncedIngress := r.ingress(&virtIngress, syncConfig.DisableTLSSecretTranslation)
|
||||
|
||||
if err := controllerutil.SetControllerReference(&cluster, syncedIngress, r.HostClient.Scheme()); err != nil {
|
||||
if err := controllerutil.SetOwnerReference(&cluster, syncedIngress, r.HostClient.Scheme()); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
@@ -143,7 +152,7 @@ func (r *IngressReconciler) Reconcile(ctx context.Context, req reconcile.Request
|
||||
return reconcile.Result{}, r.HostClient.Update(ctx, syncedIngress)
|
||||
}
|
||||
|
||||
func (s *IngressReconciler) ingress(obj *networkingv1.Ingress) *networkingv1.Ingress {
|
||||
func (s *IngressReconciler) ingress(obj *networkingv1.Ingress, disableTLSSecretTranslation bool) *networkingv1.Ingress {
|
||||
hostIngress := obj.DeepCopy()
|
||||
s.Translator.TranslateTo(hostIngress)
|
||||
|
||||
@@ -157,6 +166,17 @@ func (s *IngressReconciler) ingress(obj *networkingv1.Ingress) *networkingv1.Ing
|
||||
}
|
||||
}
|
||||
}
|
||||
// don't sync finalizers to the host
|
||||
|
||||
// TLS secret translation is disabled; return early without translating TLS secrets in the ingress spec
|
||||
if disableTLSSecretTranslation {
|
||||
return hostIngress
|
||||
}
|
||||
// ensure tls secrets are also translated
|
||||
for i := range hostIngress.Spec.TLS {
|
||||
if hostIngress.Spec.TLS[i].SecretName != "" {
|
||||
hostIngress.Spec.TLS[i].SecretName = s.Translator.TranslateName(obj.GetNamespace(), hostIngress.Spec.TLS[i].SecretName)
|
||||
}
|
||||
}
|
||||
|
||||
return hostIngress
|
||||
}
|
||||
|
||||
@@ -98,7 +98,7 @@ func (r *PVCReconciler) Reconcile(ctx context.Context, req reconcile.Request) (r
|
||||
}
|
||||
|
||||
syncedPVC := r.pvc(&virtPVC)
|
||||
if err := controllerutil.SetControllerReference(&cluster, syncedPVC, r.HostClient.Scheme()); err != nil {
|
||||
if err := controllerutil.SetOwnerReference(&cluster, syncedPVC, r.HostClient.Scheme()); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
@@ -131,8 +131,19 @@ func (r *PVCReconciler) Reconcile(ctx context.Context, req reconcile.Request) (r
|
||||
}
|
||||
}
|
||||
|
||||
var currentHostPVC v1.PersistentVolumeClaim
|
||||
|
||||
err := r.HostClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(syncedPVC), &currentHostPVC)
|
||||
if err == nil {
|
||||
log.V(1).Info("persistent volume claim already exist in the host cluster")
|
||||
}
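// any error other than NotFound stops the reconcile here; note that this also covers err == nil,
// i.e. the PVC already exists on the host, so only a missing PVC falls through to the creation below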
|
||||
|
||||
if !apierrors.IsNotFound(err) {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
// create the pvc on host
|
||||
log.Info("creating the persistent volume claim for the first time on the host cluster")
|
||||
log.Info("creating the persistent volume claim for the first time in the host cluster")
|
||||
|
||||
// note that we don't need to update the PVC on the host cluster; we only sync it so that it can be
// handled by the host cluster.
|
||||
|
||||
@@ -117,7 +117,7 @@ func (r *PriorityClassSyncer) Reconcile(ctx context.Context, req reconcile.Reque
|
||||
|
||||
hostPriorityClass := r.translatePriorityClass(priorityClass)
|
||||
|
||||
if err := controllerutil.SetControllerReference(&cluster, hostPriorityClass, r.HostClient.Scheme()); err != nil {
|
||||
if err := controllerutil.SetOwnerReference(&cluster, hostPriorityClass, r.HostClient.Scheme()); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
|
||||
@@ -100,7 +100,7 @@ func (s *SecretSyncer) Reconcile(ctx context.Context, req reconcile.Request) (re
|
||||
|
||||
syncedSecret := s.translateSecret(&virtualSecret)
|
||||
|
||||
if err := controllerutil.SetControllerReference(&cluster, syncedSecret, s.HostClient.Scheme()); err != nil {
|
||||
if err := controllerutil.SetOwnerReference(&cluster, syncedSecret, s.HostClient.Scheme()); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
|
||||
@@ -76,7 +76,7 @@ func (r *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request
|
||||
|
||||
syncedService := r.service(&virtService)
|
||||
|
||||
if err := controllerutil.SetControllerReference(&cluster, syncedService, r.HostClient.Scheme()); err != nil {
|
||||
if err := controllerutil.SetOwnerReference(&cluster, syncedService, r.HostClient.Scheme()); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
|
||||
@@ -2,120 +2,35 @@ package webhook
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/utils/ptr"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
|
||||
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/rancher/k3k/pkg/controller/cluster/agent"
|
||||
"github.com/rancher/k3k/pkg/controller"
|
||||
)
|
||||
|
||||
const (
|
||||
webhookName = "podmutating.k3k.io"
|
||||
webhookTimeout = int32(10)
|
||||
webhookPath = "/mutate--v1-pod"
|
||||
FieldpathField = "k3k.io/fieldpath"
|
||||
)
|
||||
const webhookName = "podmutating.k3k.io"
|
||||
|
||||
type webhookHandler struct {
|
||||
client ctrlruntimeclient.Client
|
||||
scheme *runtime.Scheme
|
||||
serviceName string
|
||||
clusterName string
|
||||
clusterNamespace string
|
||||
logger logr.Logger
|
||||
webhookPort int
|
||||
}
|
||||
|
||||
// AddPodMutatingWebhook will add a mutating webhook to the virtual cluster to
|
||||
// modify the nodeName of the created pods with the name of the virtual kubelet node name
|
||||
// as well as remove any status fields of the downward apis env fields
|
||||
func AddPodMutatingWebhook(ctx context.Context, mgr manager.Manager, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, serviceName string, logger logr.Logger, webhookPort int) error {
|
||||
handler := webhookHandler{
|
||||
client: mgr.GetClient(),
|
||||
scheme: mgr.GetScheme(),
|
||||
logger: logger,
|
||||
serviceName: serviceName,
|
||||
clusterName: clusterName,
|
||||
clusterNamespace: clusterNamespace,
|
||||
webhookPort: webhookPort,
|
||||
func RemovePodMutatingWebhook(ctx context.Context, virtualClient, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace string) error {
|
||||
webhookSecret := &v1.Secret{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Secret",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: controller.SafeConcatNameWithPrefix(clusterName, "webhook"),
|
||||
Namespace: clusterNamespace,
|
||||
},
|
||||
}
|
||||
|
||||
// create mutating webhook configuration to the cluster
|
||||
config, err := handler.configuration(ctx, hostClient)
|
||||
if err != nil {
|
||||
if err := hostClient.Delete(ctx, webhookSecret); !apierrors.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := handler.client.Create(ctx, config); err != nil {
|
||||
if !apierrors.IsAlreadyExists(err) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// register webhook with the manager
|
||||
return ctrl.NewWebhookManagedBy(mgr).For(&v1.Pod{}).WithDefaulter(&handler).Complete()
|
||||
}
|
||||
|
||||
func (w *webhookHandler) Default(ctx context.Context, obj runtime.Object) error {
|
||||
pod, ok := obj.(*v1.Pod)
|
||||
if !ok {
|
||||
return fmt.Errorf("invalid request: object was type %t not cluster", obj)
|
||||
}
|
||||
|
||||
w.logger.Info("mutating webhook request", "pod", pod.Name, "namespace", pod.Namespace)
|
||||
// look for status.* fields in the env
|
||||
if pod.Annotations == nil {
|
||||
pod.Annotations = make(map[string]string)
|
||||
}
|
||||
|
||||
for i, container := range pod.Spec.Containers {
|
||||
for j, env := range container.Env {
|
||||
if env.ValueFrom == nil || env.ValueFrom.FieldRef == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
fieldPath := env.ValueFrom.FieldRef.FieldPath
|
||||
if strings.Contains(fieldPath, "status.") {
|
||||
annotationKey := fmt.Sprintf("%s_%d_%s", FieldpathField, i, env.Name)
|
||||
pod.Annotations[annotationKey] = fieldPath
|
||||
pod.Spec.Containers[i].Env = removeEnv(pod.Spec.Containers[i].Env, j)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *webhookHandler) configuration(ctx context.Context, hostClient ctrlruntimeclient.Client) (*admissionregistrationv1.MutatingWebhookConfiguration, error) {
|
||||
w.logger.Info("extracting webhook tls from host cluster")
|
||||
|
||||
var webhookTLSSecret v1.Secret
|
||||
|
||||
if err := hostClient.Get(ctx, types.NamespacedName{Name: agent.WebhookSecretName(w.clusterName), Namespace: w.clusterNamespace}, &webhookTLSSecret); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
caBundle, ok := webhookTLSSecret.Data["ca.crt"]
|
||||
if !ok {
|
||||
return nil, errors.New("webhook CABundle does not exist in secret")
|
||||
}
|
||||
|
||||
webhookURL := fmt.Sprintf("https://%s:%d%s", w.serviceName, w.webhookPort, webhookPath)
|
||||
|
||||
return &admissionregistrationv1.MutatingWebhookConfiguration{
|
||||
webhook := &admissionregistrationv1.MutatingWebhookConfiguration{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: "admissionregistration.k8s.io/v1",
|
||||
Kind: "MutatingWebhookConfiguration",
|
||||
@@ -123,51 +38,11 @@ func (w *webhookHandler) configuration(ctx context.Context, hostClient ctrlrunti
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: webhookName + "-configuration",
|
||||
},
|
||||
Webhooks: []admissionregistrationv1.MutatingWebhook{
|
||||
{
|
||||
Name: webhookName,
|
||||
AdmissionReviewVersions: []string{"v1"},
|
||||
SideEffects: ptr.To(admissionregistrationv1.SideEffectClassNone),
|
||||
TimeoutSeconds: ptr.To(webhookTimeout),
|
||||
ClientConfig: admissionregistrationv1.WebhookClientConfig{
|
||||
URL: ptr.To(webhookURL),
|
||||
CABundle: caBundle,
|
||||
},
|
||||
Rules: []admissionregistrationv1.RuleWithOperations{
|
||||
{
|
||||
Operations: []admissionregistrationv1.OperationType{
|
||||
"CREATE",
|
||||
},
|
||||
Rule: admissionregistrationv1.Rule{
|
||||
APIGroups: []string{""},
|
||||
APIVersions: []string{"v1"},
|
||||
Resources: []string{"pods"},
|
||||
Scope: ptr.To(admissionregistrationv1.NamespacedScope),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func removeEnv(envs []v1.EnvVar, i int) []v1.EnvVar {
|
||||
envs[i] = envs[len(envs)-1]
|
||||
return envs[:len(envs)-1]
|
||||
}
|
||||
|
||||
func ParseFieldPathAnnotationKey(annotationKey string) (int, string, error) {
|
||||
s := strings.SplitN(annotationKey, "_", 3)
|
||||
if len(s) != 3 {
|
||||
return -1, "", errors.New("fieldpath annotation is not set correctly")
|
||||
}
|
||||
|
||||
containerIndex, err := strconv.Atoi(s[1])
|
||||
if err != nil {
|
||||
return -1, "", err
|
||||
if err := virtualClient.Delete(ctx, webhook); !apierrors.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
envName := s[2]
|
||||
|
||||
return containerIndex, envName, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -2,13 +2,8 @@ package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
@@ -26,9 +21,7 @@ import (
|
||||
"k8s.io/klog/v2"
|
||||
"sigs.k8s.io/controller-runtime/pkg/cache"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/webhook"
|
||||
|
||||
certutil "github.com/rancher/dynamiclistener/cert"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
||||
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
||||
@@ -71,7 +64,7 @@ type kubelet struct {
|
||||
token string
|
||||
}
|
||||
|
||||
func newKubelet(ctx context.Context, c *config, logger logr.Logger) (*kubelet, error) {
|
||||
func newKubelet(ctx context.Context, c *config) (*kubelet, error) {
|
||||
hostConfig, err := clientcmd.BuildConfigFromFlags("", c.HostKubeconfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -84,7 +77,7 @@ func newKubelet(ctx context.Context, c *config, logger logr.Logger) (*kubelet, e
|
||||
return nil, err
|
||||
}
|
||||
|
||||
virtConfig, err := virtRestConfig(ctx, c.VirtKubeconfig, hostClient, c.ClusterName, c.ClusterNamespace, c.Token, logger)
|
||||
virtConfig, err := virtRestConfig(ctx, c.VirtKubeconfig, hostClient, c.ClusterName, c.ClusterNamespace, c.Token)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -94,8 +87,6 @@ func newKubelet(ctx context.Context, c *config, logger logr.Logger) (*kubelet, e
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ctrl.SetLogger(logger)
|
||||
|
||||
hostMetricsBindAddress := ":8083"
|
||||
virtualMetricsBindAddress := ":8084"
|
||||
|
||||
@@ -128,14 +119,8 @@ func newKubelet(ctx context.Context, c *config, logger logr.Logger) (*kubelet, e
|
||||
return nil, errors.New("unable to add client go types to virtual cluster scheme: " + err.Error())
|
||||
}
|
||||
|
||||
webhookServer := webhook.NewServer(webhook.Options{
|
||||
CertDir: "/opt/rancher/k3k-webhook",
|
||||
Port: c.WebhookPort,
|
||||
})
|
||||
|
||||
virtualMgr, err := ctrl.NewManager(virtConfig, manager.Options{
|
||||
Scheme: virtualScheme,
|
||||
WebhookServer: webhookServer,
|
||||
LeaderElection: true,
|
||||
LeaderElectionNamespace: "kube-system",
|
||||
LeaderElectionID: c.ClusterName,
|
||||
@@ -147,10 +132,10 @@ func newKubelet(ctx context.Context, c *config, logger logr.Logger) (*kubelet, e
|
||||
return nil, errors.New("unable to create controller-runtime mgr for virtual cluster: " + err.Error())
|
||||
}
|
||||
|
||||
logger.Info("adding pod mutating webhook")
|
||||
logger.Info("removing pod mutating webhook")
|
||||
|
||||
if err := k3kwebhook.AddPodMutatingWebhook(ctx, virtualMgr, hostClient, c.ClusterName, c.ClusterNamespace, c.ServiceName, logger, c.WebhookPort); err != nil {
|
||||
return nil, errors.New("unable to add pod mutating webhook for virtual cluster: " + err.Error())
|
||||
if err := k3kwebhook.RemovePodMutatingWebhook(ctx, virtualMgr.GetClient(), hostClient, c.ClusterName, c.ClusterNamespace); err != nil {
|
||||
return nil, errors.New("unable to remove pod mutating webhook for virtual cluster: " + err.Error())
|
||||
}
|
||||
|
||||
if err := addControllers(ctx, hostMgr, virtualMgr, c, hostClient); err != nil {
|
||||
@@ -208,20 +193,6 @@ func clusterIP(ctx context.Context, serviceName, clusterNamespace string, hostCl
|
||||
return service.Spec.ClusterIP, nil
|
||||
}
|
||||
|
||||
func (k *kubelet) registerNode(agentIP string, cfg config) error {
|
||||
providerFunc := k.newProviderFunc(cfg)
|
||||
nodeOpts := k.nodeOpts(cfg.KubeletPort, cfg.ClusterNamespace, cfg.ClusterName, cfg.AgentHostname, agentIP)
|
||||
|
||||
var err error
|
||||
|
||||
k.node, err = nodeutil.NewNode(k.name, providerFunc, nodeutil.WithClient(k.virtClient), nodeOpts)
|
||||
if err != nil {
|
||||
return errors.New("unable to start kubelet: " + err.Error())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (k *kubelet) start(ctx context.Context) {
|
||||
// any one of the following 3 tasks (host manager, virtual manager, node) crashing will stop the
|
||||
// program, and all 3 of them block on start, so we start them here in go-routines
|
||||
@@ -265,40 +236,29 @@ func (k *kubelet) start(ctx context.Context) {
|
||||
|
||||
func (k *kubelet) newProviderFunc(cfg config) nodeutil.NewProviderFunc {
|
||||
return func(pc nodeutil.ProviderConfig) (nodeutil.Provider, node.NodeProvider, error) {
|
||||
utilProvider, err := provider.New(*k.hostConfig, k.hostMgr, k.virtualMgr, k.logger, cfg.ClusterNamespace, cfg.ClusterName, cfg.ServerIP, k.dnsIP)
|
||||
utilProvider, err := provider.New(*k.hostConfig, k.hostMgr, k.virtualMgr, k.logger, cfg.ClusterNamespace, cfg.ClusterName, cfg.ServerIP, k.dnsIP, cfg.AgentHostname)
|
||||
if err != nil {
|
||||
return nil, nil, errors.New("unable to make nodeutil provider: " + err.Error())
|
||||
}
|
||||
|
||||
provider.ConfigureNode(k.logger, pc.Node, cfg.AgentHostname, k.port, k.agentIP, utilProvider.CoreClient, utilProvider.VirtualClient, k.virtualCluster, cfg.Version, cfg.MirrorHostNodes)
|
||||
err = provider.ConfigureNode(
|
||||
k.logger,
|
||||
pc.Node,
|
||||
cfg.AgentHostname,
|
||||
k.port,
|
||||
k.agentIP,
|
||||
utilProvider.Host.Manager,
|
||||
utilProvider.Virtual.Client,
|
||||
k.virtualCluster,
|
||||
cfg.Version,
|
||||
cfg.MirrorHostNodes,
|
||||
)
|
||||
|
||||
return utilProvider, &provider.Node{}, nil
|
||||
return utilProvider, &provider.Node{}, err
|
||||
}
|
||||
}
|
||||
|
||||
func (k *kubelet) nodeOpts(srvPort int, namespace, name, hostname, agentIP string) nodeutil.NodeOpt {
|
||||
return func(c *nodeutil.NodeConfig) error {
|
||||
c.HTTPListenAddr = fmt.Sprintf(":%d", srvPort)
|
||||
// set up the routes
|
||||
mux := http.NewServeMux()
|
||||
if err := nodeutil.AttachProviderRoutes(mux)(c); err != nil {
|
||||
return errors.New("unable to attach routes: " + err.Error())
|
||||
}
|
||||
|
||||
c.Handler = mux
|
||||
|
||||
tlsConfig, err := loadTLSConfig(name, namespace, k.name, hostname, k.token, agentIP)
|
||||
if err != nil {
|
||||
return errors.New("unable to get tls config: " + err.Error())
|
||||
}
|
||||
|
||||
c.TLSConfig = tlsConfig
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func virtRestConfig(ctx context.Context, virtualConfigPath string, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, token string, logger logr.Logger) (*rest.Config, error) {
|
||||
func virtRestConfig(ctx context.Context, virtualConfigPath string, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, token string) (*rest.Config, error) {
|
||||
if virtualConfigPath != "" {
|
||||
return clientcmd.BuildConfigFromFlags("", virtualConfigPath)
|
||||
}
|
||||
@@ -316,8 +276,10 @@ func virtRestConfig(ctx context.Context, virtualConfigPath string, hostClient ct
|
||||
return err != nil
|
||||
}, func() error {
|
||||
var err error
|
||||
|
||||
b, err = bootstrap.DecodedBootstrap(token, endpoint)
|
||||
logger.Error(err, "decoded bootstrap")
|
||||
|
||||
return err
|
||||
}); err != nil {
|
||||
return nil, errors.New("unable to decode bootstrap: " + err.Error())
|
||||
@@ -368,58 +330,6 @@ func kubeconfigBytes(url string, serverCA, clientCert, clientKey []byte) ([]byte
|
||||
return clientcmd.Write(*config)
|
||||
}
|
||||
|
||||
func loadTLSConfig(clusterName, clusterNamespace, nodeName, hostname, token, agentIP string) (*tls.Config, error) {
|
||||
var b *bootstrap.ControlRuntimeBootstrap
|
||||
|
||||
endpoint := fmt.Sprintf("%s.%s", server.ServiceName(clusterName), clusterNamespace)
|
||||
|
||||
if err := retry.OnError(controller.Backoff, func(err error) bool {
|
||||
return err != nil
|
||||
}, func() error {
|
||||
var err error
|
||||
b, err = bootstrap.DecodedBootstrap(token, endpoint)
|
||||
return err
|
||||
}); err != nil {
|
||||
return nil, errors.New("unable to decode bootstrap: " + err.Error())
|
||||
}
|
||||
// POD IP
|
||||
podIP := net.ParseIP(os.Getenv("POD_IP"))
|
||||
ip := net.ParseIP(agentIP)
|
||||
|
||||
altNames := certutil.AltNames{
|
||||
DNSNames: []string{hostname},
|
||||
IPs: []net.IP{ip, podIP},
|
||||
}
|
||||
|
||||
cert, key, err := certs.CreateClientCertKey(nodeName, nil, &altNames, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, 0, b.ServerCA.Content, b.ServerCAKey.Content)
|
||||
if err != nil {
|
||||
return nil, errors.New("unable to get cert and key: " + err.Error())
|
||||
}
|
||||
|
||||
clientCert, err := tls.X509KeyPair(cert, key)
|
||||
if err != nil {
|
||||
return nil, errors.New("unable to get key pair: " + err.Error())
|
||||
}
|
||||
|
||||
// create rootCA CertPool
|
||||
certs, err := certutil.ParseCertsPEM([]byte(b.ServerCA.Content))
|
||||
if err != nil {
|
||||
return nil, errors.New("unable to create ca certs: " + err.Error())
|
||||
}
|
||||
|
||||
if len(certs) < 1 {
|
||||
return nil, errors.New("ca cert is not parsed correctly")
|
||||
}
|
||||
|
||||
pool := x509.NewCertPool()
|
||||
pool.AddCert(certs[0])
|
||||
|
||||
return &tls.Config{
|
||||
RootCAs: pool,
|
||||
Certificates: []tls.Certificate{clientCert},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func addControllers(ctx context.Context, hostMgr, virtualMgr manager.Manager, c *config, hostClient ctrlruntimeclient.Client) error {
|
||||
var cluster v1beta1.Cluster
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
@@ -14,7 +13,7 @@ import (
|
||||
"github.com/spf13/pflag"
|
||||
"github.com/spf13/viper"
|
||||
|
||||
ctrlruntimelog "sigs.k8s.io/controller-runtime/pkg/log"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
|
||||
"github.com/rancher/k3k/pkg/log"
|
||||
)
|
||||
@@ -37,7 +36,8 @@ func main() {
|
||||
}
|
||||
|
||||
logger = zapr.NewLogger(log.New(debug, logFormat))
|
||||
ctrlruntimelog.SetLogger(logger)
|
||||
ctrl.SetLogger(logger)
|
||||
|
||||
return nil
|
||||
},
|
||||
RunE: run,
|
||||
@@ -51,7 +51,6 @@ func main() {
|
||||
rootCmd.PersistentFlags().StringVar(&cfg.HostKubeconfig, "host-kubeconfig", "", "Path to the host kubeconfig, if empty then virtual-kubelet will use incluster config")
|
||||
rootCmd.PersistentFlags().StringVar(&cfg.VirtKubeconfig, "virt-kubeconfig", "", "Path to the k3k cluster kubeconfig, if empty then virtual-kubelet will create its own config from k3k cluster")
|
||||
rootCmd.PersistentFlags().IntVar(&cfg.KubeletPort, "kubelet-port", 0, "kubelet API port number")
|
||||
rootCmd.PersistentFlags().IntVar(&cfg.WebhookPort, "webhook-port", 0, "Webhook port number")
|
||||
rootCmd.PersistentFlags().StringVar(&cfg.ServiceName, "service-name", "", "The service name deployed by the k3k controller")
|
||||
rootCmd.PersistentFlags().StringVar(&cfg.AgentHostname, "agent-hostname", "", "Agent Hostname used for TLS SAN for the kubelet server")
|
||||
rootCmd.PersistentFlags().StringVar(&cfg.ServerIP, "server-ip", "", "Server IP used for registering the virtual kubelet to the cluster")
|
||||
@@ -65,18 +64,20 @@ func main() {
|
||||
}
|
||||
|
||||
func run(cmd *cobra.Command, args []string) error {
|
||||
ctx := context.Background()
|
||||
ctx := cmd.Context()
|
||||
|
||||
if err := cfg.validate(); err != nil {
|
||||
return fmt.Errorf("failed to validate config: %w", err)
|
||||
}
|
||||
|
||||
k, err := newKubelet(ctx, &cfg, logger)
|
||||
k, err := newKubelet(ctx, &cfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create new virtual kubelet instance: %w", err)
|
||||
}
|
||||
|
||||
if err := k.registerNode(k.agentIP, cfg); err != nil {
|
||||
podIP := os.Getenv("POD_IP")
|
||||
|
||||
if err := k.registerNode(k.agentIP, podIP, cfg); err != nil {
|
||||
return fmt.Errorf("failed to register new node: %w", err)
|
||||
}
|
||||
|
||||
|
||||
114
k3k-kubelet/node.go
Normal file
@@ -0,0 +1,114 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
|
||||
"github.com/virtual-kubelet/virtual-kubelet/node/nodeutil"
|
||||
"k8s.io/client-go/util/retry"
|
||||
|
||||
certutil "github.com/rancher/dynamiclistener/cert"
|
||||
|
||||
"github.com/rancher/k3k/pkg/controller"
|
||||
"github.com/rancher/k3k/pkg/controller/certs"
|
||||
"github.com/rancher/k3k/pkg/controller/cluster/server"
|
||||
"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
|
||||
)
|
||||
|
||||
func (k *kubelet) registerNode(agentIP, podIP string, cfg config) error {
|
||||
tlsConfig, err := loadTLSConfig(cfg, k.name, k.token, agentIP, podIP)
|
||||
if err != nil {
|
||||
return errors.New("unable to get tls config: " + err.Error())
|
||||
}
|
||||
|
||||
mux := http.NewServeMux()
|
||||
|
||||
node, err := nodeutil.NewNode(
|
||||
k.name,
|
||||
k.newProviderFunc(cfg),
|
||||
nodeutil.WithClient(k.virtClient),
|
||||
nodeutil.AttachProviderRoutes(mux),
|
||||
nodeOpt(mux, tlsConfig, cfg.KubeletPort),
|
||||
)
|
||||
if err != nil {
|
||||
return errors.New("unable to start kubelet: " + err.Error())
|
||||
}
|
||||
|
||||
k.node = node
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func nodeOpt(mux *http.ServeMux, tlsConfig *tls.Config, port int) nodeutil.NodeOpt {
|
||||
return func(c *nodeutil.NodeConfig) error {
|
||||
c.Handler = mux
|
||||
c.TLSConfig = tlsConfig
|
||||
|
||||
c.HTTPListenAddr = fmt.Sprintf(":%d", port)
|
||||
|
||||
c.NodeSpec.Labels["kubernetes.io/role"] = "worker"
|
||||
c.NodeSpec.Labels["node-role.kubernetes.io/worker"] = "true"
|
||||
|
||||
c.SkipDownwardAPIResolution = true
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func loadTLSConfig(cfg config, nodeName, token, agentIP, podIP string) (*tls.Config, error) {
|
||||
var b *bootstrap.ControlRuntimeBootstrap
|
||||
|
||||
endpoint := fmt.Sprintf("%s.%s", server.ServiceName(cfg.ClusterName), cfg.ClusterNamespace)
|
||||
|
||||
if err := retry.OnError(controller.Backoff, func(err error) bool {
|
||||
return err != nil
|
||||
}, func() error {
|
||||
var err error
|
||||
|
||||
b, err = bootstrap.DecodedBootstrap(token, endpoint)
|
||||
|
||||
return err
|
||||
}); err != nil {
|
||||
return nil, errors.New("unable to decode bootstrap: " + err.Error())
|
||||
}
|
||||
|
||||
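// the serving certificate must be valid for the agent hostname and for both the agent and pod IPs,
// since the virtual kubelet can be reached through any of them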
altNames := certutil.AltNames{
|
||||
DNSNames: []string{cfg.AgentHostname},
|
||||
IPs: []net.IP{
|
||||
net.ParseIP(agentIP),
|
||||
net.ParseIP(podIP),
|
||||
},
|
||||
}
|
||||
|
||||
cert, key, err := certs.CreateClientCertKey(nodeName, nil, &altNames, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, 0, b.ServerCA.Content, b.ServerCAKey.Content)
|
||||
if err != nil {
|
||||
return nil, errors.New("unable to get cert and key: " + err.Error())
|
||||
}
|
||||
|
||||
clientCert, err := tls.X509KeyPair(cert, key)
|
||||
if err != nil {
|
||||
return nil, errors.New("unable to get key pair: " + err.Error())
|
||||
}
|
||||
|
||||
// create rootCA CertPool
|
||||
certs, err := certutil.ParseCertsPEM([]byte(b.ServerCA.Content))
|
||||
if err != nil {
|
||||
return nil, errors.New("unable to create ca certs: " + err.Error())
|
||||
}
|
||||
|
||||
if len(certs) < 1 {
|
||||
return nil, errors.New("ca cert is not parsed correctly")
|
||||
}
|
||||
|
||||
pool := x509.NewCertPool()
|
||||
pool.AddCert(certs[0])
|
||||
|
||||
return &tls.Config{
|
||||
RootCAs: pool,
|
||||
Certificates: []tls.Certificate{clientCert},
|
||||
}, nil
|
||||
}
|
||||
@@ -2,26 +2,26 @@ package provider
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
typedv1 "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
|
||||
)
|
||||
|
||||
func ConfigureNode(logger logr.Logger, node *corev1.Node, hostname string, servicePort int, ip string, coreClient typedv1.CoreV1Interface, virtualClient client.Client, virtualCluster v1beta1.Cluster, version string, mirrorHostNodes bool) {
|
||||
func ConfigureNode(logger logr.Logger, node *corev1.Node, hostname string, servicePort int, ip string, hostMgr manager.Manager, virtualClient client.Client, virtualCluster v1beta1.Cluster, version string, mirrorHostNodes bool) error {
|
||||
ctx := context.Background()
|
||||
|
||||
if mirrorHostNodes {
|
||||
hostNode, err := coreClient.Nodes().Get(ctx, node.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
logger.Error(err, "error getting host node for mirroring", err)
|
||||
var hostNode corev1.Node
|
||||
if err := hostMgr.GetAPIReader().Get(ctx, types.NamespacedName{Name: node.Name}, &hostNode); err != nil {
|
||||
logger.Error(err, "error getting host node for mirroring", "node", node.Name)
|
||||
return err
|
||||
}
|
||||
|
||||
node.Spec = *hostNode.Spec.DeepCopy()
|
||||
@@ -50,17 +50,10 @@ func ConfigureNode(logger logr.Logger, node *corev1.Node, hostname string, servi
|
||||
// configure versions
|
||||
node.Status.NodeInfo.KubeletVersion = version
|
||||
|
||||
updateNodeCapacityInterval := 10 * time.Second
|
||||
ticker := time.NewTicker(updateNodeCapacityInterval)
|
||||
|
||||
go func() {
|
||||
for range ticker.C {
|
||||
if err := updateNodeCapacity(ctx, coreClient, virtualClient, node.Name, virtualCluster.Spec.NodeSelector); err != nil {
|
||||
logger.Error(err, "error updating node capacity")
|
||||
}
|
||||
}
|
||||
}()
|
||||
startNodeCapacityUpdater(ctx, logger, hostMgr.GetClient(), virtualClient, virtualCluster, node.Name)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// nodeConditions returns the basic conditions which mark the node as ready
|
||||
@@ -108,73 +101,3 @@ func nodeConditions() []corev1.NodeCondition {
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// updateNodeCapacity will update the virtual node capacity (and the allocatable field) with the sum of all the resource in the host nodes.
|
||||
// If the nodeLabels are specified only the matching nodes will be considered.
|
||||
func updateNodeCapacity(ctx context.Context, coreClient typedv1.CoreV1Interface, virtualClient client.Client, virtualNodeName string, nodeLabels map[string]string) error {
|
||||
capacity, allocatable, err := getResourcesFromNodes(ctx, coreClient, nodeLabels)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var virtualNode corev1.Node
|
||||
if err := virtualClient.Get(ctx, types.NamespacedName{Name: virtualNodeName}, &virtualNode); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
virtualNode.Status.Capacity = capacity
|
||||
virtualNode.Status.Allocatable = allocatable
|
||||
|
||||
return virtualClient.Status().Update(ctx, &virtualNode)
|
||||
}
|
||||
|
||||
// getResourcesFromNodes will return a sum of all the resource capacity of the host nodes, and the allocatable resources.
|
||||
// If some node labels are specified only the matching nodes will be considered.
|
||||
func getResourcesFromNodes(ctx context.Context, coreClient typedv1.CoreV1Interface, nodeLabels map[string]string) (corev1.ResourceList, corev1.ResourceList, error) {
|
||||
listOpts := metav1.ListOptions{}
|
||||
|
||||
if nodeLabels != nil {
|
||||
labelSelector := metav1.LabelSelector{MatchLabels: nodeLabels}
|
||||
listOpts.LabelSelector = labels.Set(labelSelector.MatchLabels).String()
|
||||
}
|
||||
|
||||
nodeList, err := coreClient.Nodes().List(ctx, listOpts)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// sum all
|
||||
virtualCapacityResources := corev1.ResourceList{}
|
||||
virtualAvailableResources := corev1.ResourceList{}
|
||||
|
||||
for _, node := range nodeList.Items {
|
||||
// check if the node is Ready
|
||||
for _, condition := range node.Status.Conditions {
|
||||
if condition.Type != corev1.NodeReady {
|
||||
continue
|
||||
}
|
||||
|
||||
// if the node is not Ready then we can skip it
|
||||
if condition.Status != corev1.ConditionTrue {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// add all the available metrics to the virtual node
|
||||
for resourceName, resourceQuantity := range node.Status.Capacity {
|
||||
virtualResource := virtualCapacityResources[resourceName]
|
||||
|
||||
(&virtualResource).Add(resourceQuantity)
|
||||
virtualCapacityResources[resourceName] = virtualResource
|
||||
}
|
||||
|
||||
for resourceName, resourceQuantity := range node.Status.Allocatable {
|
||||
virtualResource := virtualAvailableResources[resourceName]
|
||||
|
||||
(&virtualResource).Add(resourceQuantity)
|
||||
virtualAvailableResources[resourceName] = virtualResource
|
||||
}
|
||||
}
|
||||
|
||||
return virtualCapacityResources, virtualAvailableResources, nil
|
||||
}
|
||||
|
||||
243
k3k-kubelet/provider/configure_capacity.go
Normal file
@@ -0,0 +1,243 @@
|
||||
package provider
|
||||
|
||||
import (
|
||||
"context"
|
||||
"maps"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
|
||||
)
|
||||
|
||||
const (
|
||||
// UpdateNodeCapacityInterval is the interval at which the node capacity is updated.
|
||||
UpdateNodeCapacityInterval = 10 * time.Second
|
||||
)
|
||||
|
||||
// milliScaleResources is a set of resource names that are measured in milli-units (e.g., CPU).
|
||||
// This is used to determine whether to use MilliValue() for calculations.
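// For example (illustrative): a quantity of 500m CPU has MilliValue() == 500 but Value() == 1
// (rounded up), so these resources are distributed in milli-units to avoid losing precision.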
|
||||
var milliScaleResources = map[corev1.ResourceName]struct{}{
|
||||
corev1.ResourceCPU: {},
|
||||
corev1.ResourceMemory: {},
|
||||
corev1.ResourceStorage: {},
|
||||
corev1.ResourceEphemeralStorage: {},
|
||||
corev1.ResourceRequestsCPU: {},
|
||||
corev1.ResourceRequestsMemory: {},
|
||||
corev1.ResourceRequestsStorage: {},
|
||||
corev1.ResourceRequestsEphemeralStorage: {},
|
||||
corev1.ResourceLimitsCPU: {},
|
||||
corev1.ResourceLimitsMemory: {},
|
||||
corev1.ResourceLimitsEphemeralStorage: {},
|
||||
}
|
||||
|
||||
// startNodeCapacityUpdater starts a goroutine that periodically updates the capacity
|
||||
// of the virtual node based on host node capacity and any applied ResourceQuotas.
|
||||
func startNodeCapacityUpdater(ctx context.Context, logger logr.Logger, hostClient client.Client, virtualClient client.Client, virtualCluster v1beta1.Cluster, virtualNodeName string) {
|
||||
go func() {
|
||||
ticker := time.NewTicker(UpdateNodeCapacityInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
updateNodeCapacity(ctx, logger, hostClient, virtualClient, virtualCluster, virtualNodeName)
|
||||
case <-ctx.Done():
|
||||
logger.Info("Stopping node capacity updates for node", "node", virtualNodeName)
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// updateNodeCapacity will update the virtual node capacity (and the allocatable field) with the sum of all the resource in the host nodes.
|
||||
// If the nodeLabels are specified only the matching nodes will be considered.
|
||||
func updateNodeCapacity(ctx context.Context, logger logr.Logger, hostClient client.Client, virtualClient client.Client, virtualCluster v1beta1.Cluster, virtualNodeName string) {
|
||||
// by default we get the resources of the same Node where the kubelet is running
|
||||
var node corev1.Node
|
||||
if err := hostClient.Get(ctx, types.NamespacedName{Name: virtualNodeName}, &node); err != nil {
|
||||
logger.Error(err, "error getting virtual node for updating node capacity")
|
||||
return
|
||||
}
|
||||
|
||||
allocatable := node.Status.Allocatable.DeepCopy()
|
||||
|
||||
// we need to check if the virtual cluster resources are "limited" through ResourceQuotas
|
||||
// If so we will use the minimum resources
|
||||
|
||||
var quotas corev1.ResourceQuotaList
|
||||
if err := hostClient.List(ctx, &quotas, &client.ListOptions{Namespace: virtualCluster.Namespace}); err != nil {
|
||||
logger.Error(err, "error getting namespace for updating node capacity")
|
||||
}
|
||||
|
||||
if len(quotas.Items) > 0 {
|
||||
resourceLists := []corev1.ResourceList{allocatable}
|
||||
for _, q := range quotas.Items {
|
||||
resourceLists = append(resourceLists, q.Status.Hard)
|
||||
}
|
||||
|
||||
mergedResourceLists := mergeResourceLists(resourceLists...)
|
||||
|
||||
var virtualNodeList, hostNodeList corev1.NodeList
|
||||
|
||||
if err := virtualClient.List(ctx, &virtualNodeList); err != nil {
|
||||
logger.Error(err, "error listing virtual nodes for stable capacity distribution")
|
||||
}
|
||||
|
||||
virtResourceMap := make(map[string]corev1.ResourceList)
|
||||
for _, vNode := range virtualNodeList.Items {
|
||||
virtResourceMap[vNode.Name] = corev1.ResourceList{}
|
||||
}
|
||||
|
||||
if err := hostClient.List(ctx, &hostNodeList); err != nil {
|
||||
logger.Error(err, "error listing host nodes for stable capacity distribution")
|
||||
}
|
||||
|
||||
hostResourceMap := make(map[string]corev1.ResourceList)
|
||||
|
||||
for _, hNode := range hostNodeList.Items {
|
||||
if _, ok := virtResourceMap[hNode.Name]; ok {
|
||||
hostResourceMap[hNode.Name] = hNode.Status.Allocatable
|
||||
}
|
||||
}
|
||||
|
||||
m := distributeQuotas(hostResourceMap, virtResourceMap, mergedResourceLists)
|
||||
allocatable = m[virtualNodeName]
|
||||
}
|
||||
|
||||
var virtualNode corev1.Node
|
||||
if err := virtualClient.Get(ctx, types.NamespacedName{Name: virtualNodeName}, &virtualNode); err != nil {
|
||||
logger.Error(err, "error getting virtual node for updating node capacity")
|
||||
return
|
||||
}
|
||||
|
||||
virtualNode.Status.Capacity = allocatable
|
||||
virtualNode.Status.Allocatable = allocatable
|
||||
|
||||
if err := virtualClient.Status().Update(ctx, &virtualNode); err != nil {
|
||||
logger.Error(err, "error updating node capacity")
|
||||
}
|
||||
}
|
||||
|
||||
// mergeResourceLists takes multiple resource lists and returns a single list that represents
|
||||
// the most restrictive set of resources. For each resource name, it selects the minimum
|
||||
// quantity found across all the provided lists.
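// For example (illustrative): merging {cpu: 4, memory: 8Gi} with {cpu: 2} yields {cpu: 2, memory: 8Gi};
// the tighter CPU limit wins and resources present in only one list are kept as-is.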
|
||||
func mergeResourceLists(resourceLists ...corev1.ResourceList) corev1.ResourceList {
|
||||
merged := corev1.ResourceList{}
|
||||
|
||||
for _, resourceList := range resourceLists {
|
||||
for resName, qty := range resourceList {
|
||||
existingQty, found := merged[resName]
|
||||
|
||||
// If it's the first time we see it OR the new one is smaller -> Update
|
||||
if !found || qty.Cmp(existingQty) < 0 {
|
||||
merged[resName] = qty.DeepCopy()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return merged
|
||||
}
|
||||
|
||||
// distributeQuotas divides the total resource quotas among all active virtual nodes,
|
||||
// capped by each node's actual host capacity. This ensures that each virtual node
|
||||
// reports a fair share of the available resources without exceeding what its
|
||||
// underlying host node can provide.
|
||||
//
|
||||
// For each resource type the algorithm uses a multi-pass redistribution loop:
|
||||
// 1. Divide the remaining quota evenly among eligible nodes (sorted by name for
|
||||
// determinism), assigning any integer remainder to the first nodes alphabetically.
|
||||
// 2. Cap each node's share at its host allocatable capacity.
|
||||
// 3. Remove nodes that have reached their host capacity.
|
||||
// 4. If there is still unallocated quota (because some nodes were capped below their
|
||||
// even share), repeat from step 1 with the remaining quota and remaining nodes.
|
||||
//
|
||||
// The loop terminates when the quota is fully distributed or no eligible nodes remain.
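//
// Example (illustrative): with a CPU quota of 10 and three virtual nodes whose host nodes allocate
// 2, 6 and 8 CPUs, the first pass hands out ~3333m each; the 2-CPU node is capped at 2000m and
// dropped, and the remaining 1334m is split between the other two nodes in a second pass,
// yielding 2, 4 and 4 CPUs.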
|
||||
func distributeQuotas(hostResourceMap, virtResourceMap map[string]corev1.ResourceList, quotas corev1.ResourceList) map[string]corev1.ResourceList {
|
||||
resourceMap := make(map[string]corev1.ResourceList, len(virtResourceMap))
|
||||
maps.Copy(resourceMap, virtResourceMap)
|
||||
|
||||
// Distribute each resource type from the policy's hard quota
|
||||
for resourceName, totalQuantity := range quotas {
|
||||
_, useMilli := milliScaleResources[resourceName]
|
||||
|
||||
// eligible nodes for each distribution cycle
|
||||
var eligibleNodes []string
|
||||
|
||||
hostCap := make(map[string]int64)
|
||||
|
||||
// Populate the host nodes capacity map and the initial effective nodes
|
||||
for vn := range virtResourceMap {
|
||||
hostNodeResources := hostResourceMap[vn]
|
||||
if hostNodeResources == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
resourceQuantity, found := hostNodeResources[resourceName]
|
||||
if !found {
|
||||
// skip the node if the resource does not exist on the host node
|
||||
continue
|
||||
}
|
||||
|
||||
hostCap[vn] = resourceQuantity.Value()
|
||||
if useMilli {
|
||||
hostCap[vn] = resourceQuantity.MilliValue()
|
||||
}
|
||||
|
||||
eligibleNodes = append(eligibleNodes, vn)
|
||||
}
|
||||
|
||||
sort.Strings(eligibleNodes)
|
||||
|
||||
totalValue := totalQuantity.Value()
|
||||
if useMilli {
|
||||
totalValue = totalQuantity.MilliValue()
|
||||
}
|
||||
|
||||
// Start of the distribution cycle, each cycle will distribute the quota resource
|
||||
// evenly between nodes, each node can not exceed the corresponding host node capacity
|
||||
for totalValue > 0 && len(eligibleNodes) > 0 {
|
||||
nodeNum := int64(len(eligibleNodes))
|
||||
quantityPerNode := totalValue / nodeNum
|
||||
remainder := totalValue % nodeNum
|
||||
|
||||
remainingNodes := []string{}
|
||||
|
||||
for _, virtualNodeName := range eligibleNodes {
|
||||
nodeQuantity := quantityPerNode
|
||||
if remainder > 0 {
|
||||
nodeQuantity++
|
||||
remainder--
|
||||
}
|
||||
// We cap the quantity to the hostNode capacity
|
||||
nodeQuantity = min(nodeQuantity, hostCap[virtualNodeName])
|
||||
|
||||
if nodeQuantity > 0 {
|
||||
existing := resourceMap[virtualNodeName][resourceName]
|
||||
if useMilli {
|
||||
resourceMap[virtualNodeName][resourceName] = *resource.NewMilliQuantity(existing.MilliValue()+nodeQuantity, totalQuantity.Format)
|
||||
} else {
|
||||
resourceMap[virtualNodeName][resourceName] = *resource.NewQuantity(existing.Value()+nodeQuantity, totalQuantity.Format)
|
||||
}
|
||||
}
|
||||
|
||||
totalValue -= nodeQuantity
|
||||
hostCap[virtualNodeName] -= nodeQuantity
|
||||
|
||||
if hostCap[virtualNodeName] > 0 {
|
||||
remainingNodes = append(remainingNodes, virtualNodeName)
|
||||
}
|
||||
}
|
||||
|
||||
eligibleNodes = remainingNodes
|
||||
}
|
||||
}
|
||||
|
||||
return resourceMap
|
||||
}
|
||||
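For illustration, a minimal sketch of how the capping and redistribution play out for a single resource (node names and quantities are hypothetical; the same scenario is covered by the test file below):

host := map[string]corev1.ResourceList{
    "node-1": {corev1.ResourceCPU: resource.MustParse("8")},
    "node-2": {corev1.ResourceCPU: resource.MustParse("2")},
}
virt := map[string]corev1.ResourceList{"node-1": {}, "node-2": {}}
quota := corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("6")}

got := distributeQuotas(host, virt, quota)
// An even split would be 3 CPU per node, but node-2 is capped at its 2 CPU of host
// capacity, so the leftover 1 CPU is redistributed to node-1 on the next pass:
// got["node-1"] ends up with 4 CPU and got["node-2"] with 2 CPU.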
296
k3k-kubelet/provider/configure_capacity_test.go
Normal file
@@ -0,0 +1,296 @@
|
||||
package provider
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
func Test_distributeQuotas(t *testing.T) {
|
||||
scheme := runtime.NewScheme()
|
||||
err := corev1.AddToScheme(scheme)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Large allocatable so capping doesn't interfere with basic distribution tests.
|
||||
largeAllocatable := corev1.ResourceList{
|
||||
corev1.ResourceCPU: resource.MustParse("100"),
|
||||
corev1.ResourceMemory: resource.MustParse("100Gi"),
|
||||
corev1.ResourcePods: resource.MustParse("1000"),
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
virtResourceMap map[string]corev1.ResourceList
|
||||
hostResourceMap map[string]corev1.ResourceList
|
||||
quotas corev1.ResourceList
|
||||
want map[string]corev1.ResourceList
|
||||
}{
|
||||
{
|
||||
name: "no virtual nodes",
|
||||
virtResourceMap: map[string]corev1.ResourceList{},
|
||||
quotas: corev1.ResourceList{
|
||||
corev1.ResourceCPU: resource.MustParse("2"),
|
||||
},
|
||||
want: map[string]corev1.ResourceList{},
|
||||
},
|
||||
{
|
||||
name: "no quotas",
|
||||
virtResourceMap: map[string]corev1.ResourceList{
|
||||
"node-1": {},
|
||||
"node-2": {},
|
||||
},
|
||||
hostResourceMap: map[string]corev1.ResourceList{
|
||||
"node-1": largeAllocatable,
|
||||
"node-2": largeAllocatable,
|
||||
},
|
||||
quotas: corev1.ResourceList{},
|
||||
want: map[string]corev1.ResourceList{
|
||||
"node-1": {},
|
||||
"node-2": {},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "fewer virtual nodes than host nodes",
|
||||
virtResourceMap: map[string]corev1.ResourceList{
|
||||
"node-1": {},
|
||||
"node-2": {},
|
||||
},
|
||||
hostResourceMap: map[string]corev1.ResourceList{
|
||||
"node-1": largeAllocatable,
|
||||
"node-2": largeAllocatable,
|
||||
"node-3": largeAllocatable,
|
||||
"node-4": largeAllocatable,
|
||||
},
|
||||
quotas: corev1.ResourceList{
|
||||
corev1.ResourceCPU: resource.MustParse("2"),
|
||||
corev1.ResourceMemory: resource.MustParse("4Gi"),
|
||||
},
|
||||
want: map[string]corev1.ResourceList{
|
||||
"node-1": {
|
||||
corev1.ResourceCPU: resource.MustParse("1"),
|
||||
corev1.ResourceMemory: resource.MustParse("2Gi"),
|
||||
},
|
||||
"node-2": {
|
||||
corev1.ResourceCPU: resource.MustParse("1"),
|
||||
corev1.ResourceMemory: resource.MustParse("2Gi"),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "even distribution of cpu and memory",
|
||||
virtResourceMap: map[string]corev1.ResourceList{
|
||||
"node-1": {},
|
||||
"node-2": {},
|
||||
},
|
||||
hostResourceMap: map[string]corev1.ResourceList{
|
||||
"node-1": largeAllocatable,
|
||||
"node-2": largeAllocatable,
|
||||
},
|
||||
quotas: corev1.ResourceList{
|
||||
corev1.ResourceCPU: resource.MustParse("2"),
|
||||
corev1.ResourceMemory: resource.MustParse("4Gi"),
|
||||
},
|
||||
want: map[string]corev1.ResourceList{
|
||||
"node-1": {
|
||||
corev1.ResourceCPU: resource.MustParse("1"),
|
||||
corev1.ResourceMemory: resource.MustParse("2Gi"),
|
||||
},
|
||||
"node-2": {
|
||||
corev1.ResourceCPU: resource.MustParse("1"),
|
||||
corev1.ResourceMemory: resource.MustParse("2Gi"),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "uneven distribution with remainder",
|
||||
virtResourceMap: map[string]corev1.ResourceList{
|
||||
"node-1": {},
|
||||
"node-2": {},
|
||||
"node-3": {},
|
||||
},
|
||||
hostResourceMap: map[string]corev1.ResourceList{
|
||||
"node-1": largeAllocatable,
|
||||
"node-2": largeAllocatable,
|
||||
"node-3": largeAllocatable,
|
||||
},
|
||||
quotas: corev1.ResourceList{
|
||||
corev1.ResourceCPU: resource.MustParse("2"),
|
||||
},
|
||||
want: map[string]corev1.ResourceList{
|
||||
"node-1": {corev1.ResourceCPU: resource.MustParse("667m")},
|
||||
"node-2": {corev1.ResourceCPU: resource.MustParse("667m")},
|
||||
"node-3": {corev1.ResourceCPU: resource.MustParse("666m")},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "distribution of number resources",
|
||||
virtResourceMap: map[string]corev1.ResourceList{
|
||||
"node-1": {},
|
||||
"node-2": {},
|
||||
"node-3": {},
|
||||
},
|
||||
hostResourceMap: map[string]corev1.ResourceList{
|
||||
"node-1": largeAllocatable,
|
||||
"node-2": largeAllocatable,
|
||||
"node-3": largeAllocatable,
|
||||
},
|
||||
quotas: corev1.ResourceList{
|
||||
corev1.ResourceCPU: resource.MustParse("2"),
|
||||
corev1.ResourcePods: resource.MustParse("11"),
|
||||
},
|
||||
want: map[string]corev1.ResourceList{
|
||||
"node-1": {
|
||||
corev1.ResourceCPU: resource.MustParse("667m"),
|
||||
corev1.ResourcePods: resource.MustParse("4"),
|
||||
},
|
||||
"node-2": {
|
||||
corev1.ResourceCPU: resource.MustParse("667m"),
|
||||
corev1.ResourcePods: resource.MustParse("4"),
|
||||
},
|
||||
"node-3": {
|
||||
corev1.ResourceCPU: resource.MustParse("666m"),
|
||||
corev1.ResourcePods: resource.MustParse("3"),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "extended resource distributed only to nodes that have it",
|
||||
virtResourceMap: map[string]corev1.ResourceList{
|
||||
"node-1": {},
|
||||
"node-2": {},
|
||||
"node-3": {},
|
||||
},
|
||||
hostResourceMap: map[string]corev1.ResourceList{
|
||||
"node-1": {
|
||||
corev1.ResourceCPU: resource.MustParse("100"),
|
||||
"nvidia.com/gpu": resource.MustParse("2"),
|
||||
},
|
||||
"node-2": {
|
||||
corev1.ResourceCPU: resource.MustParse("100"),
|
||||
},
|
||||
"node-3": {
|
||||
corev1.ResourceCPU: resource.MustParse("100"),
|
||||
"nvidia.com/gpu": resource.MustParse("4"),
|
||||
},
|
||||
},
|
||||
quotas: corev1.ResourceList{
|
||||
corev1.ResourceCPU: resource.MustParse("3"),
|
||||
"nvidia.com/gpu": resource.MustParse("4"),
|
||||
},
|
||||
want: map[string]corev1.ResourceList{
|
||||
"node-1": {
|
||||
corev1.ResourceCPU: resource.MustParse("1"),
|
||||
"nvidia.com/gpu": resource.MustParse("2"),
|
||||
},
|
||||
"node-2": {
|
||||
corev1.ResourceCPU: resource.MustParse("1"),
|
||||
},
|
||||
"node-3": {
|
||||
corev1.ResourceCPU: resource.MustParse("1"),
|
||||
"nvidia.com/gpu": resource.MustParse("2"),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "capping at host capacity with redistribution",
|
||||
virtResourceMap: map[string]corev1.ResourceList{
|
||||
"node-1": {},
|
||||
"node-2": {},
|
||||
},
|
||||
hostResourceMap: map[string]corev1.ResourceList{
|
||||
"node-1": {
|
||||
corev1.ResourceCPU: resource.MustParse("8"),
|
||||
},
|
||||
"node-2": {
|
||||
corev1.ResourceCPU: resource.MustParse("2"),
|
||||
},
|
||||
},
|
||||
quotas: corev1.ResourceList{
|
||||
corev1.ResourceCPU: resource.MustParse("6"),
|
||||
},
|
||||
// Even split would be 3 each, but node-2 only has 2 CPU.
|
||||
// node-2 gets capped at 2, the remaining 1 goes to node-1.
|
||||
want: map[string]corev1.ResourceList{
|
||||
"node-1": {corev1.ResourceCPU: resource.MustParse("4")},
|
||||
"node-2": {corev1.ResourceCPU: resource.MustParse("2")},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "gpu capping with uneven host capacity",
|
||||
virtResourceMap: map[string]corev1.ResourceList{
|
||||
"node-1": {},
|
||||
"node-2": {},
|
||||
},
|
||||
hostResourceMap: map[string]corev1.ResourceList{
|
||||
"node-1": {
|
||||
"nvidia.com/gpu": resource.MustParse("6"),
|
||||
},
|
||||
"node-2": {
|
||||
"nvidia.com/gpu": resource.MustParse("1"),
|
||||
},
|
||||
},
|
||||
quotas: corev1.ResourceList{
|
||||
"nvidia.com/gpu": resource.MustParse("4"),
|
||||
},
|
||||
// Even split would be 2 each, but node-2 only has 1 GPU.
|
||||
// node-2 gets capped at 1, the remaining 1 goes to node-1.
|
||||
want: map[string]corev1.ResourceList{
|
||||
"node-1": {"nvidia.com/gpu": resource.MustParse("3")},
|
||||
"node-2": {"nvidia.com/gpu": resource.MustParse("1")},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "quota exceeds total host capacity",
|
||||
virtResourceMap: map[string]corev1.ResourceList{
|
||||
"node-1": {},
|
||||
"node-2": {},
|
||||
"node-3": {},
|
||||
},
|
||||
hostResourceMap: map[string]corev1.ResourceList{
|
||||
"node-1": {
|
||||
"nvidia.com/gpu": resource.MustParse("2"),
|
||||
},
|
||||
"node-2": {
|
||||
"nvidia.com/gpu": resource.MustParse("1"),
|
||||
},
|
||||
"node-3": {
|
||||
"nvidia.com/gpu": resource.MustParse("1"),
|
||||
},
|
||||
},
|
||||
quotas: corev1.ResourceList{
|
||||
"nvidia.com/gpu": resource.MustParse("10"),
|
||||
},
|
||||
// Total host capacity is 4, quota is 10. Each node gets its full capacity.
|
||||
want: map[string]corev1.ResourceList{
|
||||
"node-1": {"nvidia.com/gpu": resource.MustParse("2")},
|
||||
"node-2": {"nvidia.com/gpu": resource.MustParse("1")},
|
||||
"node-3": {"nvidia.com/gpu": resource.MustParse("1")},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := distributeQuotas(tt.hostResourceMap, tt.virtResourceMap, tt.quotas)
|
||||
|
||||
assert.Equal(t, len(tt.want), len(got), "Number of nodes in result should match")
|
||||
|
||||
for nodeName, expectedResources := range tt.want {
|
||||
actualResources, ok := got[nodeName]
|
||||
assert.True(t, ok, "Node %s not found in result", nodeName)
|
||||
|
||||
assert.Equal(t, len(expectedResources), len(actualResources), "Number of resources for node %s should match", nodeName)
|
||||
|
||||
for resName, expectedQty := range expectedResources {
|
||||
actualQty, ok := actualResources[resName]
|
||||
assert.True(t, ok, "Resource %s not found for node %s", resName, nodeName)
|
||||
assert.True(t, expectedQty.Equal(actualQty), "Resource %s for node %s did not match. want: %s, got: %s", resName, nodeName, expectedQty.String(), actualQty.String())
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -35,9 +35,8 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
cv1 "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
compbasemetrics "k8s.io/component-base/metrics"
|
||||
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
|
||||
v1alpha1stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/controller/webhook"
|
||||
"github.com/rancher/k3k/k3k-kubelet/provider/collectors"
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
|
||||
@@ -47,47 +46,65 @@ import (
|
||||
// check at compile time if the Provider implements the nodeutil.Provider interface
|
||||
var _ nodeutil.Provider = (*Provider)(nil)
|
||||
|
||||
// ClusterContext includes the controller runtime manager and clients
|
||||
type ClusterContext struct {
|
||||
Config rest.Config
|
||||
Client client.Client
|
||||
CoreClient cv1.CoreV1Interface
|
||||
Manager manager.Manager
|
||||
}
|
||||
|
||||
// Provider implements nodeutil.Provider from virtual Kubelet.
|
||||
// TODO: Implement NotifyPods and the required usage so that this can be an async provider
|
||||
type Provider struct {
|
||||
Host ClusterContext
|
||||
Virtual ClusterContext
|
||||
Translator translate.ToHostTranslator
|
||||
HostClient client.Client
|
||||
VirtualClient client.Client
|
||||
VirtualManager manager.Manager
|
||||
ClientConfig rest.Config
|
||||
CoreClient cv1.CoreV1Interface
|
||||
ClusterNamespace string
|
||||
ClusterName string
|
||||
serverIP string
|
||||
dnsIP string
|
||||
agentHostname string
|
||||
logger logr.Logger
|
||||
}
|
||||
|
||||
var ErrRetryTimeout = errors.New("provider timed out")
|
||||
|
||||
func New(hostConfig rest.Config, hostMgr, virtualMgr manager.Manager, logger logr.Logger, namespace, name, serverIP, dnsIP string) (*Provider, error) {
|
||||
func New(hostConfig rest.Config, hostMgr, virtualMgr manager.Manager, logger logr.Logger, namespace, name, serverIP, dnsIP, agentHostname string) (*Provider, error) {
|
||||
coreClient, err := cv1.NewForConfig(&hostConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
virtualCoreClient, err := cv1.NewForConfig(virtualMgr.GetConfig())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
translator := translate.ToHostTranslator{
|
||||
ClusterName: name,
|
||||
ClusterNamespace: namespace,
|
||||
}
|
||||
|
||||
p := Provider{
|
||||
HostClient: hostMgr.GetClient(),
|
||||
VirtualClient: virtualMgr.GetClient(),
|
||||
VirtualManager: virtualMgr,
|
||||
Host: ClusterContext{
|
||||
Manager: hostMgr,
|
||||
Client: hostMgr.GetClient(),
|
||||
CoreClient: coreClient,
|
||||
Config: hostConfig,
|
||||
},
|
||||
Virtual: ClusterContext{
|
||||
Manager: virtualMgr,
|
||||
Client: virtualMgr.GetClient(),
|
||||
CoreClient: virtualCoreClient,
|
||||
},
|
||||
Translator: translator,
|
||||
ClientConfig: hostConfig,
|
||||
CoreClient: coreClient,
|
||||
ClusterNamespace: namespace,
|
||||
ClusterName: name,
|
||||
logger: logger.WithValues("cluster", name),
|
||||
serverIP: serverIP,
|
||||
dnsIP: dnsIP,
|
||||
agentHostname: agentHostname,
|
||||
}
|
||||
|
||||
return &p, nil
|
||||
@@ -127,7 +144,7 @@ func (p *Provider) GetContainerLogs(ctx context.Context, namespace, name, contai
|
||||
options.SinceTime = &sinceTime
|
||||
}
|
||||
|
||||
closer, err := p.CoreClient.Pods(p.ClusterNamespace).GetLogs(hostPodName, &options).Stream(ctx)
|
||||
closer, err := p.Host.CoreClient.Pods(p.ClusterNamespace).GetLogs(hostPodName, &options).Stream(ctx)
|
||||
if err != nil {
|
||||
logger.Error(err, "Error getting logs from container")
|
||||
}
|
||||
@@ -143,7 +160,7 @@ func (p *Provider) RunInContainer(ctx context.Context, namespace, name, containe
|
||||
logger := p.logger.WithValues("namespace", namespace, "name", name, "pod", hostPodName, "container", containerName)
|
||||
logger.V(1).Info("RunInContainer")
|
||||
|
||||
req := p.CoreClient.RESTClient().Post().
|
||||
req := p.Host.CoreClient.RESTClient().Post().
|
||||
Resource("pods").
|
||||
Name(hostPodName).
|
||||
Namespace(p.ClusterNamespace).
|
||||
@@ -158,7 +175,7 @@ func (p *Provider) RunInContainer(ctx context.Context, namespace, name, containe
|
||||
Stderr: attach.Stderr() != nil,
|
||||
}, scheme.ParameterCodec)
|
||||
|
||||
exec, err := remotecommand.NewSPDYExecutor(&p.ClientConfig, http.MethodPost, req.URL())
|
||||
exec, err := remotecommand.NewSPDYExecutor(&p.Host.Config, http.MethodPost, req.URL())
|
||||
if err != nil {
|
||||
logger.Error(err, "Error creating SPDY executor")
|
||||
return err
|
||||
@@ -188,7 +205,7 @@ func (p *Provider) AttachToContainer(ctx context.Context, namespace, name, conta
|
||||
logger := p.logger.WithValues("namespace", namespace, "name", name, "pod", hostPodName, "container", containerName)
|
||||
logger.V(1).Info("AttachToContainer")
|
||||
|
||||
req := p.CoreClient.RESTClient().Post().
|
||||
req := p.Host.CoreClient.RESTClient().Post().
|
||||
Resource("pods").
|
||||
Name(hostPodName).
|
||||
Namespace(p.ClusterNamespace).
|
||||
@@ -202,7 +219,7 @@ func (p *Provider) AttachToContainer(ctx context.Context, namespace, name, conta
|
||||
Stderr: attach.Stderr() != nil,
|
||||
}, scheme.ParameterCodec)
|
||||
|
||||
exec, err := remotecommand.NewSPDYExecutor(&p.ClientConfig, http.MethodPost, req.URL())
|
||||
exec, err := remotecommand.NewSPDYExecutor(&p.Host.Config, http.MethodPost, req.URL())
|
||||
if err != nil {
|
||||
logger.Error(err, "Error creating SPDY executor")
|
||||
return err
|
||||
@@ -225,47 +242,35 @@ func (p *Provider) AttachToContainer(ctx context.Context, namespace, name, conta
|
||||
}
|
||||
|
||||
// GetStatsSummary gets the stats for the node, including running pods
|
||||
func (p *Provider) GetStatsSummary(ctx context.Context) (*stats.Summary, error) {
|
||||
func (p *Provider) GetStatsSummary(ctx context.Context) (*v1alpha1stats.Summary, error) {
|
||||
p.logger.V(1).Info("GetStatsSummary")
|
||||
|
||||
nodeList := &corev1.NodeList{}
|
||||
if err := p.CoreClient.RESTClient().Get().Resource("nodes").Do(ctx).Into(nodeList); err != nil {
|
||||
node, err := p.Host.CoreClient.Nodes().Get(ctx, p.agentHostname, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
p.logger.Error(err, "Unable to get nodes of cluster")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// fetch the stats from all the nodes
|
||||
var (
|
||||
nodeStats stats.NodeStats
|
||||
allPodsStats []stats.PodStats
|
||||
)
|
||||
|
||||
for _, n := range nodeList.Items {
|
||||
res, err := p.CoreClient.RESTClient().
|
||||
Get().
|
||||
Resource("nodes").
|
||||
Name(n.Name).
|
||||
SubResource("proxy").
|
||||
Suffix("stats/summary").
|
||||
DoRaw(ctx)
|
||||
if err != nil {
|
||||
p.logger.Error(err, "Unable to get stats/summary from cluster node", "node", n.Name)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stats := &stats.Summary{}
|
||||
if err := json.Unmarshal(res, stats); err != nil {
|
||||
p.logger.Error(err, "Error unmarshaling stats/summary from cluster node", "node", n.Name)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// TODO: we should probably calculate somehow the node stats from the different nodes of the host
|
||||
// or reflect different nodes from the virtual kubelet.
|
||||
// For the moment let's just pick one random node stats.
|
||||
nodeStats = stats.Node
|
||||
allPodsStats = append(allPodsStats, stats.Pods...)
|
||||
res, err := p.Host.CoreClient.RESTClient().
|
||||
Get().
|
||||
Resource("nodes").
|
||||
Name(node.Name).
|
||||
SubResource("proxy").
|
||||
Suffix("stats/summary").
|
||||
DoRaw(ctx)
|
||||
if err != nil {
|
||||
p.logger.Error(err, "Unable to get stats/summary from cluster node", "node", node.Name)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var statsSummary v1alpha1stats.Summary
|
||||
if err := json.Unmarshal(res, &statsSummary); err != nil {
|
||||
p.logger.Error(err, "Error unmarshaling stats/summary from cluster node", "node", node.Name)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
nodeStats := statsSummary.Node
|
||||
|
||||
pods, err := p.GetPods(ctx)
|
||||
if err != nil {
|
||||
p.logger.Error(err, "Error getting pods from cluster for stats")
|
||||
@@ -279,12 +284,12 @@ func (p *Provider) GetStatsSummary(ctx context.Context) (*stats.Summary, error)
|
||||
podsNameMap[hostPodName] = pod
|
||||
}
|
||||
|
||||
filteredStats := &stats.Summary{
|
||||
filteredStats := &v1alpha1stats.Summary{
|
||||
Node: nodeStats,
|
||||
Pods: make([]stats.PodStats, 0),
|
||||
Pods: make([]v1alpha1stats.PodStats, 0),
|
||||
}
|
||||
|
||||
for _, podStat := range allPodsStats {
|
||||
for _, podStat := range statsSummary.Pods {
|
||||
// skip pods that are not in the cluster namespace
|
||||
if podStat.PodRef.Namespace != p.ClusterNamespace {
|
||||
continue
|
||||
@@ -292,7 +297,7 @@ func (p *Provider) GetStatsSummary(ctx context.Context) (*stats.Summary, error)
|
||||
|
||||
// rewrite the PodReference to match the data of the virtual cluster
|
||||
if pod, found := podsNameMap[podStat.PodRef.Name]; found {
|
||||
podStat.PodRef = stats.PodReference{
|
||||
podStat.PodRef = v1alpha1stats.PodReference{
|
||||
Name: pod.Name,
|
||||
Namespace: pod.Namespace,
|
||||
UID: string(pod.UID),
|
||||
@@ -333,13 +338,13 @@ func (p *Provider) PortForward(ctx context.Context, namespace, name string, port
|
||||
logger := p.logger.WithValues("namespace", namespace, "name", name, "pod", hostPodName, "port", port)
|
||||
logger.V(1).Info("PortForward")
|
||||
|
||||
req := p.CoreClient.RESTClient().Post().
|
||||
req := p.Host.CoreClient.RESTClient().Post().
|
||||
Resource("pods").
|
||||
Name(hostPodName).
|
||||
Namespace(p.ClusterNamespace).
|
||||
SubResource("portforward")
|
||||
|
||||
transport, upgrader, err := spdy.RoundTripperFor(&p.ClientConfig)
|
||||
transport, upgrader, err := spdy.RoundTripperFor(&p.Host.Config)
|
||||
if err != nil {
|
||||
logger.Error(err, "Error creating RoundTripper for PortForward")
|
||||
return err
|
||||
@@ -383,7 +388,7 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
|
||||
}
|
||||
|
||||
var cluster v1beta1.Cluster
|
||||
if err := p.HostClient.Get(ctx, clusterKey, &cluster); err != nil {
|
||||
if err := p.Host.Client.Get(ctx, clusterKey, &cluster); err != nil {
|
||||
logger.Error(err, "Error getting Virtual Cluster definition")
|
||||
return err
|
||||
}
|
||||
@@ -395,7 +400,7 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
|
||||
}
|
||||
|
||||
var virtualPod corev1.Pod
|
||||
if err := p.VirtualClient.Get(ctx, key, &virtualPod); err != nil {
|
||||
if err := p.Virtual.Client.Get(ctx, key, &virtualPod); err != nil {
|
||||
logger.Error(err, "Error getting Pod from Virtual Cluster")
|
||||
return err
|
||||
}
|
||||
@@ -408,39 +413,69 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
|
||||
|
||||
logger = logger.WithValues("pod", hostPod.Name)
|
||||
|
||||
// the node was scheduled on the virtual kubelet, but leaving it this way will make it pending indefinitely
|
||||
// Clear the NodeName to allow scheduling, and set affinity to prefer scheduling the Pod on the same host node as the virtual kubelet,
|
||||
// unless the user has specified their own affinity, in which case the user's affinity is respected.
|
||||
|
||||
hostPod.Spec.NodeName = ""
|
||||
|
||||
if hostPod.Spec.Affinity == nil {
|
||||
hostPod.Spec.Affinity = &corev1.Affinity{
|
||||
NodeAffinity: &corev1.NodeAffinity{
|
||||
PreferredDuringSchedulingIgnoredDuringExecution: []corev1.PreferredSchedulingTerm{{
|
||||
Weight: 100,
|
||||
Preference: corev1.NodeSelectorTerm{
|
||||
MatchExpressions: []corev1.NodeSelectorRequirement{{
|
||||
Key: "kubernetes.io/hostname",
|
||||
Operator: corev1.NodeSelectorOpIn,
|
||||
Values: []string{p.agentHostname},
|
||||
}},
|
||||
},
|
||||
}},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// The pod's own nodeSelector is ignored.
|
||||
// The final selector is determined by the cluster spec, but overridden by a policy if present.
|
||||
hostPod.Spec.NodeSelector = cluster.Spec.NodeSelector
|
||||
if cluster.Status.Policy != nil && len(cluster.Status.Policy.NodeSelector) > 0 {
|
||||
hostPod.Spec.NodeSelector = cluster.Status.Policy.NodeSelector
|
||||
}
|
||||
|
||||
// setting the hostname for the pod if it's not set
|
||||
if virtualPod.Spec.Hostname == "" {
|
||||
hostPod.Spec.Hostname = k3kcontroller.SafeConcatName(virtualPod.Name)
|
||||
}
|
||||
|
||||
// if the priorityClass for the virtual cluster is set then override the provided value
|
||||
// When a PriorityClass is set, we will use the translated one in the host cluster.
// If the Cluster or a Policy defines a PriorityClass on the host, we are going to use that one.
|
||||
// Note: the core-dns and local-path-provisioner pods are scheduled by k3s with the
// 'system-cluster-critical' and 'system-node-critical' default priority classes.
|
||||
if !strings.HasPrefix(hostPod.Spec.PriorityClassName, "system-") {
|
||||
if hostPod.Spec.PriorityClassName != "" {
|
||||
tPriorityClassName := p.Translator.TranslateName("", hostPod.Spec.PriorityClassName)
|
||||
hostPod.Spec.PriorityClassName = tPriorityClassName
|
||||
//
|
||||
// TODO: we probably need to define a custom "intermediate" k3k-system-* priority
|
||||
if strings.HasPrefix(virtualPod.Spec.PriorityClassName, "system-") {
|
||||
hostPod.Spec.PriorityClassName = virtualPod.Spec.PriorityClassName
|
||||
} else {
|
||||
enforcedPriorityClassName := cluster.Spec.PriorityClass
|
||||
if cluster.Status.Policy != nil && cluster.Status.Policy.PriorityClass != nil {
|
||||
enforcedPriorityClassName = *cluster.Status.Policy.PriorityClass
|
||||
}
|
||||
|
||||
if cluster.Spec.PriorityClass != "" {
|
||||
hostPod.Spec.PriorityClassName = cluster.Spec.PriorityClass
|
||||
if enforcedPriorityClassName != "" {
|
||||
hostPod.Spec.PriorityClassName = enforcedPriorityClassName
|
||||
} else if virtualPod.Spec.PriorityClassName != "" {
|
||||
hostPod.Spec.PriorityClassName = p.Translator.TranslateName("", virtualPod.Spec.PriorityClassName)
|
||||
hostPod.Spec.Priority = nil
|
||||
}
|
||||
}
|
||||
|
||||
p.configurePodEnvs(hostPod, &virtualPod)
|
||||
|
||||
// fieldpath annotations
|
||||
if err := p.configureFieldPathEnv(&virtualPod, hostPod); err != nil {
|
||||
logger.Error(err, "Unable to fetch fieldpath annotations for pod")
|
||||
return err
|
||||
// if the priority class is set, we need to remove the priority
|
||||
if hostPod.Spec.PriorityClassName != "" {
|
||||
hostPod.Spec.Priority = nil
|
||||
}
|
||||
|
||||
p.configurePodEnvs(hostPod, &virtualPod)
|
||||
|
||||
// volumes will often refer to resources in the virtual cluster
|
||||
// but instead need to refer to the synced host cluster version
|
||||
p.transformVolumes(pod.Namespace, hostPod.Spec.Volumes)
|
||||
@@ -459,12 +494,12 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
|
||||
configureNetworking(hostPod, virtualPod.Name, virtualPod.Namespace, p.serverIP, p.dnsIP)
|
||||
|
||||
// set ownerReference to the cluster object
|
||||
if err := controllerutil.SetControllerReference(&cluster, hostPod, p.HostClient.Scheme()); err != nil {
|
||||
if err := controllerutil.SetControllerReference(&cluster, hostPod, p.Host.Client.Scheme()); err != nil {
|
||||
logger.Error(err, "Unable to set owner reference for pod")
|
||||
return err
|
||||
}
|
||||
|
||||
if err := p.HostClient.Create(ctx, hostPod); err != nil {
|
||||
if err := p.Host.Client.Create(ctx, hostPod); err != nil {
|
||||
logger.Error(err, "Error creating pod on host cluster")
|
||||
return err
|
||||
}
|
||||
@@ -569,14 +604,14 @@ func (p *Provider) updatePod(ctx context.Context, pod *corev1.Pod) error {
|
||||
}
|
||||
|
||||
var hostPod corev1.Pod
|
||||
if err := p.HostClient.Get(ctx, hostKey, &hostPod); err != nil {
|
||||
if err := p.Host.Client.Get(ctx, hostKey, &hostPod); err != nil {
|
||||
logger.Error(err, "Unable to get Pod to update from host cluster")
|
||||
return err
|
||||
}
|
||||
|
||||
updatePod(&hostPod, pod)
|
||||
|
||||
if err := p.HostClient.Update(ctx, &hostPod); err != nil {
|
||||
if err := p.Host.Client.Update(ctx, &hostPod); err != nil {
|
||||
logger.Error(err, "Unable to update Pod in host cluster")
|
||||
return err
|
||||
}
|
||||
@@ -587,7 +622,7 @@ func (p *Provider) updatePod(ctx context.Context, pod *corev1.Pod) error {
|
||||
|
||||
hostPod.Spec.EphemeralContainers = pod.Spec.EphemeralContainers
|
||||
|
||||
if _, err := p.CoreClient.Pods(p.ClusterNamespace).UpdateEphemeralContainers(ctx, hostPod.Name, &hostPod, metav1.UpdateOptions{}); err != nil {
|
||||
if _, err := p.Host.CoreClient.Pods(p.ClusterNamespace).UpdateEphemeralContainers(ctx, hostPod.Name, &hostPod, metav1.UpdateOptions{}); err != nil {
|
||||
logger.Error(err, "Error when updating ephemeral containers in host pod")
|
||||
return err
|
||||
}
|
||||
@@ -605,14 +640,14 @@ func (p *Provider) updatePod(ctx context.Context, pod *corev1.Pod) error {
|
||||
}
|
||||
|
||||
var virtualPod corev1.Pod
|
||||
if err := p.VirtualClient.Get(ctx, key, &virtualPod); err != nil {
|
||||
if err := p.Virtual.Client.Get(ctx, key, &virtualPod); err != nil {
|
||||
logger.Error(err, "Unable to get pod to update from virtual cluster")
|
||||
return err
|
||||
}
|
||||
|
||||
updatePod(&virtualPod, pod)
|
||||
|
||||
if err := p.VirtualClient.Update(ctx, &virtualPod); err != nil {
|
||||
if err := p.Virtual.Client.Update(ctx, &virtualPod); err != nil {
|
||||
logger.Error(err, "Unable to update Pod in virtual cluster")
|
||||
return err
|
||||
}
|
||||
@@ -623,7 +658,7 @@ func (p *Provider) updatePod(ctx context.Context, pod *corev1.Pod) error {
|
||||
|
||||
virtualPod.Spec.EphemeralContainers = pod.Spec.EphemeralContainers
|
||||
|
||||
if _, err := p.CoreClient.Pods(p.ClusterNamespace).UpdateEphemeralContainers(ctx, virtualPod.Name, &virtualPod, metav1.UpdateOptions{}); err != nil {
|
||||
if _, err := p.Host.CoreClient.Pods(p.ClusterNamespace).UpdateEphemeralContainers(ctx, virtualPod.Name, &virtualPod, metav1.UpdateOptions{}); err != nil {
|
||||
logger.Error(err, "Error when updating ephemeral containers in virtual pod")
|
||||
return err
|
||||
}
|
||||
@@ -672,7 +707,7 @@ func (p *Provider) deletePod(ctx context.Context, pod *corev1.Pod) error {
|
||||
logger := p.logger.WithValues("namespace", pod.Namespace, "name", pod.Name, "pod", hostPodName)
|
||||
logger.V(1).Info("DeletePod")
|
||||
|
||||
err := p.CoreClient.Pods(p.ClusterNamespace).Delete(ctx, hostPodName, metav1.DeleteOptions{})
|
||||
err := p.Host.CoreClient.Pods(p.ClusterNamespace).Delete(ctx, hostPodName, metav1.DeleteOptions{})
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
logger.Info("Pod to delete not found in host cluster")
|
||||
@@ -734,7 +769,7 @@ func (p *Provider) getPodFromHostCluster(ctx context.Context, hostPodName string
|
||||
}
|
||||
|
||||
var pod corev1.Pod
|
||||
if err := p.HostClient.Get(ctx, key, &pod); err != nil {
|
||||
if err := p.Host.Client.Get(ctx, key, &pod); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -762,7 +797,7 @@ func (p *Provider) GetPods(ctx context.Context) ([]*corev1.Pod, error) {
|
||||
|
||||
var podList corev1.PodList
|
||||
|
||||
err = p.HostClient.List(ctx, &podList, &client.ListOptions{LabelSelector: selector})
|
||||
err = p.Host.Client.List(ctx, &podList, &client.ListOptions{LabelSelector: selector})
|
||||
if err != nil {
|
||||
p.logger.Error(err, "Error listing pods from host cluster")
|
||||
return nil, err
|
||||
@@ -939,32 +974,3 @@ func (p *Provider) configureEnvFrom(virtualPod *corev1.Pod, envs []corev1.EnvFro
|
||||
|
||||
return resultingEnvVars
|
||||
}
|
||||
|
||||
// configureFieldPathEnv will retrieve all annotations created by the pod mutating webhook
|
||||
// to assign env fieldpaths to pods, it will also make sure to change the metadata.name and metadata.namespace to the
|
||||
// assigned annotations
|
||||
func (p *Provider) configureFieldPathEnv(pod, tPod *corev1.Pod) error {
|
||||
for name, value := range pod.Annotations {
|
||||
if strings.Contains(name, webhook.FieldpathField) {
|
||||
containerIndex, envName, err := webhook.ParseFieldPathAnnotationKey(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// re-adding these envs to the pod
|
||||
tPod.Spec.Containers[containerIndex].Env = append(tPod.Spec.Containers[containerIndex].Env, corev1.EnvVar{
|
||||
Name: envName,
|
||||
ValueFrom: &corev1.EnvVarSource{
|
||||
FieldRef: &corev1.ObjectFieldSelector{
|
||||
FieldPath: value,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
// removing the annotation from the pod
|
||||
delete(tPod.Annotations, name)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -3,11 +3,14 @@ package provider
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/utils/ptr"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
|
||||
authv1 "k8s.io/api/authentication/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@@ -20,22 +23,36 @@ const (
|
||||
serviceAccountTokenMountPath = "/var/run/secrets/kubernetes.io/serviceaccount"
|
||||
)
|
||||
|
||||
// transformTokens copies the serviceaccount tokens used by pod's serviceaccount to a secret on the host cluster and mount it
|
||||
// transformTokens copies the serviceaccount tokens used by virtualPod's serviceaccount to a secret on the host cluster and mounts it
|
||||
// to look like the serviceaccount token
|
||||
func (p *Provider) transformTokens(ctx context.Context, pod, tPod *corev1.Pod) error {
|
||||
logger := p.logger.WithValues("namespace", pod.Namespace, "name", pod.Name, "serviceAccountNameod", pod.Spec.ServiceAccountName)
|
||||
logger.V(1).Info("Transforming token")
|
||||
func (p *Provider) transformTokens(ctx context.Context, virtualPod, hostPod *corev1.Pod) error {
|
||||
logger := p.logger.WithValues("namespace", virtualPod.Namespace, "name", virtualPod.Name, "serviceAccountName", virtualPod.Spec.ServiceAccountName)
|
||||
logger.V(1).Info("Transforming service account tokens")
|
||||
|
||||
// transform projected service account token
|
||||
if err := p.transformProjectedTokens(ctx, virtualPod, hostPod); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// transform kube-api-access token for all containers in virtualPod
|
||||
if err := p.transformKubeAccessToken(ctx, virtualPod, hostPod); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Provider) transformKubeAccessToken(ctx context.Context, virtualPod, hostPod *corev1.Pod) error {
|
||||
// skip this process if the kube-api-access volume is already removed from the pod;
// this is needed in case users already add their own custom tokens, as in Rancher imported clusters
|
||||
if !isKubeAccessVolumeFound(pod) {
|
||||
if !hasKubeAccessVolume(virtualPod) {
|
||||
return nil
|
||||
}
|
||||
|
||||
virtualSecretName := k3kcontroller.SafeConcatNameWithPrefix(pod.Spec.ServiceAccountName, "token")
|
||||
virtualSecretName := k3kcontroller.SafeConcatNameWithPrefix(virtualPod.Spec.ServiceAccountName, "token")
|
||||
|
||||
virtualSecret := virtualSecret(virtualSecretName, pod.Namespace, pod.Spec.ServiceAccountName)
|
||||
if err := p.VirtualClient.Create(ctx, virtualSecret); err != nil {
|
||||
virtualSecret := virtualSecret(virtualSecretName, virtualPod.Namespace, virtualPod.Spec.ServiceAccountName)
|
||||
if err := p.Virtual.Client.Create(ctx, virtualSecret); err != nil {
|
||||
if !apierrors.IsAlreadyExists(err) {
|
||||
return err
|
||||
}
|
||||
@@ -46,7 +63,7 @@ func (p *Provider) transformTokens(ctx context.Context, pod, tPod *corev1.Pod) e
|
||||
Name: virtualSecret.Name,
|
||||
Namespace: virtualSecret.Namespace,
|
||||
}
|
||||
if err := p.VirtualClient.Get(ctx, virtualSecretKey, virtualSecret); err != nil {
|
||||
if err := p.Virtual.Client.Get(ctx, virtualSecretKey, virtualSecret); err != nil {
|
||||
return err
|
||||
}
|
||||
// To avoid race conditions we need to check if the secret's data has been populated
|
||||
@@ -55,23 +72,107 @@ func (p *Provider) transformTokens(ctx context.Context, pod, tPod *corev1.Pod) e
|
||||
return fmt.Errorf("token secret %s/%s data is empty", virtualSecret.Namespace, virtualSecret.Name)
|
||||
}
|
||||
|
||||
hostSecret := virtualSecret.DeepCopy()
|
||||
hostSecret.Type = ""
|
||||
hostSecret.Annotations = make(map[string]string)
|
||||
hostSecret, err := p.translateAndCreateHostTokenSecret(ctx, virtualSecret)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
p.Translator.TranslateTo(hostSecret)
|
||||
hostPod.Spec.ServiceAccountName = ""
|
||||
hostPod.Spec.DeprecatedServiceAccount = ""
|
||||
hostPod.Spec.AutomountServiceAccountToken = ptr.To(false)
|
||||
|
||||
if err := p.HostClient.Create(ctx, hostSecret); err != nil {
|
||||
if !apierrors.IsAlreadyExists(err) {
|
||||
return err
|
||||
removeKubeAccessVolume(hostPod)
|
||||
addKubeAccessVolume(hostPod, hostSecret.Name)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// transformProjectedTokens will iterate over the host pod's projected volume sources
// and transform projected tokens to use a requested token secret from the virtual cluster
// instead of the automatically generated secret on the host cluster.
|
||||
func (p *Provider) transformProjectedTokens(ctx context.Context, virtualPod, hostPod *corev1.Pod) error {
|
||||
for i, volume := range hostPod.Spec.Volumes {
|
||||
if strings.HasPrefix(volume.Name, kubeAPIAccessPrefix) {
|
||||
continue
|
||||
}
|
||||
|
||||
if volume.Projected == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
for j, source := range volume.Projected.Sources {
|
||||
if source.ServiceAccountToken == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
projectedSecret, err := p.requestTokenSecret(ctx, source.ServiceAccountToken, virtualPod)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hostSecret, err := p.translateAndCreateHostTokenSecret(ctx, projectedSecret)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// replace the projected token volume with a projected secret
|
||||
hostPod.Spec.Volumes[i].Projected.Sources[j].ServiceAccountToken = nil
|
||||
hostPod.Spec.Volumes[i].Projected.Sources[j].Secret = &corev1.SecretProjection{
|
||||
LocalObjectReference: corev1.LocalObjectReference{
|
||||
Name: hostSecret.Name,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
p.translateToken(tPod, hostSecret.Name)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Provider) requestTokenSecret(ctx context.Context, token *corev1.ServiceAccountTokenProjection, virtualPod *corev1.Pod) (*corev1.Secret, error) {
|
||||
namespace := virtualPod.Namespace
|
||||
serviceAccountName := virtualPod.Spec.ServiceAccountName
|
||||
|
||||
var audiences []string
|
||||
if token.Audience != "" {
|
||||
audiences = []string{token.Audience}
|
||||
}
|
||||
|
||||
tokenRequest := &authv1.TokenRequest{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: serviceAccountName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: authv1.TokenRequestSpec{
|
||||
Audiences: audiences,
|
||||
ExpirationSeconds: token.ExpirationSeconds,
|
||||
BoundObjectRef: &authv1.BoundObjectReference{
|
||||
Name: virtualPod.Name,
|
||||
UID: virtualPod.UID,
|
||||
Kind: "Pod",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
tokenResp, err := p.Virtual.CoreClient.ServiceAccounts(namespace).CreateToken(ctx, serviceAccountName, tokenRequest, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// create a virtual secret with that token
|
||||
virtualSecret := &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
// creating a unique name for the virtual secret based on the request attributes
|
||||
Name: generateTokenSecretName(serviceAccountName, token.Path, tokenResp),
|
||||
Namespace: namespace,
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
token.Path: []byte(tokenResp.Status.Token),
|
||||
},
|
||||
}
|
||||
|
||||
return virtualSecret, nil
|
||||
}
|
||||
|
||||
func virtualSecret(name, namespace, serviceAccountName string) *corev1.Secret {
|
||||
return &corev1.Secret{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
@@ -89,17 +190,25 @@ func virtualSecret(name, namespace, serviceAccountName string) *corev1.Secret {
|
||||
}
|
||||
}
|
||||
|
||||
// translateToken will remove the serviceaccount from the pod and replace the kube-api-access volume
|
||||
// with a custom token volume and mount it to all containers within the pod
|
||||
func (p *Provider) translateToken(pod *corev1.Pod, hostSecretName string) {
|
||||
pod.Spec.ServiceAccountName = ""
|
||||
pod.Spec.DeprecatedServiceAccount = ""
|
||||
pod.Spec.AutomountServiceAccountToken = ptr.To(false)
|
||||
removeKubeAccessVolume(pod)
|
||||
addKubeAccessVolume(pod, hostSecretName)
|
||||
func (p *Provider) translateAndCreateHostTokenSecret(ctx context.Context, projectedToken *corev1.Secret) (*corev1.Secret, error) {
|
||||
hostSecret := projectedToken.DeepCopy()
|
||||
hostSecret.Type = ""
|
||||
hostSecret.Annotations = make(map[string]string)
|
||||
|
||||
p.Translator.TranslateTo(hostSecret)
|
||||
|
||||
data := hostSecret.Data
|
||||
if _, err := controllerutil.CreateOrUpdate(ctx, p.Host.Client, hostSecret, func() error {
|
||||
hostSecret.Data = data
|
||||
return nil
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return hostSecret, nil
|
||||
}
|
||||
|
||||
func isKubeAccessVolumeFound(pod *corev1.Pod) bool {
|
||||
func hasKubeAccessVolume(pod *corev1.Pod) bool {
|
||||
for _, volume := range pod.Spec.Volumes {
|
||||
if strings.HasPrefix(volume.Name, kubeAPIAccessPrefix) {
|
||||
return true
|
||||
@@ -171,4 +280,29 @@ func addKubeAccessVolume(pod *corev1.Pod, hostSecretName string) {
|
||||
MountPath: serviceAccountTokenMountPath,
|
||||
})
|
||||
}
|
||||
|
||||
for i := range pod.Spec.EphemeralContainers {
|
||||
pod.Spec.EphemeralContainers[i].VolumeMounts = append(pod.Spec.EphemeralContainers[i].VolumeMounts, corev1.VolumeMount{
|
||||
Name: tokenVolumeName,
|
||||
MountPath: serviceAccountTokenMountPath,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func generateTokenSecretName(serviceAccountName, tokenPath string, tokenReq *authv1.TokenRequest) string {
|
||||
nameComponents := []string{serviceAccountName}
|
||||
|
||||
if tokenReq.Spec.Audiences != nil {
|
||||
nameComponents = append(nameComponents, tokenReq.Spec.Audiences...)
|
||||
}
|
||||
|
||||
if exp := tokenReq.Spec.ExpirationSeconds; exp != nil {
|
||||
nameComponents = append(nameComponents, strconv.FormatInt(*exp, 10))
|
||||
}
|
||||
|
||||
if tokenPath != "" {
|
||||
nameComponents = append(nameComponents, tokenPath)
|
||||
}
|
||||
|
||||
return k3kcontroller.SafeConcatNameWithPrefix(nameComponents...)
|
||||
}
|
||||
|
||||
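To make the flow above concrete, a hypothetical virtual secret produced by requestTokenSecret for a projected token with audience "vault", a 3600s expiration and path "token" on service account "my-sa" would look roughly like this (the name follows generateTokenSecretName, exercised by the tests below; the token value is a placeholder):

virtualSecret := &corev1.Secret{
    ObjectMeta: metav1.ObjectMeta{
        Name:      "k3k-my-sa-vault-3600-token",
        Namespace: "default",
    },
    Data: map[string][]byte{
        "token": []byte("<token issued by the virtual cluster's TokenRequest API>"),
    },
}
// translateAndCreateHostTokenSecret then copies this secret to the host cluster under a
// translated name, and the projected source on the host pod is rewritten to reference it.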
327
k3k-kubelet/provider/token_test.go
Normal file
@@ -0,0 +1,327 @@
|
||||
package provider
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"k8s.io/utils/ptr"
|
||||
|
||||
authv1 "k8s.io/api/authentication/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
|
||||
k3kcontroller "github.com/rancher/k3k/pkg/controller"
|
||||
)
|
||||
|
||||
func Test_hasKubeAccessVolume(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
pod *corev1.Pod
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "no volumes",
|
||||
pod: &corev1.Pod{},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "volume with kube-api-access prefix",
|
||||
pod: &corev1.Pod{
|
||||
Spec: corev1.PodSpec{
|
||||
Volumes: []corev1.Volume{
|
||||
{Name: "kube-api-access-abc123"},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "exact kube-api-access name",
|
||||
pod: &corev1.Pod{
|
||||
Spec: corev1.PodSpec{
|
||||
Volumes: []corev1.Volume{
|
||||
{Name: "kube-api-access"},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "volume without kube-api-access prefix",
|
||||
pod: &corev1.Pod{
|
||||
Spec: corev1.PodSpec{
|
||||
Volumes: []corev1.Volume{
|
||||
{Name: "my-volume"},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "multiple volumes with one kube-api-access",
|
||||
pod: &corev1.Pod{
|
||||
Spec: corev1.PodSpec{
|
||||
Volumes: []corev1.Volume{
|
||||
{Name: "config-volume"},
|
||||
{Name: "kube-api-access-xyz"},
|
||||
{Name: "data-volume"},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
assert.Equal(t, tt.want, hasKubeAccessVolume(tt.pod))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_removeKubeAccessVolume(t *testing.T) {
|
||||
t.Run("removes volume and all volume mounts from containers", func(t *testing.T) {
|
||||
pod := &corev1.Pod{
|
||||
Spec: corev1.PodSpec{
|
||||
Volumes: []corev1.Volume{
|
||||
{Name: "config-volume"},
|
||||
{Name: "kube-api-access-abc"},
|
||||
{Name: "data-volume"},
|
||||
},
|
||||
InitContainers: []corev1.Container{
|
||||
{
|
||||
Name: "init",
|
||||
VolumeMounts: []corev1.VolumeMount{
|
||||
{Name: "config-volume", MountPath: "/config"},
|
||||
{Name: "kube-api-access-abc", MountPath: serviceAccountTokenMountPath},
|
||||
},
|
||||
},
|
||||
},
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Name: "main",
|
||||
VolumeMounts: []corev1.VolumeMount{
|
||||
{Name: "kube-api-access-abc", MountPath: serviceAccountTokenMountPath},
|
||||
{Name: "data-volume", MountPath: "/data"},
|
||||
},
|
||||
},
|
||||
},
|
||||
EphemeralContainers: []corev1.EphemeralContainer{
|
||||
{
|
||||
EphemeralContainerCommon: corev1.EphemeralContainerCommon{
|
||||
Name: "debug",
|
||||
VolumeMounts: []corev1.VolumeMount{
|
||||
{Name: "kube-api-access-abc", MountPath: serviceAccountTokenMountPath},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
removeKubeAccessVolume(pod)
|
||||
|
||||
// Verify volume was removed
|
||||
assert.Equal(t, 2, len(pod.Spec.Volumes))
|
||||
assert.Equal(t, "config-volume", pod.Spec.Volumes[0].Name)
|
||||
assert.Equal(t, "data-volume", pod.Spec.Volumes[1].Name)
|
||||
|
||||
// Verify init container mount was removed
|
||||
assert.Equal(t, 1, len(pod.Spec.InitContainers[0].VolumeMounts))
|
||||
assert.Equal(t, "config-volume", pod.Spec.InitContainers[0].VolumeMounts[0].Name)
|
||||
|
||||
// Verify container mount was removed
|
||||
assert.Equal(t, 1, len(pod.Spec.Containers[0].VolumeMounts))
|
||||
assert.Equal(t, "data-volume", pod.Spec.Containers[0].VolumeMounts[0].Name)
|
||||
|
||||
// Verify ephemeral container mount was removed
|
||||
assert.Equal(t, 0, len(pod.Spec.EphemeralContainers[0].VolumeMounts))
|
||||
})
|
||||
|
||||
t.Run("no kube-api-access volume present", func(t *testing.T) {
|
||||
pod := &corev1.Pod{
|
||||
Spec: corev1.PodSpec{
|
||||
Volumes: []corev1.Volume{
|
||||
{Name: "config-volume"},
|
||||
},
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Name: "main",
|
||||
VolumeMounts: []corev1.VolumeMount{
|
||||
{Name: "config-volume", MountPath: "/config"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
removeKubeAccessVolume(pod)
|
||||
|
||||
assert.Equal(t, 1, len(pod.Spec.Volumes))
|
||||
assert.Equal(t, "config-volume", pod.Spec.Volumes[0].Name)
|
||||
assert.Equal(t, 1, len(pod.Spec.Containers[0].VolumeMounts))
|
||||
})
|
||||
}
|
||||
|
||||
func Test_addKubeAccessVolume(t *testing.T) {
|
||||
tokenVolumeName := k3kcontroller.SafeConcatNameWithPrefix(kubeAPIAccessPrefix)
|
||||
hostSecretName := "host-secret-token"
|
||||
|
||||
pod := &corev1.Pod{
|
||||
Spec: corev1.PodSpec{
|
||||
Volumes: []corev1.Volume{
|
||||
{Name: "existing-volume"},
|
||||
},
|
||||
InitContainers: []corev1.Container{
|
||||
{Name: "init"},
|
||||
},
|
||||
Containers: []corev1.Container{
|
||||
{Name: "main"},
|
||||
{Name: "sidecar"},
|
||||
},
|
||||
EphemeralContainers: []corev1.EphemeralContainer{
|
||||
{
|
||||
EphemeralContainerCommon: corev1.EphemeralContainerCommon{
|
||||
Name: "debug",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
addKubeAccessVolume(pod, hostSecretName)
|
||||
|
||||
// Verify volume was added
|
||||
assert.Equal(t, 2, len(pod.Spec.Volumes))
|
||||
addedVol := pod.Spec.Volumes[1]
|
||||
assert.Equal(t, tokenVolumeName, addedVol.Name)
|
||||
assert.Equal(t, hostSecretName, addedVol.Secret.SecretName)
|
||||
|
||||
// Verify init container mount was added
|
||||
assert.Equal(t, 1, len(pod.Spec.InitContainers[0].VolumeMounts))
|
||||
assert.Equal(t, tokenVolumeName, pod.Spec.InitContainers[0].VolumeMounts[0].Name)
|
||||
assert.Equal(t, serviceAccountTokenMountPath, pod.Spec.InitContainers[0].VolumeMounts[0].MountPath)
|
||||
|
||||
// Verify all container mounts were added
|
||||
for _, c := range pod.Spec.Containers {
|
||||
assert.Equal(t, 1, len(c.VolumeMounts), "container %s should have mount", c.Name)
|
||||
assert.Equal(t, tokenVolumeName, c.VolumeMounts[0].Name)
|
||||
assert.Equal(t, serviceAccountTokenMountPath, c.VolumeMounts[0].MountPath)
|
||||
}
|
||||
|
||||
// Verify ephemeral container mounts were added
|
||||
for _, c := range pod.Spec.EphemeralContainers {
|
||||
assert.Equal(t, 1, len(c.VolumeMounts), "ephemeral container %s should have mount", c.Name)
|
||||
assert.Equal(t, tokenVolumeName, c.VolumeMounts[0].Name)
|
||||
assert.Equal(t, serviceAccountTokenMountPath, c.VolumeMounts[0].MountPath)
|
||||
}
|
||||
}
|
||||
|
||||
func Test_virtualSecret(t *testing.T) {
|
||||
s := virtualSecret("my-secret", "my-ns", "my-sa")
|
||||
|
||||
assert.Equal(t, "my-secret", s.Name)
|
||||
assert.Equal(t, "my-ns", s.Namespace)
|
||||
assert.Equal(t, corev1.SecretTypeServiceAccountToken, s.Type)
|
||||
assert.Equal(t, "my-sa", s.Annotations[corev1.ServiceAccountNameKey])
|
||||
assert.Equal(t, "Secret", s.Kind)
|
||||
assert.Equal(t, "v1", s.APIVersion)
|
||||
}
|
||||
|
||||
func Test_generateTokenSecretName(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
serviceAccountName string
|
||||
tokenPath string
|
||||
tokenReq *authv1.TokenRequest
|
||||
want string
|
||||
}{
|
||||
{
|
||||
name: "no audiences, no expiration, no path",
|
||||
serviceAccountName: "default",
|
||||
tokenReq: &authv1.TokenRequest{
|
||||
Spec: authv1.TokenRequestSpec{},
|
||||
},
|
||||
want: "k3k-default",
|
||||
},
|
||||
{
|
||||
name: "no audiences, with expiration",
|
||||
serviceAccountName: "default",
|
||||
tokenPath: "token",
|
||||
tokenReq: &authv1.TokenRequest{
|
||||
Spec: authv1.TokenRequestSpec{
|
||||
ExpirationSeconds: ptr.To(int64(3600)),
|
||||
},
|
||||
},
|
||||
want: "k3k-default-3600-token",
|
||||
},
|
||||
{
|
||||
name: "with single audience and expiration",
|
||||
serviceAccountName: "my-sa",
|
||||
tokenPath: "token",
|
||||
tokenReq: &authv1.TokenRequest{
|
||||
Spec: authv1.TokenRequestSpec{
|
||||
Audiences: []string{"api"},
|
||||
ExpirationSeconds: ptr.To(int64(3600)),
|
||||
},
|
||||
},
|
||||
want: "k3k-my-sa-api-3600-token",
|
||||
},
|
||||
{
|
||||
name: "with multiple audiences and expiration",
|
||||
serviceAccountName: "my-sa",
|
||||
tokenPath: "token",
|
||||
tokenReq: &authv1.TokenRequest{
|
||||
Spec: authv1.TokenRequestSpec{
|
||||
Audiences: []string{"api", "vault"},
|
||||
ExpirationSeconds: ptr.To(int64(3600)),
|
||||
},
|
||||
},
|
||||
want: "k3k-my-sa-api-vault-3600-token",
|
||||
},
|
||||
{
|
||||
name: "with audiences, no expiration",
|
||||
serviceAccountName: "my-sa",
|
||||
tokenPath: "vault-token",
|
||||
tokenReq: &authv1.TokenRequest{
|
||||
Spec: authv1.TokenRequestSpec{
|
||||
Audiences: []string{"api"},
|
||||
},
|
||||
},
|
||||
want: "k3k-my-sa-api-vault-token",
|
||||
},
|
||||
{
|
||||
name: "different paths produce different names",
|
||||
serviceAccountName: "my-sa",
|
||||
tokenPath: "other-path",
|
||||
tokenReq: &authv1.TokenRequest{
|
||||
Spec: authv1.TokenRequestSpec{
|
||||
Audiences: []string{"api"},
|
||||
ExpirationSeconds: ptr.To(int64(3600)),
|
||||
},
|
||||
},
|
||||
want: "k3k-my-sa-api-3600-other-path",
|
||||
},
|
||||
{
|
||||
name: "long name gets truncated with hash",
|
||||
serviceAccountName: "my-very-long-service-account-name",
|
||||
tokenPath: "some-very-long-token-path-value",
|
||||
tokenReq: &authv1.TokenRequest{
|
||||
Spec: authv1.TokenRequestSpec{
|
||||
Audiences: []string{"some-very-long-audience-string"},
|
||||
ExpirationSeconds: ptr.To(int64(3600)),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := generateTokenSecretName(tt.serviceAccountName, tt.tokenPath, tt.tokenReq)
|
||||
if tt.want != "" {
|
||||
assert.Equal(t, tt.want, got)
|
||||
}
|
||||
|
||||
assert.Less(t, len(got), 64, "name should be under 64 characters")
|
||||
})
|
||||
}
|
||||
}
|
||||
5
main.go
@@ -35,7 +35,6 @@ var (
|
||||
config cluster.Config
|
||||
kubeconfig string
|
||||
kubeletPortRange string
|
||||
webhookPortRange string
|
||||
maxConcurrentReconciles int
|
||||
debug bool
|
||||
logFormat string
|
||||
@@ -57,6 +56,7 @@ func main() {
|
||||
},
|
||||
PersistentPreRun: func(cmd *cobra.Command, args []string) {
|
||||
cmds.InitializeConfig(cmd)
|
||||
|
||||
logger = zapr.NewLogger(log.New(debug, logFormat))
|
||||
},
|
||||
RunE: run,
|
||||
@@ -71,7 +71,6 @@ func main() {
|
||||
rootCmd.PersistentFlags().StringVar(&config.VirtualAgentImage, "agent-virtual-image", "rancher/k3s", "K3S Virtual Agent image")
|
||||
rootCmd.PersistentFlags().StringVar(&config.VirtualAgentImagePullPolicy, "agent-virtual-image-pull-policy", "", "K3S Virtual Agent image pull policy must be one of Always, IfNotPresent or Never")
|
||||
rootCmd.PersistentFlags().StringVar(&kubeletPortRange, "kubelet-port-range", "50000-51000", "Port Range for k3k kubelet in shared mode")
|
||||
rootCmd.PersistentFlags().StringVar(&webhookPortRange, "webhook-port-range", "51001-52000", "Port Range for k3k kubelet webhook in shared mode")
|
||||
rootCmd.PersistentFlags().StringVar(&config.K3SServerImage, "k3s-server-image", "rancher/k3s", "K3K server image")
|
||||
rootCmd.PersistentFlags().StringVar(&config.K3SServerImagePullPolicy, "k3s-server-image-pull-policy", "", "K3K server image pull policy")
|
||||
rootCmd.PersistentFlags().StringSliceVar(&config.ServerImagePullSecrets, "server-image-pull-secret", nil, "Image pull secret used for servers")
|
||||
@@ -109,7 +108,7 @@ func run(cmd *cobra.Command, args []string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
runnable := portAllocator.InitPortAllocatorConfig(ctx, mgr.GetClient(), kubeletPortRange, webhookPortRange)
|
||||
runnable := portAllocator.InitPortAllocatorConfig(ctx, mgr.GetClient(), kubeletPortRange)
|
||||
if err := mgr.Add(runnable); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM alpine
|
||||
FROM registry.suse.com/bci/bci-base:15.7
|
||||
|
||||
ARG BIN_K3K=bin/k3k
|
||||
ARG BIN_K3KCLI=bin/k3kcli
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# TODO: switch this to BCI-micro or scratch. Left as base right now so that debugging can be done a bit more easily
|
||||
FROM registry.suse.com/bci/bci-base:15.6
|
||||
FROM registry.suse.com/bci/bci-base:15.7
|
||||
|
||||
ARG BIN_K3K_KUBELET=bin/k3k-kubelet
|
||||
|
||||
|
||||
@@ -1,7 +1,9 @@
|
||||
package v1beta1
|
||||
|
||||
import (
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
@@ -123,7 +125,7 @@ type ClusterSpec struct {
|
||||
// The Secret must have a "token" field in its data.
|
||||
//
|
||||
// +optional
|
||||
TokenSecretRef *v1.SecretReference `json:"tokenSecretRef,omitempty"`
|
||||
TokenSecretRef *corev1.SecretReference `json:"tokenSecretRef,omitempty"`
|
||||
|
||||
// TLSSANs specifies subject alternative names for the K3s server certificate.
|
||||
//
|
||||
@@ -145,12 +147,12 @@ type ClusterSpec struct {
|
||||
// ServerEnvs specifies list of environment variables to set in the server pod.
|
||||
//
|
||||
// +optional
|
||||
ServerEnvs []v1.EnvVar `json:"serverEnvs,omitempty"`
|
||||
ServerEnvs []corev1.EnvVar `json:"serverEnvs,omitempty"`
|
||||
|
||||
// AgentEnvs specifies list of environment variables to set in the agent pod.
|
||||
//
|
||||
// +optional
|
||||
AgentEnvs []v1.EnvVar `json:"agentEnvs,omitempty"`
|
||||
AgentEnvs []corev1.EnvVar `json:"agentEnvs,omitempty"`
|
||||
|
||||
// Addons specifies secrets containing raw YAML to deploy on cluster startup.
|
||||
//
|
||||
@@ -160,12 +162,24 @@ type ClusterSpec struct {
|
||||
// ServerLimit specifies resource limits for server nodes.
|
||||
//
|
||||
// +optional
|
||||
ServerLimit v1.ResourceList `json:"serverLimit,omitempty"`
|
||||
ServerLimit corev1.ResourceList `json:"serverLimit,omitempty"`
|
||||
|
||||
// WorkerLimit specifies resource limits for agent nodes.
|
||||
//
|
||||
// +optional
|
||||
WorkerLimit v1.ResourceList `json:"workerLimit,omitempty"`
|
||||
WorkerLimit corev1.ResourceList `json:"workerLimit,omitempty"`
|
||||
|
||||
// ServerAffinity specifies the affinity rules for server pods.
|
||||
// This includes both node affinity and pod affinity/anti-affinity rules.
|
||||
//
|
||||
// +optional
|
||||
ServerAffinity *corev1.Affinity `json:"serverAffinity,omitempty"`
|
||||
|
||||
// AgentAffinity specifies the affinity rules for agent pods.
|
||||
// This includes both node affinity and pod affinity/anti-affinity rules.
|
||||
//
|
||||
// +optional
|
||||
AgentAffinity *corev1.Affinity `json:"agentAffinity,omitempty"`
|
||||
|
||||
// MirrorHostNodes controls whether node objects from the host cluster
|
||||
// are mirrored into the virtual cluster.
|
||||
@@ -183,6 +197,49 @@ type ClusterSpec struct {
|
||||
// +kubebuilder:default={}
|
||||
// +optional
|
||||
Sync *SyncConfig `json:"sync,omitempty"`
|
||||
|
||||
// SecretMounts specifies a list of secrets to mount into server and agent pods.
|
||||
// Each entry defines a secret and its mount path within the pods.
|
||||
//
|
||||
// +optional
|
||||
SecretMounts []SecretMount `json:"secretMounts,omitempty"`
|
||||
|
||||
// SecurityContext specifies custom SecurityContext to be added
|
||||
// to the agent and server pods of the cluster in virtual or shared mode.
|
||||
// This option will override the SecurityContext set by default for virtual mode.
|
||||
//
|
||||
// +optional
|
||||
SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty"`
|
||||
|
||||
// RuntimeClassName specifies alternative runtime class for the
|
||||
// agent and server pods of the cluster in virtual or shared mode.
|
||||
//
|
||||
// +optional
|
||||
RuntimeClassName *string `json:"runtimeClassName,omitempty"`
|
||||
}
|
||||
|
||||
// SecretMount defines a secret to be mounted into server or agent pods,
|
||||
// allowing for custom configurations, certificates, or other sensitive data.
|
||||
type SecretMount struct {
|
||||
// Embeds SecretName, Items, DefaultMode, and Optional
|
||||
corev1.SecretVolumeSource `json:",inline"`
|
||||
// MountPath is the path within server and agent pods where the
|
||||
// secret contents will be mounted.
|
||||
//
|
||||
// +optional
|
||||
MountPath string `json:"mountPath,omitempty"`
|
||||
// SubPath is an optional path within the secret to mount instead of the root.
|
||||
// When specified, only the specified key from the secret will be mounted as a file
|
||||
// at MountPath, keeping the parent directory writable.
|
||||
//
|
||||
// +optional
|
||||
SubPath string `json:"subPath,omitempty"`
|
||||
// Role is the type of the k3k pod that will be used to mount the secret.
|
||||
// This can be 'server', 'agent', or 'all' (for both).
|
||||
//
|
||||
// +optional
|
||||
// +kubebuilder:validation:Enum=server;agent;all
|
||||
Role string `json:"role,omitempty"`
|
||||
}
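For illustration, a minimal sketch (not part of this change) of how the new SecretMounts field could be set on a Cluster; the secret name and mount path are hypothetical:

// Assumes imports: corev1 "k8s.io/api/core/v1" and "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1".
// Hypothetical example: mount an existing secret "registry-creds" into the server pods only.
cluster := &v1beta1.Cluster{
	Spec: v1beta1.ClusterSpec{
		SecretMounts: []v1beta1.SecretMount{
			{
				SecretVolumeSource: corev1.SecretVolumeSource{SecretName: "registry-creds"},
				MountPath:          "/etc/rancher/registries", // hypothetical path
				Role:               "server",                  // "server", "agent", or "all"
			},
		},
	},
}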
|
||||
|
||||
// SyncConfig will contain the resources that should be synced from virtual cluster to host cluster.
|
||||
@@ -217,9 +274,14 @@ type SyncConfig struct {
|
||||
// +kubebuilder:default={"enabled": false}
|
||||
// +optional
|
||||
PriorityClasses PriorityClassSyncConfig `json:"priorityClasses"`
|
||||
// StorageClasses resources sync configuration.
|
||||
//
|
||||
// +kubebuilder:default={"enabled": false}
|
||||
// +optional
|
||||
StorageClasses StorageClassSyncConfig `json:"storageClasses"`
|
||||
}
|
||||
|
||||
// SecretSyncConfig specifies the sync options for services.
|
||||
// SecretSyncConfig specifies the sync options for Secrets.
|
||||
type SecretSyncConfig struct {
|
||||
// Enabled is an on/off switch for syncing resources.
|
||||
//
|
||||
@@ -234,7 +296,7 @@ type SecretSyncConfig struct {
|
||||
Selector map[string]string `json:"selector,omitempty"`
|
||||
}
|
||||
|
||||
// ServiceSyncConfig specifies the sync options for services.
|
||||
// ServiceSyncConfig specifies the sync options for Services.
|
||||
type ServiceSyncConfig struct {
|
||||
// Enabled is an on/off switch for syncing resources.
|
||||
//
|
||||
@@ -249,7 +311,7 @@ type ServiceSyncConfig struct {
|
||||
Selector map[string]string `json:"selector,omitempty"`
|
||||
}
|
||||
|
||||
// ConfigMapSyncConfig specifies the sync options for services.
|
||||
// ConfigMapSyncConfig specifies the sync options for ConfigMaps.
|
||||
type ConfigMapSyncConfig struct {
|
||||
// Enabled is an on/off switch for syncing resources.
|
||||
//
|
||||
@@ -264,7 +326,7 @@ type ConfigMapSyncConfig struct {
|
||||
Selector map[string]string `json:"selector,omitempty"`
|
||||
}
|
||||
|
||||
// IngressSyncConfig specifies the sync options for services.
|
||||
// IngressSyncConfig specifies the sync options for Ingresses.
|
||||
type IngressSyncConfig struct {
|
||||
// Enabled is an on/off switch for syncing resources.
|
||||
//
|
||||
@@ -277,9 +339,16 @@ type IngressSyncConfig struct {
|
||||
//
|
||||
// +optional
|
||||
Selector map[string]string `json:"selector,omitempty"`
|
||||
|
||||
// DisableTLSSecretTranslation is an on/off switch for translating TLS secrets
|
||||
// from virtual cluster to host cluster
|
||||
//
|
||||
// +kubebuilder:default=false
|
||||
// +optional
|
||||
DisableTLSSecretTranslation bool `json:"disableTLSSecretTranslation,omitempty"`
|
||||
}
|
||||
|
||||
// PersistentVolumeClaimSyncConfig specifies the sync options for services.
|
||||
// PersistentVolumeClaimSyncConfig specifies the sync options for PersistentVolumeClaims.
|
||||
type PersistentVolumeClaimSyncConfig struct {
|
||||
// Enabled is an on/off switch for syncing resources.
|
||||
//
|
||||
@@ -294,7 +363,7 @@ type PersistentVolumeClaimSyncConfig struct {
|
||||
Selector map[string]string `json:"selector,omitempty"`
|
||||
}
|
||||
|
||||
// PriorityClassSyncConfig specifies the sync options for services.
|
||||
// PriorityClassSyncConfig specifies the sync options for PriorityClasses.
|
||||
type PriorityClassSyncConfig struct {
|
||||
// Enabled is an on/off switch for syncing resources.
|
||||
//
|
||||
@@ -309,6 +378,21 @@ type PriorityClassSyncConfig struct {
|
||||
Selector map[string]string `json:"selector,omitempty"`
|
||||
}
|
||||
|
||||
// StorageClassSyncConfig specifies the sync options for StorageClasses.
|
||||
type StorageClassSyncConfig struct {
|
||||
// Enabled is an on/off switch for syncing resources.
|
||||
//
|
||||
// +kubebuilder:default=false
|
||||
// +required
|
||||
Enabled bool `json:"enabled"`
|
||||
|
||||
// Selector specifies set of labels of the resources that will be synced, if empty
|
||||
// then all resources of the given type will be synced.
|
||||
//
|
||||
// +optional
|
||||
Selector map[string]string `json:"selector,omitempty"`
|
||||
}
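For context, a minimal sketch of enabling the new StorageClass sync with a label selector; the label key and value are hypothetical, not taken from the repository:

// Sync only host StorageClasses carrying the (hypothetical) label tier=fast.
sync := &v1beta1.SyncConfig{
	StorageClasses: v1beta1.StorageClassSyncConfig{
		Enabled:  true,
		Selector: map[string]string{"tier": "fast"},
	},
}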
|
||||
|
||||
// ClusterMode is the possible provisioning mode of a Cluster.
|
||||
//
|
||||
// +kubebuilder:validation:Enum=shared;virtual
|
||||
@@ -362,8 +446,9 @@ type PersistenceConfig struct {
|
||||
// This field is only relevant in "dynamic" mode.
|
||||
//
|
||||
// +kubebuilder:default="2G"
|
||||
// +kubebuilder:validation:XValidation:message="storageRequestSize is immutable",rule="self == oldSelf"
|
||||
// +optional
|
||||
StorageRequestSize string `json:"storageRequestSize,omitempty"`
|
||||
StorageRequestSize *resource.Quantity `json:"storageRequestSize,omitempty"`
|
||||
}
|
||||
|
||||
// ExposeConfig specifies options for exposing the API server.
|
||||
@@ -467,10 +552,9 @@ type CredentialSources struct {
|
||||
// CredentialSource defines where to get a credential from.
|
||||
// It can represent either a TLS key pair or a single private key.
|
||||
type CredentialSource struct {
|
||||
// SecretName specifies the name of an existing secret to use.
|
||||
// The controller expects specific keys inside based on the credential type:
|
||||
// - For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
|
||||
// - For ServiceAccountTokenKey: 'tls.key'.
|
||||
// The secret must contain specific keys based on the credential type:
|
||||
// - For TLS certificate pairs (e.g., ServerCA): `tls.crt` and `tls.key`.
|
||||
// - For the ServiceAccountToken signing key: `tls.key`.
|
||||
SecretName string `json:"secretName"`
|
||||
}
|
||||
|
||||
@@ -506,16 +590,17 @@ type ClusterStatus struct {
|
||||
// +optional
|
||||
PolicyName string `json:"policyName,omitempty"`
|
||||
|
||||
// policy represents the status of the policy applied to this cluster.
|
||||
// This field is set by the VirtualClusterPolicy controller.
|
||||
//
|
||||
// +optional
|
||||
Policy *AppliedPolicy `json:"policy,omitempty"`
|
||||
|
||||
// KubeletPort specifies the port used by k3k-kubelet in shared mode.
|
||||
//
|
||||
// +optional
|
||||
KubeletPort int `json:"kubeletPort,omitempty"`
|
||||
|
||||
// WebhookPort specifies the port used by the webhook in k3k-kubelet in shared mode.
|
||||
//
|
||||
// +optional
|
||||
WebhookPort int `json:"webhookPort,omitempty"`
|
||||
|
||||
// Conditions are the individual conditions for the cluster set.
|
||||
//
|
||||
// +optional
|
||||
@@ -529,6 +614,54 @@ type ClusterStatus struct {
|
||||
Phase ClusterPhase `json:"phase,omitempty"`
|
||||
}
|
||||
|
||||
// AppliedPolicy defines the observed state of an applied policy.
|
||||
type AppliedPolicy struct {
|
||||
// name is the name of the VirtualClusterPolicy currently applied to this cluster.
|
||||
//
|
||||
// +kubebuilder:validation:MinLength:=1
|
||||
// +required
|
||||
Name string `json:"name,omitempty"`
|
||||
|
||||
// priorityClass is the priority class enforced by the active VirtualClusterPolicy.
|
||||
//
|
||||
// +optional
|
||||
PriorityClass *string `json:"priorityClass,omitempty"`
|
||||
|
||||
// nodeSelector is a node selector enforced by the active VirtualClusterPolicy.
|
||||
//
|
||||
// +optional
|
||||
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
|
||||
|
||||
// serverAffinity specifies the affinity rules for server pods enforced by the active VirtualClusterPolicy.
|
||||
// This includes both node affinity and pod affinity/anti-affinity rules.
|
||||
//
|
||||
// +optional
|
||||
ServerAffinity *corev1.Affinity `json:"serverAffinity,omitempty"`
|
||||
|
||||
// agentAffinity specifies the affinity rules for agent pods enforced by the active VirtualClusterPolicy.
|
||||
// This includes both node affinity and pod affinity/anti-affinity rules.
|
||||
//
|
||||
// +optional
|
||||
AgentAffinity *corev1.Affinity `json:"agentAffinity,omitempty"`
|
||||
|
||||
// sync is the SyncConfig enforced by the active VirtualClusterPolicy.
|
||||
//
|
||||
// +optional
|
||||
Sync *SyncConfig `json:"sync,omitempty"`
|
||||
|
||||
// RuntimeClassName specifies alternative runtime class for the
// agent and server pods of the cluster in virtual or shared mode.
//
// +optional
RuntimeClassName *string `json:"runtimeClassName,omitempty"`

// SecurityContext specifies custom SecurityContext to be added
// to the agent and server pods of the cluster in virtual or shared mode.
//
// +optional
SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty"`
|
||||
}
|
||||
|
||||
// ClusterPhase is a high-level summary of the cluster's current lifecycle state.
|
||||
type ClusterPhase string
|
||||
|
||||
@@ -582,13 +715,13 @@ type VirtualClusterPolicySpec struct {
|
||||
// Quota specifies the resource limits for clusters within a clusterpolicy.
|
||||
//
|
||||
// +optional
|
||||
Quota *v1.ResourceQuotaSpec `json:"quota,omitempty"`
|
||||
Quota *corev1.ResourceQuotaSpec `json:"quota,omitempty"`
|
||||
|
||||
// Limit specifies the LimitRange that will be applied to all pods within the VirtualClusterPolicy
|
||||
// to set defaults and constraints (min/max)
|
||||
//
|
||||
// +optional
|
||||
Limit *v1.LimitRangeSpec `json:"limit,omitempty"`
|
||||
Limit *corev1.LimitRangeSpec `json:"limit,omitempty"`
|
||||
|
||||
// DefaultNodeSelector specifies the node selector that applies to all clusters (server + agent) in the target Namespace.
|
||||
//
|
||||
@@ -600,6 +733,18 @@ type VirtualClusterPolicySpec struct {
|
||||
// +optional
|
||||
DefaultPriorityClass string `json:"defaultPriorityClass,omitempty"`
|
||||
|
||||
// DefaultServerAffinity specifies the affinity rules applied to server pods of all clusters in the target Namespace.
|
||||
// This includes both node affinity and pod affinity/anti-affinity rules.
|
||||
//
|
||||
// +optional
|
||||
DefaultServerAffinity *corev1.Affinity `json:"defaultServerAffinity,omitempty"`
|
||||
|
||||
// DefaultAgentAffinity specifies the affinity rules applied to agent pods of all clusters in the target Namespace.
|
||||
// This includes both node affinity and pod affinity/anti-affinity rules.
|
||||
//
|
||||
// +optional
|
||||
DefaultAgentAffinity *corev1.Affinity `json:"defaultAgentAffinity,omitempty"`
|
||||
|
||||
// AllowedMode specifies the allowed cluster provisioning mode. Defaults to "shared".
|
||||
//
|
||||
// +kubebuilder:default=shared
|
||||
@@ -622,6 +767,18 @@ type VirtualClusterPolicySpec struct {
|
||||
// +kubebuilder:default={}
|
||||
// +optional
|
||||
Sync *SyncConfig `json:"sync,omitempty"`
|
||||
|
||||
// RuntimeClassName specifies alternative runtime class for the
// agent and server pods of the cluster in virtual or shared mode.
//
// +optional
RuntimeClassName *string `json:"runtimeClassName,omitempty"`

// SecurityContext specifies custom SecurityContext to be added
// to the agent and server pods of the cluster in virtual or shared mode.
//
// +optional
SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty"`
|
||||
}
|
||||
|
||||
// PodSecurityAdmissionLevel is the policy level applied to the pods in the namespace.
|
||||
|
||||
@@ -25,6 +25,58 @@ func (in *Addon) DeepCopy() *Addon {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *AppliedPolicy) DeepCopyInto(out *AppliedPolicy) {
|
||||
*out = *in
|
||||
if in.PriorityClass != nil {
|
||||
in, out := &in.PriorityClass, &out.PriorityClass
|
||||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
if in.NodeSelector != nil {
|
||||
in, out := &in.NodeSelector, &out.NodeSelector
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
if in.ServerAffinity != nil {
|
||||
in, out := &in.ServerAffinity, &out.ServerAffinity
|
||||
*out = new(v1.Affinity)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.AgentAffinity != nil {
|
||||
in, out := &in.AgentAffinity, &out.AgentAffinity
|
||||
*out = new(v1.Affinity)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.Sync != nil {
|
||||
in, out := &in.Sync, &out.Sync
|
||||
*out = new(SyncConfig)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.RuntimeClassName != nil {
|
||||
in, out := &in.RuntimeClassName, &out.RuntimeClassName
|
||||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
if in.SecurityContext != nil {
|
||||
in, out := &in.SecurityContext, &out.SecurityContext
|
||||
*out = new(v1.SecurityContext)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppliedPolicy.
|
||||
func (in *AppliedPolicy) DeepCopy() *AppliedPolicy {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(AppliedPolicy)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Cluster) DeepCopyInto(out *Cluster) {
|
||||
*out = *in
|
||||
@@ -163,6 +215,16 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
|
||||
(*out)[key] = val.DeepCopy()
|
||||
}
|
||||
}
|
||||
if in.ServerAffinity != nil {
|
||||
in, out := &in.ServerAffinity, &out.ServerAffinity
|
||||
*out = new(v1.Affinity)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.AgentAffinity != nil {
|
||||
in, out := &in.AgentAffinity, &out.AgentAffinity
|
||||
*out = new(v1.Affinity)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.CustomCAs != nil {
|
||||
in, out := &in.CustomCAs, &out.CustomCAs
|
||||
*out = new(CustomCAs)
|
||||
@@ -173,6 +235,23 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
|
||||
*out = new(SyncConfig)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.SecretMounts != nil {
|
||||
in, out := &in.SecretMounts, &out.SecretMounts
|
||||
*out = make([]SecretMount, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.SecurityContext != nil {
|
||||
in, out := &in.SecurityContext, &out.SecurityContext
|
||||
*out = new(v1.SecurityContext)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.RuntimeClassName != nil {
|
||||
in, out := &in.RuntimeClassName, &out.RuntimeClassName
|
||||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec.
|
||||
@@ -193,6 +272,11 @@ func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) {
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Policy != nil {
|
||||
in, out := &in.Policy, &out.Policy
|
||||
*out = new(AppliedPolicy)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.Conditions != nil {
|
||||
in, out := &in.Conditions, &out.Conditions
|
||||
*out = make([]metav1.Condition, len(*in))
|
||||
@@ -418,6 +502,11 @@ func (in *PersistenceConfig) DeepCopyInto(out *PersistenceConfig) {
|
||||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
if in.StorageRequestSize != nil {
|
||||
in, out := &in.StorageRequestSize, &out.StorageRequestSize
|
||||
x := (*in).DeepCopy()
|
||||
*out = &x
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistenceConfig.
|
||||
@@ -474,6 +563,22 @@ func (in *PriorityClassSyncConfig) DeepCopy() *PriorityClassSyncConfig {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *SecretMount) DeepCopyInto(out *SecretMount) {
|
||||
*out = *in
|
||||
in.SecretVolumeSource.DeepCopyInto(&out.SecretVolumeSource)
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretMount.
|
||||
func (in *SecretMount) DeepCopy() *SecretMount {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(SecretMount)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *SecretSyncConfig) DeepCopyInto(out *SecretSyncConfig) {
|
||||
*out = *in
|
||||
@@ -518,6 +623,28 @@ func (in *ServiceSyncConfig) DeepCopy() *ServiceSyncConfig {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *StorageClassSyncConfig) DeepCopyInto(out *StorageClassSyncConfig) {
|
||||
*out = *in
|
||||
if in.Selector != nil {
|
||||
in, out := &in.Selector, &out.Selector
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageClassSyncConfig.
|
||||
func (in *StorageClassSyncConfig) DeepCopy() *StorageClassSyncConfig {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(StorageClassSyncConfig)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *SyncConfig) DeepCopyInto(out *SyncConfig) {
|
||||
*out = *in
|
||||
@@ -527,6 +654,7 @@ func (in *SyncConfig) DeepCopyInto(out *SyncConfig) {
|
||||
in.Ingresses.DeepCopyInto(&out.Ingresses)
|
||||
in.PersistentVolumeClaims.DeepCopyInto(&out.PersistentVolumeClaims)
|
||||
in.PriorityClasses.DeepCopyInto(&out.PriorityClasses)
|
||||
in.StorageClasses.DeepCopyInto(&out.StorageClasses)
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncConfig.
|
||||
@@ -618,6 +746,16 @@ func (in *VirtualClusterPolicySpec) DeepCopyInto(out *VirtualClusterPolicySpec)
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
if in.DefaultServerAffinity != nil {
|
||||
in, out := &in.DefaultServerAffinity, &out.DefaultServerAffinity
|
||||
*out = new(v1.Affinity)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.DefaultAgentAffinity != nil {
|
||||
in, out := &in.DefaultAgentAffinity, &out.DefaultAgentAffinity
|
||||
*out = new(v1.Affinity)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.PodSecurityAdmissionLevel != nil {
|
||||
in, out := &in.PodSecurityAdmissionLevel, &out.PodSecurityAdmissionLevel
|
||||
*out = new(PodSecurityAdmissionLevel)
|
||||
@@ -628,6 +766,16 @@ func (in *VirtualClusterPolicySpec) DeepCopyInto(out *VirtualClusterPolicySpec)
|
||||
*out = new(SyncConfig)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.RuntimeClassName != nil {
|
||||
in, out := &in.RuntimeClassName, &out.RuntimeClassName
|
||||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
if in.SecurityContext != nil {
|
||||
in, out := &in.SecurityContext, &out.SecurityContext
|
||||
*out = new(v1.SecurityContext)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualClusterPolicySpec.
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"gopkg.in/yaml.v2"
|
||||
"gopkg.in/yaml.v3"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/apis/core"
|
||||
"k8s.io/kubernetes/pkg/registry/core/service/portallocator"
|
||||
@@ -19,7 +19,6 @@ import (
|
||||
|
||||
const (
|
||||
kubeletPortRangeConfigMapName = "k3k-kubelet-port-range"
|
||||
webhookPortRangeConfigMapName = "k3k-webhook-port-range"
|
||||
|
||||
rangeKey = "range"
|
||||
allocatedPortsKey = "allocatedPorts"
|
||||
@@ -30,7 +29,6 @@ type PortAllocator struct {
|
||||
ctrlruntimeclient.Client
|
||||
|
||||
KubeletCM *v1.ConfigMap
|
||||
WebhookCM *v1.ConfigMap
|
||||
}
|
||||
|
||||
func NewPortAllocator(ctx context.Context, client ctrlruntimeclient.Client) (*PortAllocator, error) {
|
||||
@@ -42,32 +40,20 @@ func NewPortAllocator(ctx context.Context, client ctrlruntimeclient.Client) (*Po
|
||||
return nil, fmt.Errorf("failed to find k3k controller namespace")
|
||||
}
|
||||
|
||||
var kubeletPortRangeCM, webhookPortRangeCM v1.ConfigMap
|
||||
var kubeletPortRangeCM v1.ConfigMap
|
||||
|
||||
kubeletPortRangeCM.Name = kubeletPortRangeConfigMapName
|
||||
kubeletPortRangeCM.Namespace = portRangeConfigMapNamespace
|
||||
|
||||
webhookPortRangeCM.Name = webhookPortRangeConfigMapName
|
||||
webhookPortRangeCM.Namespace = portRangeConfigMapNamespace
|
||||
|
||||
return &PortAllocator{
|
||||
Client: client,
|
||||
KubeletCM: &kubeletPortRangeCM,
|
||||
WebhookCM: &webhookPortRangeCM,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (a *PortAllocator) InitPortAllocatorConfig(ctx context.Context, client ctrlruntimeclient.Client, kubeletPortRange, webhookPortRange string) manager.Runnable {
|
||||
func (a *PortAllocator) InitPortAllocatorConfig(ctx context.Context, client ctrlruntimeclient.Client, kubeletPortRange string) manager.Runnable {
|
||||
return manager.RunnableFunc(func(ctx context.Context) error {
|
||||
if err := a.getOrCreate(ctx, a.KubeletCM, kubeletPortRange); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := a.getOrCreate(ctx, a.WebhookCM, webhookPortRange); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
return a.getOrCreate(ctx, a.KubeletCM, kubeletPortRange)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -99,14 +85,6 @@ func (a *PortAllocator) getOrCreate(ctx context.Context, configmap *v1.ConfigMap
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *PortAllocator) AllocateWebhookPort(ctx context.Context, clusterName, clusterNamespace string) (int, error) {
|
||||
return a.allocatePort(ctx, clusterName, clusterNamespace, a.WebhookCM)
|
||||
}
|
||||
|
||||
func (a *PortAllocator) DeallocateWebhookPort(ctx context.Context, clusterName, clusterNamespace string, webhookPort int) error {
|
||||
return a.deallocatePort(ctx, clusterName, clusterNamespace, a.WebhookCM, webhookPort)
|
||||
}
|
||||
|
||||
func (a *PortAllocator) AllocateKubeletPort(ctx context.Context, clusterName, clusterNamespace string) (int, error) {
|
||||
return a.allocatePort(ctx, clusterName, clusterNamespace, a.KubeletCM)
|
||||
}
|
||||
|
||||
@@ -2,26 +2,21 @@ package agent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
|
||||
certutil "github.com/rancher/dynamiclistener/cert"
|
||||
apps "k8s.io/api/apps/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
|
||||
"github.com/rancher/k3k/pkg/controller"
|
||||
"github.com/rancher/k3k/pkg/controller/certs"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -37,11 +32,10 @@ type SharedAgent struct {
|
||||
imageRegistry string
|
||||
token string
|
||||
kubeletPort int
|
||||
webhookPort int
|
||||
imagePullSecrets []string
|
||||
}
|
||||
|
||||
func NewSharedAgent(config *Config, serviceIP, image, imagePullPolicy, token string, kubeletPort, webhookPort int, imagePullSecrets []string) *SharedAgent {
|
||||
func NewSharedAgent(config *Config, serviceIP, image, imagePullPolicy, token string, kubeletPort int, imagePullSecrets []string) *SharedAgent {
|
||||
return &SharedAgent{
|
||||
Config: config,
|
||||
serviceIP: serviceIP,
|
||||
@@ -49,7 +43,6 @@ func NewSharedAgent(config *Config, serviceIP, image, imagePullPolicy, token str
|
||||
imagePullPolicy: imagePullPolicy,
|
||||
token: token,
|
||||
kubeletPort: kubeletPort,
|
||||
webhookPort: webhookPort,
|
||||
imagePullSecrets: imagePullSecrets,
|
||||
}
|
||||
}
|
||||
@@ -67,7 +60,6 @@ func (s *SharedAgent) EnsureResources(ctx context.Context) error {
|
||||
s.service(ctx),
|
||||
s.daemonset(ctx),
|
||||
s.dnsService(ctx),
|
||||
s.webhookTLS(ctx),
|
||||
); err != nil {
|
||||
return fmt.Errorf("failed to ensure some resources: %w", err)
|
||||
}
|
||||
@@ -80,7 +72,7 @@ func (s *SharedAgent) ensureObject(ctx context.Context, obj ctrlruntimeclient.Ob
|
||||
}
|
||||
|
||||
func (s *SharedAgent) config(ctx context.Context) error {
|
||||
config := sharedAgentData(s.cluster, s.Name(), s.token, s.serviceIP, s.kubeletPort, s.webhookPort)
|
||||
config := sharedAgentData(s.cluster, s.Name(), s.token, s.serviceIP, s.kubeletPort)
|
||||
|
||||
configSecret := &v1.Secret{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
@@ -99,7 +91,7 @@ func (s *SharedAgent) config(ctx context.Context) error {
|
||||
return s.ensureObject(ctx, configSecret)
|
||||
}
|
||||
|
||||
func sharedAgentData(cluster *v1beta1.Cluster, serviceName, token, ip string, kubeletPort, webhookPort int) string {
|
||||
func sharedAgentData(cluster *v1beta1.Cluster, serviceName, token, ip string, kubeletPort int) string {
|
||||
version := cluster.Spec.Version
|
||||
if cluster.Spec.Version == "" {
|
||||
version = cluster.Status.HostVersion
|
||||
@@ -112,9 +104,8 @@ serviceName: %s
|
||||
token: %v
|
||||
mirrorHostNodes: %t
|
||||
version: %s
|
||||
webhookPort: %d
|
||||
kubeletPort: %d`,
|
||||
cluster.Name, cluster.Namespace, ip, serviceName, token, cluster.Spec.MirrorHostNodes, version, webhookPort, kubeletPort)
|
||||
cluster.Name, cluster.Namespace, ip, serviceName, token, cluster.Spec.MirrorHostNodes, version, kubeletPort)
|
||||
}
|
||||
|
||||
func (s *SharedAgent) daemonset(ctx context.Context) error {
|
||||
@@ -142,7 +133,7 @@ func (s *SharedAgent) daemonset(ctx context.Context) error {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: labels,
|
||||
},
|
||||
Spec: s.podSpec(),
|
||||
Spec: s.podSpec(ctx),
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -150,7 +141,9 @@ func (s *SharedAgent) daemonset(ctx context.Context) error {
|
||||
return s.ensureObject(ctx, deploy)
|
||||
}
|
||||
|
||||
func (s *SharedAgent) podSpec() v1.PodSpec {
|
||||
func (s *SharedAgent) podSpec(ctx context.Context) v1.PodSpec {
|
||||
log := ctrl.LoggerFrom(ctx)
|
||||
|
||||
hostNetwork := false
|
||||
dnsPolicy := v1.DNSClusterFirst
|
||||
|
||||
@@ -165,7 +158,15 @@ func (s *SharedAgent) podSpec() v1.PodSpec {
|
||||
image = s.imageRegistry + "/" + s.image
|
||||
}
|
||||
|
||||
// Use the agent affinity from the policy status if it exists, otherwise fall back to the spec.
|
||||
agentAffinity := s.cluster.Spec.AgentAffinity
|
||||
if s.cluster.Status.Policy != nil && s.cluster.Status.Policy.AgentAffinity != nil {
|
||||
log.V(1).Info("Using agent affinity from policy", "policyName", s.cluster.Status.PolicyName, "clusterName", s.cluster.Name)
|
||||
agentAffinity = s.cluster.Status.Policy.AgentAffinity
|
||||
}
|
||||
|
||||
podSpec := v1.PodSpec{
|
||||
Affinity: agentAffinity,
|
||||
HostNetwork: hostNetwork,
|
||||
DNSPolicy: dnsPolicy,
|
||||
ServiceAccountName: s.Name(),
|
||||
@@ -185,28 +186,6 @@ func (s *SharedAgent) podSpec() v1.PodSpec {
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "webhook-certs",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Secret: &v1.SecretVolumeSource{
|
||||
SecretName: WebhookSecretName(s.cluster.Name),
|
||||
Items: []v1.KeyToPath{
|
||||
{
|
||||
Key: "tls.crt",
|
||||
Path: "tls.crt",
|
||||
},
|
||||
{
|
||||
Key: "tls.key",
|
||||
Path: "tls.key",
|
||||
},
|
||||
{
|
||||
Key: "ca.crt",
|
||||
Path: "ca.crt",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
@@ -242,11 +221,6 @@ func (s *SharedAgent) podSpec() v1.PodSpec {
|
||||
MountPath: "/opt/rancher/k3k/",
|
||||
ReadOnly: false,
|
||||
},
|
||||
{
|
||||
Name: "webhook-certs",
|
||||
MountPath: "/opt/rancher/k3k-webhook",
|
||||
ReadOnly: false,
|
||||
},
|
||||
},
|
||||
Ports: []v1.ContainerPort{
|
||||
{
|
||||
@@ -254,11 +228,6 @@ func (s *SharedAgent) podSpec() v1.PodSpec {
|
||||
Protocol: v1.ProtocolTCP,
|
||||
ContainerPort: int32(s.kubeletPort),
|
||||
},
|
||||
{
|
||||
Name: "webhook-port",
|
||||
Protocol: v1.ProtocolTCP,
|
||||
ContainerPort: int32(s.webhookPort),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -267,6 +236,23 @@ func (s *SharedAgent) podSpec() v1.PodSpec {
|
||||
podSpec.ImagePullSecrets = append(podSpec.ImagePullSecrets, v1.LocalObjectReference{Name: imagePullSecret})
|
||||
}
|
||||
|
||||
securityContext := s.cluster.Spec.SecurityContext
|
||||
if s.cluster.Status.Policy != nil && s.cluster.Status.Policy.SecurityContext != nil {
|
||||
log.V(1).Info("Using securityContext configuration from policy", "policyName", s.cluster.Status.PolicyName, "clusterName", s.cluster.Name)
|
||||
securityContext = s.cluster.Status.Policy.SecurityContext
|
||||
}
|
||||
|
||||
if securityContext != nil {
|
||||
podSpec.Containers[0].SecurityContext = securityContext
|
||||
}
|
||||
|
||||
runtimeClassName := s.cluster.Spec.RuntimeClassName
|
||||
if s.cluster.Status.Policy != nil && s.cluster.Status.Policy.RuntimeClassName != nil {
|
||||
log.V(1).Info("Using runtimeClassName from policy", "policyName", s.cluster.Status.PolicyName, "clusterName", s.cluster.Name)
|
||||
runtimeClassName = s.cluster.Status.Policy.RuntimeClassName
|
||||
}
|
||||
|
||||
podSpec.RuntimeClassName = runtimeClassName
|
||||
return podSpec
|
||||
}
|
||||
|
||||
@@ -293,12 +279,6 @@ func (s *SharedAgent) service(ctx context.Context) error {
|
||||
Protocol: v1.ProtocolTCP,
|
||||
Port: int32(s.kubeletPort),
|
||||
},
|
||||
{
|
||||
Name: "webhook-server",
|
||||
Protocol: v1.ProtocolTCP,
|
||||
Port: int32(s.webhookPort),
|
||||
TargetPort: intstr.FromInt32(int32(s.webhookPort)),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -386,6 +366,11 @@ func (s *SharedAgent) role(ctx context.Context) error {
|
||||
Resources: []string{"events"},
|
||||
Verbs: []string{"create"},
|
||||
},
|
||||
{
|
||||
APIGroups: []string{""},
|
||||
Resources: []string{"resourcequotas"},
|
||||
Verbs: []string{"get", "watch", "list"},
|
||||
},
|
||||
{
|
||||
APIGroups: []string{"networking.k8s.io"},
|
||||
Resources: []string{"ingresses"},
|
||||
@@ -433,94 +418,3 @@ func (s *SharedAgent) roleBinding(ctx context.Context) error {
|
||||
|
||||
return s.ensureObject(ctx, roleBinding)
|
||||
}
|
||||
|
||||
func (s *SharedAgent) webhookTLS(ctx context.Context) error {
|
||||
webhookSecret := &v1.Secret{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Secret",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: WebhookSecretName(s.cluster.Name),
|
||||
Namespace: s.cluster.Namespace,
|
||||
},
|
||||
}
|
||||
|
||||
key := ctrlruntimeclient.ObjectKeyFromObject(webhookSecret)
|
||||
if err := s.client.Get(ctx, key, webhookSecret); err != nil {
|
||||
if !apierrors.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
caPrivateKeyPEM, caCertPEM, err := newWebhookSelfSignedCACerts()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
altNames := []string{s.Name(), s.cluster.Name}
|
||||
|
||||
webhookCert, webhookKey, err := newWebhookCerts(s.Name(), altNames, caPrivateKeyPEM, caCertPEM)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
webhookSecret.Data = map[string][]byte{
|
||||
"tls.crt": webhookCert,
|
||||
"tls.key": webhookKey,
|
||||
"ca.crt": caCertPEM,
|
||||
"ca.key": caPrivateKeyPEM,
|
||||
}
|
||||
|
||||
return s.ensureObject(ctx, webhookSecret)
|
||||
}
|
||||
|
||||
// if the webhook secret is found we can skip
|
||||
// we should check for their validity
|
||||
return nil
|
||||
}
|
||||
|
||||
func newWebhookSelfSignedCACerts() ([]byte, []byte, error) {
|
||||
// generate CA CERT/KEY
|
||||
caPrivateKeyPEM, err := certutil.MakeEllipticPrivateKeyPEM()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
caPrivateKey, err := certutil.ParsePrivateKeyPEM(caPrivateKeyPEM)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
cfg := certutil.Config{
|
||||
CommonName: fmt.Sprintf("k3k-webhook-ca@%d", time.Now().Unix()),
|
||||
}
|
||||
|
||||
caCert, err := certutil.NewSelfSignedCACert(cfg, caPrivateKey.(crypto.Signer))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
caCertPEM := certutil.EncodeCertPEM(caCert)
|
||||
|
||||
return caPrivateKeyPEM, caCertPEM, nil
|
||||
}
|
||||
|
||||
func newWebhookCerts(commonName string, subAltNames []string, caPrivateKey, caCert []byte) ([]byte, []byte, error) {
|
||||
// generate webhook cert bundle
|
||||
altNames := certs.AddSANs(subAltNames)
|
||||
oneYearExpiration := time.Until(time.Now().AddDate(1, 0, 0))
|
||||
|
||||
return certs.CreateClientCertKey(
|
||||
commonName,
|
||||
nil,
|
||||
&altNames,
|
||||
[]x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
|
||||
oneYearExpiration,
|
||||
string(caCert),
|
||||
string(caPrivateKey),
|
||||
)
|
||||
}
|
||||
|
||||
func WebhookSecretName(clusterName string) string {
|
||||
return controller.SafeConcatNameWithPrefix(clusterName, "webhook")
|
||||
}
|
||||
|
||||
@@ -4,7 +4,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"gopkg.in/yaml.v2"
|
||||
"gopkg.in/yaml.v3"
|
||||
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
@@ -17,7 +17,6 @@ func Test_sharedAgentData(t *testing.T) {
|
||||
serviceName string
|
||||
ip string
|
||||
kubeletPort int
|
||||
webhookPort int
|
||||
token string
|
||||
}
|
||||
|
||||
@@ -39,7 +38,6 @@ func Test_sharedAgentData(t *testing.T) {
|
||||
},
|
||||
},
|
||||
kubeletPort: 10250,
|
||||
webhookPort: 9443,
|
||||
ip: "10.0.0.21",
|
||||
serviceName: "service-name",
|
||||
token: "dnjklsdjnksd892389238",
|
||||
@@ -53,7 +51,6 @@ func Test_sharedAgentData(t *testing.T) {
|
||||
"version": "v1.2.3",
|
||||
"mirrorHostNodes": "false",
|
||||
"kubeletPort": "10250",
|
||||
"webhookPort": "9443",
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -73,7 +70,6 @@ func Test_sharedAgentData(t *testing.T) {
|
||||
},
|
||||
ip: "10.0.0.21",
|
||||
kubeletPort: 10250,
|
||||
webhookPort: 9443,
|
||||
serviceName: "service-name",
|
||||
token: "dnjklsdjnksd892389238",
|
||||
},
|
||||
@@ -86,7 +82,6 @@ func Test_sharedAgentData(t *testing.T) {
|
||||
"version": "v1.2.3",
|
||||
"mirrorHostNodes": "false",
|
||||
"kubeletPort": "10250",
|
||||
"webhookPort": "9443",
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -102,7 +97,6 @@ func Test_sharedAgentData(t *testing.T) {
|
||||
},
|
||||
},
|
||||
kubeletPort: 10250,
|
||||
webhookPort: 9443,
|
||||
ip: "10.0.0.21",
|
||||
serviceName: "service-name",
|
||||
token: "dnjklsdjnksd892389238",
|
||||
@@ -116,14 +110,13 @@ func Test_sharedAgentData(t *testing.T) {
|
||||
"version": "v1.3.3",
|
||||
"mirrorHostNodes": "false",
|
||||
"kubeletPort": "10250",
|
||||
"webhookPort": "9443",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
config := sharedAgentData(tt.args.cluster, tt.args.serviceName, tt.args.token, tt.args.ip, tt.args.kubeletPort, tt.args.webhookPort)
|
||||
config := sharedAgentData(tt.args.cluster, tt.args.serviceName, tt.args.token, tt.args.ip, tt.args.kubeletPort)
|
||||
|
||||
data := make(map[string]string)
|
||||
err := yaml.Unmarshal([]byte(config), data)
|
||||
|
||||
@@ -10,9 +10,11 @@ import (
|
||||
apps "k8s.io/api/apps/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/rancher/k3k/pkg/controller"
|
||||
"github.com/rancher/k3k/pkg/controller/cluster/mounts"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -98,6 +100,15 @@ func (v *VirtualAgent) deployment(ctx context.Context) error {
|
||||
"mode": "virtual",
|
||||
},
|
||||
}
|
||||
podSpec := v.podSpec(ctx, image, name)
|
||||
|
||||
if len(v.cluster.Spec.SecretMounts) > 0 {
|
||||
vols, volMounts := mounts.BuildSecretsMountsVolumes(v.cluster.Spec.SecretMounts, "agent")
|
||||
|
||||
podSpec.Volumes = append(podSpec.Volumes, vols...)
|
||||
|
||||
podSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts, volMounts...)
|
||||
}
|
||||
|
||||
deployment := &apps.Deployment{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
@@ -116,7 +127,7 @@ func (v *VirtualAgent) deployment(ctx context.Context) error {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: selector.MatchLabels,
|
||||
},
|
||||
Spec: v.podSpec(image, name, v.cluster.Spec.AgentArgs, &selector),
|
||||
Spec: podSpec,
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -124,12 +135,23 @@ func (v *VirtualAgent) deployment(ctx context.Context) error {
|
||||
return v.ensureObject(ctx, deployment)
|
||||
}
|
||||
|
||||
func (v *VirtualAgent) podSpec(image, name string, args []string, affinitySelector *metav1.LabelSelector) v1.PodSpec {
|
||||
func (v *VirtualAgent) podSpec(ctx context.Context, image, name string) v1.PodSpec {
|
||||
log := ctrl.LoggerFrom(ctx)
|
||||
var limit v1.ResourceList
|
||||
|
||||
args := v.cluster.Spec.AgentArgs
|
||||
args = append([]string{"agent", "--config", "/opt/rancher/k3s/config.yaml"}, args...)
|
||||
|
||||
// Use the agent affinity from the policy status if it exists, otherwise fall back to the spec.
|
||||
agentAffinity := v.cluster.Spec.AgentAffinity
|
||||
if v.cluster.Status.Policy != nil && v.cluster.Status.Policy.AgentAffinity != nil {
|
||||
log.V(1).Info("Using agent affinity from policy", "policyName", v.cluster.Status.PolicyName, "clusterName", v.cluster.Name)
|
||||
agentAffinity = v.cluster.Status.Policy.AgentAffinity
|
||||
}
|
||||
|
||||
podSpec := v1.PodSpec{
|
||||
Affinity: agentAffinity,
|
||||
NodeSelector: v.cluster.Spec.NodeSelector,
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: "config",
|
||||
@@ -250,5 +272,23 @@ func (v *VirtualAgent) podSpec(image, name string, args []string, affinitySelect
|
||||
podSpec.ImagePullSecrets = append(podSpec.ImagePullSecrets, v1.LocalObjectReference{Name: imagePullSecret})
|
||||
}
|
||||
|
||||
securityContext := v.cluster.Spec.SecurityContext
|
||||
if v.cluster.Status.Policy != nil && v.cluster.Status.Policy.SecurityContext != nil {
|
||||
log.V(1).Info("Using securityContext configuration from policy", "policyName", v.cluster.Status.PolicyName, "clusterName", v.cluster.Name)
|
||||
securityContext = v.cluster.Status.Policy.SecurityContext
|
||||
}
|
||||
|
||||
if securityContext != nil {
|
||||
podSpec.Containers[0].SecurityContext = securityContext
|
||||
}
|
||||
|
||||
runtimeClassName := v.cluster.Spec.RuntimeClassName
|
||||
if v.cluster.Status.Policy != nil && v.cluster.Status.Policy.RuntimeClassName != nil {
|
||||
log.V(1).Info("Using runtimeClassName from policy", "policyName", v.cluster.Status.PolicyName, "clusterName", v.cluster.Name)
|
||||
runtimeClassName = v.cluster.Status.Policy.RuntimeClassName
|
||||
}
|
||||
|
||||
podSpec.RuntimeClassName = runtimeClassName
|
||||
|
||||
return podSpec
|
||||
}
|
||||
|
||||
@@ -4,7 +4,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"gopkg.in/yaml.v2"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
func Test_virtualAgentData(t *testing.T) {
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/equality"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/discovery"
|
||||
@@ -28,6 +29,7 @@ import (
|
||||
v1 "k8s.io/api/core/v1"
|
||||
networkingv1 "k8s.io/api/networking/v1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
storagev1 "k8s.io/api/storage/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
@@ -43,16 +45,22 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
namePrefix = "k3k"
|
||||
clusterController = "k3k-cluster-controller"
|
||||
clusterFinalizerName = "cluster.k3k.io/finalizer"
|
||||
ClusterInvalidName = "system"
|
||||
|
||||
SyncEnabledLabelKey = "k3k.io/sync-enabled"
|
||||
SyncSourceLabelKey = "k3k.io/sync-source"
|
||||
SyncSourceHostLabel = "host"
|
||||
|
||||
defaultVirtualClusterCIDR = "10.52.0.0/16"
|
||||
defaultVirtualServiceCIDR = "10.53.0.0/16"
|
||||
defaultSharedClusterCIDR = "10.42.0.0/16"
|
||||
defaultSharedServiceCIDR = "10.43.0.0/16"
|
||||
memberRemovalTimeout = time.Minute * 1
|
||||
|
||||
storageClassEnabledIndexField = "spec.sync.storageClasses.enabled"
|
||||
storageClassStatusEnabledIndexField = "status.policy.sync.storageClasses.enabled"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -116,15 +124,82 @@ func Add(ctx context.Context, mgr manager.Manager, config *Config, maxConcurrent
|
||||
},
|
||||
}
|
||||
|
||||
// index the 'spec.sync.storageClasses.enabled' field
|
||||
err = mgr.GetCache().IndexField(ctx, &v1beta1.Cluster{}, storageClassEnabledIndexField, func(rawObj client.Object) []string {
|
||||
vc := rawObj.(*v1beta1.Cluster)
|
||||
|
||||
if vc.Spec.Sync != nil && vc.Spec.Sync.StorageClasses.Enabled {
|
||||
return []string{"true"}
|
||||
}
|
||||
|
||||
return []string{"false"}
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// index the 'status.policy.sync.storageClasses.enabled' field
|
||||
err = mgr.GetCache().IndexField(ctx, &v1beta1.Cluster{}, storageClassStatusEnabledIndexField, func(rawObj client.Object) []string {
|
||||
vc := rawObj.(*v1beta1.Cluster)
|
||||
|
||||
if vc.Status.Policy != nil && vc.Status.Policy.Sync != nil && vc.Status.Policy.Sync.StorageClasses.Enabled {
|
||||
return []string{"true"}
|
||||
}
|
||||
|
||||
return []string{"false"}
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
For(&v1beta1.Cluster{}).
|
||||
Watches(&v1.Namespace{}, namespaceEventHandler(&reconciler)).
|
||||
Watches(&storagev1.StorageClass{},
|
||||
handler.EnqueueRequestsFromMapFunc(reconciler.mapStorageClassToCluster),
|
||||
).
|
||||
Owns(&apps.StatefulSet{}).
|
||||
Owns(&v1.Service{}).
|
||||
WithOptions(ctrlcontroller.Options{MaxConcurrentReconciles: maxConcurrentReconciles}).
|
||||
Complete(&reconciler)
|
||||
}
|
||||
|
||||
func (r *ClusterReconciler) mapStorageClassToCluster(ctx context.Context, obj client.Object) []reconcile.Request {
|
||||
log := ctrl.LoggerFrom(ctx)
|
||||
|
||||
if _, ok := obj.(*storagev1.StorageClass); !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Merge and deduplicate clusters
|
||||
allClusters := make(map[types.NamespacedName]struct{})
|
||||
|
||||
var specClusterList v1beta1.ClusterList
|
||||
if err := r.Client.List(ctx, &specClusterList, client.MatchingFields{storageClassEnabledIndexField: "true"}); err != nil {
|
||||
log.Error(err, "error listing clusters with spec sync enabled for storageclass sync")
|
||||
} else {
|
||||
for _, cluster := range specClusterList.Items {
|
||||
allClusters[client.ObjectKeyFromObject(&cluster)] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
var statusClusterList v1beta1.ClusterList
|
||||
if err := r.Client.List(ctx, &statusClusterList, client.MatchingFields{storageClassStatusEnabledIndexField: "true"}); err != nil {
|
||||
log.Error(err, "error listing clusters with status sync enabled for storageclass sync")
|
||||
} else {
|
||||
for _, cluster := range statusClusterList.Items {
|
||||
allClusters[client.ObjectKeyFromObject(&cluster)] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
requests := make([]reconcile.Request, 0, len(allClusters))
|
||||
for key := range allClusters {
|
||||
requests = append(requests, reconcile.Request{NamespacedName: key})
|
||||
}
|
||||
|
||||
return requests
|
||||
}
|
||||
|
||||
func namespaceEventHandler(r *ClusterReconciler) handler.Funcs {
|
||||
return handler.Funcs{
|
||||
// We don't need to update for create or delete events
|
||||
@@ -351,11 +426,22 @@ func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1beta1.Clus
|
||||
return err
|
||||
}
|
||||
|
||||
if err := c.bindClusterRoles(ctx, cluster); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := c.ensureKubeconfigSecret(ctx, cluster, serviceIP, 443); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return c.bindClusterRoles(ctx, cluster)
|
||||
// Important: if you need to call the Server API of the Virtual Cluster
|
||||
// this needs to be done AFTER the kubeconfig has been generated
|
||||
|
||||
if err := c.ensureStorageClasses(ctx, cluster); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ensureBootstrapSecret will create or update the Secret containing the bootstrap data from the k3s server
|
||||
@@ -509,6 +595,10 @@ func (c *ClusterReconciler) ensureNetworkPolicy(ctx context.Context, cluster *v1
|
||||
Except: []string{cluster.Status.ClusterCIDR},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
To: []networkingv1.NetworkPolicyPeer{
|
||||
{
|
||||
NamespaceSelector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
@@ -516,6 +606,10 @@ func (c *ClusterReconciler) ensureNetworkPolicy(ctx context.Context, cluster *v1
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
To: []networkingv1.NetworkPolicyPeer{
|
||||
{
|
||||
NamespaceSelector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
@@ -621,6 +715,120 @@ func (c *ClusterReconciler) ensureIngress(ctx context.Context, cluster *v1beta1.
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *ClusterReconciler) ensureStorageClasses(ctx context.Context, cluster *v1beta1.Cluster) error {
|
||||
log := ctrl.LoggerFrom(ctx)
|
||||
log.V(1).Info("Ensuring cluster StorageClasses")
|
||||
|
||||
virtualClient, err := newVirtualClient(ctx, c.Client, cluster.Name, cluster.Namespace)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed creating virtual client: %w", err)
|
||||
}
|
||||
|
||||
appliedSync := cluster.Spec.Sync.DeepCopy()
|
||||
|
||||
// If a policy is applied to the virtual cluster we need to use its SyncConfig, if available
|
||||
if cluster.Status.Policy != nil && cluster.Status.Policy.Sync != nil {
|
||||
appliedSync = cluster.Status.Policy.Sync
|
||||
}
|
||||
|
||||
// If storageclass sync is disabled, clean up any managed storage classes.
|
||||
if appliedSync == nil || !appliedSync.StorageClasses.Enabled {
|
||||
err := virtualClient.DeleteAllOf(ctx, &storagev1.StorageClass{}, client.MatchingLabels{SyncSourceLabelKey: SyncSourceHostLabel})
|
||||
return client.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
var hostStorageClasses storagev1.StorageClassList
|
||||
if err := c.Client.List(ctx, &hostStorageClasses); err != nil {
|
||||
return fmt.Errorf("failed listing host storageclasses: %w", err)
|
||||
}
|
||||
|
||||
// filter out the StorageClasses with sync disabled, and the ones not matching the selector
|
||||
filteredHostStorageClasses := make(map[string]storagev1.StorageClass)
|
||||
|
||||
for _, sc := range hostStorageClasses.Items {
|
||||
syncEnabled, found := sc.Labels[SyncEnabledLabelKey]
|
||||
|
||||
// if sync is disabled -> continue
|
||||
if found && syncEnabled != "true" {
|
||||
log.V(1).Info("sync is disabled", "sc-name", sc.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
// if selector doesn't match -> continue
|
||||
// an empty selector matches everything
|
||||
selector := labels.SelectorFromSet(appliedSync.StorageClasses.Selector)
|
||||
if !selector.Matches(labels.Set(sc.Labels)) {
|
||||
log.V(1).Info("selector not matching", "sc-name", sc.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
log.V(1).Info("keeping storageclass", "sc-name", sc.Name)
|
||||
|
||||
filteredHostStorageClasses[sc.Name] = sc
|
||||
}
|
||||
|
||||
var virtStorageClasses storagev1.StorageClassList
|
||||
if err = virtualClient.List(ctx, &virtStorageClasses, client.MatchingLabels{SyncSourceLabelKey: SyncSourceHostLabel}); err != nil {
|
||||
return fmt.Errorf("failed listing virtual storageclasses: %w", err)
|
||||
}
|
||||
|
||||
// delete virtual StorageClasses that are no longer synced from the host
|
||||
|
||||
for _, sc := range virtStorageClasses.Items {
|
||||
if _, found := filteredHostStorageClasses[sc.Name]; !found {
|
||||
log.V(1).Info("deleting storageclass", "sc-name", sc.Name)
|
||||
|
||||
if errDelete := virtualClient.Delete(ctx, &sc); errDelete != nil {
|
||||
log.Error(errDelete, "failed to delete virtual storageclass", "name", sc.Name)
|
||||
err = errors.Join(err, errDelete)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, hostSc := range filteredHostStorageClasses {
|
||||
log.V(1).Info("updating storageclass", "sc-name", hostSc.Name)
|
||||
|
||||
virtualSc := hostSc.DeepCopy()
|
||||
|
||||
virtualSc.ObjectMeta = metav1.ObjectMeta{
|
||||
Name: hostSc.Name,
|
||||
Labels: hostSc.Labels,
|
||||
Annotations: hostSc.Annotations,
|
||||
}
|
||||
|
||||
_, errCreateOrUpdate := controllerutil.CreateOrUpdate(ctx, virtualClient, virtualSc, func() error {
|
||||
virtualSc.Annotations = hostSc.Annotations
|
||||
|
||||
virtualSc.Labels = hostSc.Labels
|
||||
if len(virtualSc.Labels) == 0 {
|
||||
virtualSc.Labels = make(map[string]string)
|
||||
}
|
||||
|
||||
virtualSc.Labels[SyncSourceLabelKey] = SyncSourceHostLabel
|
||||
|
||||
virtualSc.Provisioner = hostSc.Provisioner
|
||||
virtualSc.Parameters = hostSc.Parameters
|
||||
virtualSc.ReclaimPolicy = hostSc.ReclaimPolicy
|
||||
virtualSc.MountOptions = hostSc.MountOptions
|
||||
virtualSc.AllowVolumeExpansion = hostSc.AllowVolumeExpansion
|
||||
virtualSc.VolumeBindingMode = hostSc.VolumeBindingMode
|
||||
virtualSc.AllowedTopologies = hostSc.AllowedTopologies
|
||||
|
||||
return nil
|
||||
})
|
||||
if errCreateOrUpdate != nil {
|
||||
log.Error(errCreateOrUpdate, "failed to create or update virtual storageclass", "name", virtualSc.Name)
|
||||
err = errors.Join(err, errCreateOrUpdate)
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to sync storageclasses: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *ClusterReconciler) server(ctx context.Context, cluster *v1beta1.Cluster, server *server.Server) error {
|
||||
log := ctrl.LoggerFrom(ctx)
|
||||
|
||||
@@ -702,7 +910,6 @@ func (c *ClusterReconciler) ensureAgent(ctx context.Context, cluster *v1beta1.Cl
|
||||
} else {
|
||||
// Assign a port from the pool if the shared agent has host-node mirroring enabled
|
||||
kubeletPort := 10250
|
||||
webhookPort := 9443
|
||||
|
||||
if cluster.Spec.MirrorHostNodes {
|
||||
var err error
|
||||
@@ -713,16 +920,9 @@ func (c *ClusterReconciler) ensureAgent(ctx context.Context, cluster *v1beta1.Cl
|
||||
}
|
||||
|
||||
cluster.Status.KubeletPort = kubeletPort
|
||||
|
||||
webhookPort, err = c.PortAllocator.AllocateWebhookPort(ctx, cluster.Name, cluster.Namespace)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cluster.Status.WebhookPort = webhookPort
|
||||
}
|
||||
|
||||
agentEnsurer = agent.NewSharedAgent(config, serviceIP, c.SharedAgentImage, c.SharedAgentImagePullPolicy, token, kubeletPort, webhookPort, c.AgentImagePullSecrets)
|
||||
agentEnsurer = agent.NewSharedAgent(config, serviceIP, c.SharedAgentImage, c.SharedAgentImagePullPolicy, token, kubeletPort, c.AgentImagePullSecrets)
|
||||
}
|
||||
|
||||
return agentEnsurer.EnsureResources(ctx)
|
||||
@@ -743,11 +943,6 @@ func (c *ClusterReconciler) validate(cluster *v1beta1.Cluster, policy v1beta1.Vi
|
||||
}
|
||||
}
|
||||
|
||||
// validate sync policy
|
||||
if !equality.Semantic.DeepEqual(cluster.Spec.Sync, policy.Spec.Sync) {
|
||||
return fmt.Errorf("sync configuration %v is not allowed by the policy %q", cluster.Spec.Sync, policy.Name)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -40,17 +40,13 @@ func (c *ClusterReconciler) finalizeCluster(ctx context.Context, cluster *v1beta
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
// Deallocate ports for kubelet and webhook if used
|
||||
// Deallocate ports for kubelet if used
|
||||
if cluster.Spec.Mode == v1beta1.SharedClusterMode && cluster.Spec.MirrorHostNodes {
|
||||
log.V(1).Info("dellocating ports for kubelet and webhook")
|
||||
log.V(1).Info("deallocating ports for kubelet")
|
||||
|
||||
if err := c.PortAllocator.DeallocateKubeletPort(ctx, cluster.Name, cluster.Namespace, cluster.Status.KubeletPort); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
if err := c.PortAllocator.DeallocateWebhookPort(ctx, cluster.Name, cluster.Namespace, cluster.Status.WebhookPort); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
// delete API server lease
|
||||
|
||||
60
pkg/controller/cluster/mounts/mounts.go
Normal file
60
pkg/controller/cluster/mounts/mounts.go
Normal file
@@ -0,0 +1,60 @@
|
||||
package mounts
|
||||
|
||||
import (
|
||||
v1 "k8s.io/api/core/v1"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
|
||||
)
|
||||
|
||||
func BuildSecretsMountsVolumes(secretMounts []v1beta1.SecretMount, role string) ([]v1.Volume, []v1.VolumeMount) {
|
||||
var (
|
||||
vols []v1.Volume
|
||||
volMounts []v1.VolumeMount
|
||||
)
|
||||
|
||||
for _, secretMount := range secretMounts {
|
||||
if secretMount.SecretName == "" || secretMount.MountPath == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
if secretMount.Role == role || secretMount.Role == "" || secretMount.Role == "all" {
|
||||
vol, volMount := buildSecretMountVolume(secretMount)
|
||||
|
||||
vols = append(vols, vol)
|
||||
volMounts = append(volMounts, volMount)
|
||||
}
|
||||
}
|
||||
|
||||
return vols, volMounts
|
||||
}
|
||||
|
||||
func buildSecretMountVolume(secretMount v1beta1.SecretMount) (v1.Volume, v1.VolumeMount) {
|
||||
projectedVolSources := []v1.VolumeProjection{
|
||||
{
|
||||
Secret: &v1.SecretProjection{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: secretMount.SecretName,
|
||||
},
|
||||
Items: secretMount.Items,
|
||||
Optional: secretMount.Optional,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
vol := v1.Volume{
|
||||
Name: secretMount.SecretName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Projected: &v1.ProjectedVolumeSource{
|
||||
Sources: projectedVolSources,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
volMount := v1.VolumeMount{
|
||||
Name: secretMount.SecretName,
|
||||
MountPath: secretMount.MountPath,
|
||||
SubPath: secretMount.SubPath,
|
||||
}
|
||||
|
||||
return vol, volMount
|
||||
}
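A short usage sketch of the new helper, mirroring how the agent controllers in this change consume it; the surrounding cluster and podSpec variables are assumed to already exist:

// Build volumes and mounts for secrets targeting the "server" role,
// then attach them to an existing pod spec.
vols, volMounts := mounts.BuildSecretsMountsVolumes(cluster.Spec.SecretMounts, "server")
podSpec.Volumes = append(podSpec.Volumes, vols...)
podSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts, volMounts...)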
|
||||
523
pkg/controller/cluster/mounts/mounts_test.go
Normal file
@@ -0,0 +1,523 @@
|
||||
package mounts
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
|
||||
)
|
||||
|
||||
func Test_BuildSecretMountsVolume(t *testing.T) {
|
||||
type args struct {
|
||||
secretMounts []v1beta1.SecretMount
|
||||
role string
|
||||
}
|
||||
|
||||
type expectedVolumes struct {
|
||||
vols []v1.Volume
|
||||
volMounts []v1.VolumeMount
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
expectedData expectedVolumes
|
||||
}{
|
||||
{
|
||||
name: "empty secret mounts",
|
||||
args: args{
|
||||
secretMounts: []v1beta1.SecretMount{},
|
||||
role: "server",
|
||||
},
|
||||
expectedData: expectedVolumes{
|
||||
vols: nil,
|
||||
volMounts: nil,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "nil secret mounts",
|
||||
args: args{
|
||||
secretMounts: nil,
|
||||
role: "server",
|
||||
},
|
||||
expectedData: expectedVolumes{
|
||||
vols: nil,
|
||||
volMounts: nil,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "single secret mount with no role specified defaults to all",
|
||||
args: args{
|
||||
secretMounts: []v1beta1.SecretMount{
|
||||
{
|
||||
SecretVolumeSource: v1.SecretVolumeSource{
|
||||
SecretName: "secret-1",
|
||||
},
|
||||
MountPath: "/mount-dir-1",
|
||||
},
|
||||
},
|
||||
role: "server",
|
||||
},
|
||||
expectedData: expectedVolumes{
|
||||
vols: []v1.Volume{
|
||||
expectedVolume("secret-1", nil),
|
||||
},
|
||||
volMounts: []v1.VolumeMount{
|
||||
expectedVolumeMount("secret-1", "/mount-dir-1", ""),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "multiple secrets mounts with no role specified defaults to all",
|
||||
args: args{
|
||||
secretMounts: []v1beta1.SecretMount{
|
||||
{
|
||||
SecretVolumeSource: v1.SecretVolumeSource{
|
||||
SecretName: "secret-1",
|
||||
},
|
||||
MountPath: "/mount-dir-1",
|
||||
},
|
||||
{
|
||||
SecretVolumeSource: v1.SecretVolumeSource{
|
||||
SecretName: "secret-2",
|
||||
},
|
||||
MountPath: "/mount-dir-2",
|
||||
},
|
||||
},
|
||||
role: "server",
|
||||
},
|
||||
expectedData: expectedVolumes{
|
||||
vols: []v1.Volume{
|
||||
expectedVolume("secret-1", nil),
|
||||
expectedVolume("secret-2", nil),
|
||||
},
|
||||
volMounts: []v1.VolumeMount{
|
||||
expectedVolumeMount("secret-1", "/mount-dir-1", ""),
|
||||
expectedVolumeMount("secret-2", "/mount-dir-2", ""),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "single secret mount with items",
|
||||
args: args{
|
||||
secretMounts: []v1beta1.SecretMount{
|
||||
{
|
||||
SecretVolumeSource: v1.SecretVolumeSource{
|
||||
SecretName: "secret-1",
|
||||
Items: []v1.KeyToPath{
|
||||
{
|
||||
Key: "key-1",
|
||||
Path: "path-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
MountPath: "/mount-dir-1",
|
||||
},
|
||||
},
|
||||
role: "server",
|
||||
},
|
||||
expectedData: expectedVolumes{
|
||||
vols: []v1.Volume{
|
||||
expectedVolume("secret-1", []v1.KeyToPath{{Key: "key-1", Path: "path-1"}}),
|
||||
},
|
||||
volMounts: []v1.VolumeMount{
|
||||
expectedVolumeMount("secret-1", "/mount-dir-1", ""),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "multiple secret mounts with items",
|
||||
args: args{
|
||||
secretMounts: []v1beta1.SecretMount{
|
||||
{
|
||||
SecretVolumeSource: v1.SecretVolumeSource{
|
||||
SecretName: "secret-1",
|
||||
Items: []v1.KeyToPath{
|
||||
{
|
||||
Key: "key-1",
|
||||
Path: "path-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
MountPath: "/mount-dir-1",
|
||||
},
|
||||
{
|
||||
SecretVolumeSource: v1.SecretVolumeSource{
|
||||
SecretName: "secret-2",
|
||||
Items: []v1.KeyToPath{
|
||||
{
|
||||
Key: "key-2",
|
||||
Path: "path-2",
|
||||
},
|
||||
},
|
||||
},
|
||||
MountPath: "/mount-dir-2",
|
||||
},
|
||||
},
|
||||
role: "server",
|
||||
},
|
||||
expectedData: expectedVolumes{
|
||||
vols: []v1.Volume{
|
||||
expectedVolume("secret-1", []v1.KeyToPath{{Key: "key-1", Path: "path-1"}}),
|
||||
expectedVolume("secret-2", []v1.KeyToPath{{Key: "key-2", Path: "path-2"}}),
|
||||
},
|
||||
volMounts: []v1.VolumeMount{
|
||||
expectedVolumeMount("secret-1", "/mount-dir-1", ""),
|
||||
expectedVolumeMount("secret-2", "/mount-dir-2", ""),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "user will specify the order",
|
||||
args: args{
|
||||
secretMounts: []v1beta1.SecretMount{
|
||||
{
|
||||
SecretVolumeSource: v1.SecretVolumeSource{
|
||||
SecretName: "z-secret",
|
||||
},
|
||||
MountPath: "/z",
|
||||
},
|
||||
{
|
||||
SecretVolumeSource: v1.SecretVolumeSource{
|
||||
SecretName: "a-secret",
|
||||
},
|
||||
MountPath: "/a",
|
||||
},
|
||||
{
|
||||
SecretVolumeSource: v1.SecretVolumeSource{
|
||||
SecretName: "m-secret",
|
||||
},
|
||||
MountPath: "/m",
|
||||
},
|
||||
},
|
||||
role: "server",
|
||||
},
|
||||
expectedData: expectedVolumes{
|
||||
vols: []v1.Volume{
|
||||
expectedVolume("z-secret", nil),
|
||||
expectedVolume("a-secret", nil),
|
||||
expectedVolume("m-secret", nil),
|
||||
},
|
||||
volMounts: []v1.VolumeMount{
|
||||
expectedVolumeMount("z-secret", "/z", ""),
|
||||
expectedVolumeMount("a-secret", "/a", ""),
|
||||
expectedVolumeMount("m-secret", "/m", ""),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "skip entries with empty secret name",
|
||||
args: args{
|
||||
secretMounts: []v1beta1.SecretMount{
|
||||
{
|
||||
MountPath: "/mount-dir-1",
|
||||
},
|
||||
{
|
||||
SecretVolumeSource: v1.SecretVolumeSource{
|
||||
SecretName: "secret-2",
|
||||
},
|
||||
MountPath: "/mount-dir-2",
|
||||
},
|
||||
},
|
||||
role: "server",
|
||||
},
|
||||
expectedData: expectedVolumes{
|
||||
vols: []v1.Volume{
|
||||
expectedVolume("secret-2", nil),
|
||||
},
|
||||
volMounts: []v1.VolumeMount{
|
||||
expectedVolumeMount("secret-2", "/mount-dir-2", ""),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "skip entries with empty mount path",
|
||||
args: args{
|
||||
secretMounts: []v1beta1.SecretMount{
|
||||
{
|
||||
SecretVolumeSource: v1.SecretVolumeSource{
|
||||
SecretName: "secret-1",
|
||||
},
|
||||
MountPath: "",
|
||||
},
|
||||
{
|
||||
SecretVolumeSource: v1.SecretVolumeSource{
|
||||
SecretName: "secret-2",
|
||||
},
|
||||
MountPath: "/mount-dir-2",
|
||||
},
|
||||
},
|
||||
role: "server",
|
||||
},
|
||||
expectedData: expectedVolumes{
|
||||
vols: []v1.Volume{
|
||||
expectedVolume("secret-2", nil),
|
||||
},
|
||||
volMounts: []v1.VolumeMount{
|
||||
expectedVolumeMount("secret-2", "/mount-dir-2", ""),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "secret mount with subPath",
|
||||
args: args{
|
||||
secretMounts: []v1beta1.SecretMount{
|
||||
{
|
||||
SecretVolumeSource: v1.SecretVolumeSource{
|
||||
SecretName: "secret-1",
|
||||
},
|
||||
MountPath: "/etc/rancher/k3s/registries.yaml",
|
||||
SubPath: "registries.yaml",
|
||||
},
|
||||
},
|
||||
role: "server",
|
||||
},
|
||||
expectedData: expectedVolumes{
|
||||
vols: []v1.Volume{
|
||||
expectedVolume("secret-1", nil),
|
||||
},
|
||||
volMounts: []v1.VolumeMount{
|
||||
expectedVolumeMount("secret-1", "/etc/rancher/k3s/registries.yaml", "registries.yaml"),
|
||||
},
|
||||
},
|
||||
},
|
||||
// Role-based filtering tests
|
||||
{
|
||||
name: "role server includes only server and all roles",
|
||||
args: args{
|
||||
secretMounts: []v1beta1.SecretMount{
|
||||
{
|
||||
SecretVolumeSource: v1.SecretVolumeSource{
|
||||
SecretName: "server-secret",
|
||||
},
|
||||
MountPath: "/server",
|
||||
Role: "server",
|
||||
},
|
||||
{
|
||||
SecretVolumeSource: v1.SecretVolumeSource{
|
||||
SecretName: "agent-secret",
|
||||
},
|
||||
MountPath: "/agent",
|
||||
Role: "agent",
|
||||
},
|
||||
{
|
||||
SecretVolumeSource: v1.SecretVolumeSource{
|
||||
SecretName: "all-secret",
|
||||
},
|
||||
MountPath: "/all",
|
||||
Role: "all",
|
||||
},
|
||||
},
|
||||
role: "server",
|
||||
},
|
||||
expectedData: expectedVolumes{
|
||||
vols: []v1.Volume{
|
||||
expectedVolume("server-secret", nil),
|
||||
expectedVolume("all-secret", nil),
|
||||
},
|
||||
volMounts: []v1.VolumeMount{
|
||||
expectedVolumeMount("server-secret", "/server", ""),
|
||||
expectedVolumeMount("all-secret", "/all", ""),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "role agent includes only agent and all roles",
|
||||
args: args{
|
||||
secretMounts: []v1beta1.SecretMount{
|
||||
{
|
||||
SecretVolumeSource: v1.SecretVolumeSource{
|
||||
SecretName: "server-secret",
|
||||
},
|
||||
MountPath: "/server",
|
||||
Role: "server",
|
||||
},
|
||||
{
|
||||
SecretVolumeSource: v1.SecretVolumeSource{
|
||||
SecretName: "agent-secret",
|
||||
},
|
||||
MountPath: "/agent",
|
||||
Role: "agent",
|
||||
},
|
||||
{
|
||||
SecretVolumeSource: v1.SecretVolumeSource{
|
||||
SecretName: "all-secret",
|
||||
},
|
||||
MountPath: "/all",
|
||||
Role: "all",
|
||||
},
|
||||
},
|
||||
role: "agent",
|
||||
},
|
||||
expectedData: expectedVolumes{
|
||||
vols: []v1.Volume{
|
||||
expectedVolume("agent-secret", nil),
|
||||
expectedVolume("all-secret", nil),
|
||||
},
|
||||
volMounts: []v1.VolumeMount{
|
||||
expectedVolumeMount("agent-secret", "/agent", ""),
|
||||
expectedVolumeMount("all-secret", "/all", ""),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "empty role in secret mount defaults to all",
|
||||
args: args{
|
||||
secretMounts: []v1beta1.SecretMount{
|
||||
{
|
||||
SecretVolumeSource: v1.SecretVolumeSource{
|
||||
SecretName: "no-role-secret",
|
||||
},
|
||||
MountPath: "/no-role",
|
||||
Role: "",
|
||||
},
|
||||
{
|
||||
SecretVolumeSource: v1.SecretVolumeSource{
|
||||
SecretName: "server-secret",
|
||||
},
|
||||
MountPath: "/server",
|
||||
Role: "server",
|
||||
},
|
||||
},
|
||||
role: "agent",
|
||||
},
|
||||
expectedData: expectedVolumes{
|
||||
vols: []v1.Volume{
|
||||
expectedVolume("no-role-secret", nil),
|
||||
},
|
||||
volMounts: []v1.VolumeMount{
|
||||
expectedVolumeMount("no-role-secret", "/no-role", ""),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "mixed roles with server filter",
|
||||
args: args{
|
||||
secretMounts: []v1beta1.SecretMount{
|
||||
{
|
||||
SecretVolumeSource: v1.SecretVolumeSource{
|
||||
SecretName: "registry-config",
|
||||
},
|
||||
MountPath: "/etc/rancher/k3s/registries.yaml",
|
||||
SubPath: "registries.yaml",
|
||||
Role: "all",
|
||||
},
|
||||
{
|
||||
SecretVolumeSource: v1.SecretVolumeSource{
|
||||
SecretName: "server-config",
|
||||
},
|
||||
MountPath: "/etc/server",
|
||||
Role: "server",
|
||||
},
|
||||
{
|
||||
SecretVolumeSource: v1.SecretVolumeSource{
|
||||
SecretName: "agent-config",
|
||||
},
|
||||
MountPath: "/etc/agent",
|
||||
Role: "agent",
|
||||
},
|
||||
},
|
||||
role: "server",
|
||||
},
|
||||
expectedData: expectedVolumes{
|
||||
vols: []v1.Volume{
|
||||
expectedVolume("registry-config", nil),
|
||||
expectedVolume("server-config", nil),
|
||||
},
|
||||
volMounts: []v1.VolumeMount{
|
||||
expectedVolumeMount("registry-config", "/etc/rancher/k3s/registries.yaml", "registries.yaml"),
|
||||
expectedVolumeMount("server-config", "/etc/server", ""),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "all secrets have role all",
|
||||
args: args{
|
||||
secretMounts: []v1beta1.SecretMount{
|
||||
{
|
||||
SecretVolumeSource: v1.SecretVolumeSource{
|
||||
SecretName: "secret-1",
|
||||
},
|
||||
MountPath: "/secret-1",
|
||||
Role: "all",
|
||||
},
|
||||
{
|
||||
SecretVolumeSource: v1.SecretVolumeSource{
|
||||
SecretName: "secret-2",
|
||||
},
|
||||
MountPath: "/secret-2",
|
||||
Role: "all",
|
||||
},
|
||||
},
|
||||
role: "server",
|
||||
},
|
||||
expectedData: expectedVolumes{
|
||||
vols: []v1.Volume{
|
||||
expectedVolume("secret-1", nil),
|
||||
expectedVolume("secret-2", nil),
|
||||
},
|
||||
volMounts: []v1.VolumeMount{
|
||||
expectedVolumeMount("secret-1", "/secret-1", ""),
|
||||
expectedVolumeMount("secret-2", "/secret-2", ""),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "no secrets match agent role",
|
||||
args: args{
|
||||
secretMounts: []v1beta1.SecretMount{
|
||||
{
|
||||
SecretVolumeSource: v1.SecretVolumeSource{
|
||||
SecretName: "server-only",
|
||||
},
|
||||
MountPath: "/server-only",
|
||||
Role: "server",
|
||||
},
|
||||
},
|
||||
role: "agent",
|
||||
},
|
||||
expectedData: expectedVolumes{
|
||||
vols: nil,
|
||||
volMounts: nil,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
vols, volMounts := BuildSecretsMountsVolumes(tt.args.secretMounts, tt.args.role)
|
||||
|
||||
assert.Equal(t, tt.expectedData.vols, vols)
|
||||
assert.Equal(t, tt.expectedData.volMounts, volMounts)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func expectedVolume(name string, items []v1.KeyToPath) v1.Volume {
|
||||
return v1.Volume{
|
||||
Name: name,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Projected: &v1.ProjectedVolumeSource{
|
||||
Sources: []v1.VolumeProjection{
|
||||
{Secret: &v1.SecretProjection{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: name,
|
||||
},
|
||||
Items: items,
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func expectedVolumeMount(name, mountPath, subPath string) v1.VolumeMount {
|
||||
return v1.VolumeMount{
|
||||
Name: name,
|
||||
MountPath: mountPath,
|
||||
SubPath: subPath,
|
||||
}
|
||||
}
|
||||
@@ -81,7 +81,7 @@ func serverOptions(cluster *v1beta1.Cluster, token string) string {
}

if cluster.Spec.Mode != agent.VirtualNodeMode {
opts = opts + "disable-agent: true\negress-selector-mode: disabled\ndisable:\n- servicelb\n- traefik\n- metrics-server\n- local-storage"
opts = opts + "disable-agent: true\negress-selector-mode: disabled\ndisable:\n- servicelb\n- traefik\n- metrics-server\n- local-storage\n"
}
// TODO: Add extra args to the options

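The only functional change in this hunk is the trailing \n on the options string. Without it, whatever is concatenated next would be glued onto the final list item; a hypothetical sketch of the failure mode (not the actual k3k code path, and the second option is an arbitrary example):

opts := "disable:\n- servicelb\n- traefik\n- metrics-server\n- local-storage" // old value, no trailing newline
opts += "write-kubeconfig-mode: \"0644\"\n"                                   // some later option
// opts now contains "- local-storagewrite-kubeconfig-mode: ...", which is invalid YAML.
// The added "\n" ensures the next option starts on its own line.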
@@ -8,7 +8,6 @@ import (
"strings"
"text/template"

"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/utils/ptr"
@@ -18,17 +17,18 @@ import (
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"

"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
"github.com/rancher/k3k/pkg/controller/cluster/mounts"
)

const (
k3kSystemNamespace = "k3k-system"
serverName = "server"
configName = "server-config"
initConfigName = "init-server-config"
serverName = "server"
configName = "server-config"
initConfigName = "init-server-config"
)

// Server
@@ -54,8 +54,18 @@ func New(cluster *v1beta1.Cluster, client client.Client, token, image, imagePull
}
}

func (s *Server) podSpec(image, name string, persistent bool, startupCmd string) v1.PodSpec {
func (s *Server) podSpec(ctx context.Context, image, name string, persistent bool, startupCmd string) v1.PodSpec {
log := ctrl.LoggerFrom(ctx)

// Use the server affinity from the policy status if it exists, otherwise fall back to the spec.
serverAffinity := s.cluster.Spec.ServerAffinity
if s.cluster.Status.Policy != nil && s.cluster.Status.Policy.ServerAffinity != nil {
log.V(1).Info("Using server affinity from policy", "policyName", s.cluster.Status.PolicyName, "clusterName", s.cluster.Name)
serverAffinity = s.cluster.Status.Policy.ServerAffinity
}

podSpec := v1.PodSpec{
Affinity: serverAffinity,
NodeSelector: s.cluster.Spec.NodeSelector,
PriorityClassName: s.cluster.Spec.PriorityClass,
Volumes: []v1.Volume{
@@ -236,6 +246,24 @@ func (s *Server) podSpec(image, name string, persistent bool, startupCmd string)
}
}

securityContext := s.cluster.Spec.SecurityContext
if s.cluster.Status.Policy != nil && s.cluster.Status.Policy.SecurityContext != nil {
log.V(1).Info("Using securityContext configuration from policy", "policyName", s.cluster.Status.PolicyName, "clusterName", s.cluster.Name)
securityContext = s.cluster.Status.Policy.SecurityContext
}

if securityContext != nil {
podSpec.Containers[0].SecurityContext = securityContext
}

runtimeClassName := s.cluster.Spec.RuntimeClassName
if s.cluster.Status.Policy != nil && s.cluster.Status.Policy.RuntimeClassName != nil {
log.V(1).Info("Using runtimeClassName from policy", "policyName", s.cluster.Status.PolicyName, "clusterName", s.cluster.Name)
runtimeClassName = s.cluster.Status.Policy.RuntimeClassName
}

podSpec.RuntimeClassName = runtimeClassName

// specify resource limits if specified for the servers.
if s.cluster.Spec.ServerLimit != nil {
podSpec.Containers[0].Resources = v1.ResourceRequirements{
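The affinity block above is the first of three identical policy-over-spec fallbacks in this function (securityContext and runtimeClassName follow in the same hunk). Conceptually each one is a nil-guarded preference for the value recorded in the policy status, as in this hypothetical helper (not part of the change):

// fromPolicyOrSpec returns the policy value when set, otherwise the cluster spec value.
func fromPolicyOrSpec[T any](specVal, policyVal *T) *T {
    if policyVal != nil {
        return policyVal
    }
    return specVal
}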
@@ -280,67 +308,31 @@ func (s *Server) StatefulServer(ctx context.Context) (*apps.StatefulSet, error)
volumeMounts []v1.VolumeMount
)

for _, addon := range s.cluster.Spec.Addons {
namespace := k3kSystemNamespace
if addon.SecretNamespace != "" {
namespace = addon.SecretNamespace
}

nn := types.NamespacedName{
Name: addon.SecretRef,
Namespace: namespace,
}

var addons v1.Secret
if err := s.client.Get(ctx, nn, &addons); err != nil {
return nil, err
}

clusterAddons := v1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: addons.Name,
Namespace: s.cluster.Namespace,
},
Data: make(map[string][]byte, len(addons.Data)),
}
for k, v := range addons.Data {
clusterAddons.Data[k] = v
}

if err := s.client.Create(ctx, &clusterAddons); err != nil {
return nil, err
}

name := "varlibrancherk3smanifests" + addon.SecretRef
volume := v1.Volume{
Name: name,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: addon.SecretRef,
},
},
}
volumes = append(volumes, volume)

volumeMount := v1.VolumeMount{
Name: name,
MountPath: "/var/lib/rancher/k3s/server/manifests/" + addon.SecretRef,
// changes to this part of the filesystem shouldn't be done manually. The secret should be updated instead.
ReadOnly: true,
}
volumeMounts = append(volumeMounts, volumeMount)
}

if s.cluster.Spec.CustomCAs != nil && s.cluster.Spec.CustomCAs.Enabled {
vols, mounts, err := s.loadCACertBundle(ctx)
if len(s.cluster.Spec.Addons) > 0 {
addonsVols, addonsMounts, err := s.buildAddonsVolumes(ctx)
if err != nil {
return nil, err
}

volumes = append(volumes, addonsVols...)

volumeMounts = append(volumeMounts, addonsMounts...)
}

if s.cluster.Spec.CustomCAs != nil && s.cluster.Spec.CustomCAs.Enabled {
vols, mounts, err := s.buildCABundleVolumes(ctx)
if err != nil {
return nil, err
}

volumes = append(volumes, vols...)

volumeMounts = append(volumeMounts, mounts...)
}

if len(s.cluster.Spec.SecretMounts) > 0 {
vols, mounts := mounts.BuildSecretsMountsVolumes(s.cluster.Spec.SecretMounts, "server")

volumes = append(volumes, vols...)

volumeMounts = append(volumeMounts, mounts...)
@@ -358,7 +350,7 @@ func (s *Server) StatefulServer(ctx context.Context) (*apps.StatefulSet, error)
return nil, err
}

podSpec := s.podSpec(image, name, persistent, startupCommand)
podSpec := s.podSpec(ctx, image, name, persistent, startupCommand)
podSpec.Volumes = append(podSpec.Volumes, volumes...)
podSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts, volumeMounts...)

@@ -406,7 +398,7 @@ func (s *Server) setupDynamicPersistence() v1.PersistentVolumeClaim {
StorageClassName: s.cluster.Spec.Persistence.StorageClassName,
Resources: v1.VolumeResourceRequirements{
Requests: v1.ResourceList{
"storage": resource.MustParse(s.cluster.Spec.Persistence.StorageRequestSize),
"storage": *s.cluster.Spec.Persistence.StorageRequestSize,
},
},
},
@@ -442,7 +434,7 @@ func (s *Server) setupStartCommand() (string, error) {
return output.String(), nil
}

func (s *Server) loadCACertBundle(ctx context.Context) ([]v1.Volume, []v1.VolumeMount, error) {
func (s *Server) buildCABundleVolumes(ctx context.Context) ([]v1.Volume, []v1.VolumeMount, error) {
if s.cluster.Spec.CustomCAs == nil {
return nil, nil, fmt.Errorf("customCAs not found")
}
@@ -534,6 +526,71 @@ func (s *Server) mountCACert(volumeName, certName, secretName string, subPathMou
return volume, mounts
}

func (s *Server) buildAddonsVolumes(ctx context.Context) ([]v1.Volume, []v1.VolumeMount, error) {
var (
volumes []v1.Volume
mounts []v1.VolumeMount
)

for _, addon := range s.cluster.Spec.Addons {
namespace := s.cluster.Namespace
if addon.SecretNamespace != "" {
namespace = addon.SecretNamespace
}

nn := types.NamespacedName{
Name: addon.SecretRef,
Namespace: namespace,
}

var addons v1.Secret
if err := s.client.Get(ctx, nn, &addons); err != nil {
return nil, nil, err
}

// skip creating the addon secret if it already exists and in the same namespace as the cluster
if namespace != s.cluster.Namespace {
clusterAddons := v1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: addons.Name,
Namespace: s.cluster.Namespace,
},
Data: addons.Data,
}

if _, err := controllerutil.CreateOrUpdate(ctx, s.client, &clusterAddons, func() error {
return controllerutil.SetOwnerReference(s.cluster, &clusterAddons, s.client.Scheme())
}); err != nil {
return nil, nil, err
}
}

name := "addon-" + addon.SecretRef
volume := v1.Volume{
Name: name,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: addon.SecretRef,
},
},
}
volumes = append(volumes, volume)

volumeMount := v1.VolumeMount{
Name: name,
MountPath: "/var/lib/rancher/k3s/server/manifests/" + addon.SecretRef,
ReadOnly: true,
}
mounts = append(mounts, volumeMount)
}

return volumes, mounts, nil
}

func sortedKeys(keyMap map[string]string) []string {
keys := make([]string, 0, len(keyMap))

@@ -54,6 +54,7 @@ func AddStatefulSetController(ctx context.Context, mgr manager.Manager, maxConcu

return ctrl.NewControllerManagedBy(mgr).
For(&apps.StatefulSet{}).
WithEventFilter(newClusterPredicate()).
Owns(&v1.Pod{}).
Named(statefulsetController).
WithOptions(controller.Options{MaxConcurrentReconciles: maxConcurrentReconciles}).
@@ -192,7 +193,9 @@ func (p *StatefulSetReconciler) getETCDTLS(ctx context.Context, cluster *v1beta1
return true
}, func() error {
var err error

b, err = bootstrap.DecodedBootstrap(token, endpoint)

return err
}); err != nil {
return nil, err

@@ -2,13 +2,17 @@ package policy

import (
"context"
"errors"
"fmt"

"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"

v1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
ctrl "sigs.k8s.io/controller-runtime"

"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
@@ -52,15 +56,36 @@ func (c *VirtualClusterPolicyReconciler) cleanupNamespaces(ctx context.Context)
}

for _, ns := range namespaces.Items {
selector := labels.NewSelector()
currentPolicyName := ns.Labels[PolicyNameLabelKey]

if req, err := labels.NewRequirement(ManagedByLabelKey, selection.Equals, []string{VirtualPolicyControllerName}); err == nil {
selector = selector.Add(*req)
}
// This will match all the resources managed by the K3k Policy controller
// that have the app.kubernetes.io/managed-by=k3k-policy-controller label
selector := labels.SelectorFromSet(labels.Set{
ManagedByLabelKey: VirtualPolicyControllerName,
})

// if the namespace is bound to a policy -> cleanup resources of other policies
if ns.Labels[PolicyNameLabelKey] != "" {
requirement, err := labels.NewRequirement(PolicyNameLabelKey, selection.NotEquals, []string{ns.Labels[PolicyNameLabelKey]})
// If the namespace is not bound to any policy, or if the policy it was bound to no longer exists,
// we need to clear policy-related fields on its Cluster objects.
if currentPolicyName == "" {
if err := c.clearPolicyFieldsForClustersInNamespace(ctx, ns.Name); err != nil {
log.Error(err, "error clearing policy fields for clusters in unbound namespace", "namespace", ns.Name)
}
} else {
var policy v1beta1.VirtualClusterPolicy
if err := c.Client.Get(ctx, types.NamespacedName{Name: currentPolicyName}, &policy); err != nil {
if apierrors.IsNotFound(err) {
if err := c.clearPolicyFieldsForClustersInNamespace(ctx, ns.Name); err != nil {
log.Error(err, "error clearing policy fields for clusters in namespace with non-existent policy", "namespace", ns.Name, "policy", currentPolicyName)
}
} else {
log.Error(err, "error getting policy for namespace", "namespace", ns.Name, "policy", currentPolicyName)
}
}

// if the namespace is bound to a policy -> cleanup resources of other policies
requirement, err := labels.NewRequirement(
PolicyNameLabelKey, selection.NotEquals, []string{currentPolicyName},
)

// log the error but continue cleaning up the other namespaces
if err != nil {
@@ -90,3 +115,30 @@ func (c *VirtualClusterPolicyReconciler) cleanupNamespaces(ctx context.Context)

return nil
}

// clearPolicyFieldsForClustersInNamespace sets the policy status on Cluster objects in the given namespace to nil.
func (c *VirtualClusterPolicyReconciler) clearPolicyFieldsForClustersInNamespace(ctx context.Context, namespace string) error {
log := ctrl.LoggerFrom(ctx)

var clusters v1beta1.ClusterList
if err := c.Client.List(ctx, &clusters, client.InNamespace(namespace)); err != nil {
return fmt.Errorf("failed listing clusters in namespace %s: %w", namespace, err)
}

var updateErrs []error

for i := range clusters.Items {
cluster := clusters.Items[i]
if cluster.Status.Policy != nil {
log.V(1).Info("Clearing policy status for Cluster", "cluster", cluster.Name, "namespace", namespace)
cluster.Status.Policy = nil

if updateErr := c.Client.Status().Update(ctx, &cluster); updateErr != nil {
updateErr = fmt.Errorf("failed updating Status for Cluster %s: %w", cluster.Name, updateErr)
updateErrs = append(updateErrs, updateErr)
}
}
}

return errors.Join(updateErrs...)
}

@@ -165,6 +165,7 @@ func nodeEventHandler(r *VirtualClusterPolicyReconciler) handler.Funcs {
if oldNode.Spec.PodCIDR != newNode.Spec.PodCIDR {
podCIDRChanged = true
}

if !reflect.DeepEqual(oldNode.Spec.PodCIDRs, newNode.Spec.PodCIDRs) {
podCIDRChanged = true
}
@@ -469,16 +470,26 @@ func (c *VirtualClusterPolicyReconciler) reconcileClusters(ctx context.Context,
var clusterUpdateErrs []error

for _, cluster := range clusters.Items {
orig := cluster.DeepCopy()
origStatus := cluster.Status.DeepCopy()

cluster.Spec.PriorityClass = policy.Spec.DefaultPriorityClass
cluster.Spec.NodeSelector = policy.Spec.DefaultNodeSelector
cluster.Status.Policy = &v1beta1.AppliedPolicy{
Name: policy.Name,
PriorityClass: &policy.Spec.DefaultPriorityClass,
NodeSelector: policy.Spec.DefaultNodeSelector,
Sync: policy.Spec.Sync,
ServerAffinity: policy.Spec.DefaultServerAffinity,
AgentAffinity: policy.Spec.DefaultAgentAffinity,
SecurityContext: policy.Spec.SecurityContext,
RuntimeClassName: policy.Spec.RuntimeClassName,
}

if !reflect.DeepEqual(orig, cluster) {
if !reflect.DeepEqual(origStatus, &cluster.Status) {
log.V(1).Info("Updating Cluster", "cluster", cluster.Name, "namespace", namespace.Name)

// continue updating also the other clusters even if an error occurred
clusterUpdateErrs = append(clusterUpdateErrs, c.Client.Update(ctx, &cluster))
if err := c.Client.Status().Update(ctx, &cluster); err != nil {
clusterUpdateErrs = append(clusterUpdateErrs, err)
}
}
}

@@ -27,6 +27,7 @@ func newEncoder(format string) zapcore.Encoder {
encCfg.EncodeTime = zapcore.ISO8601TimeEncoder

var encoder zapcore.Encoder

if format == "text" {
encCfg.EncodeLevel = zapcore.CapitalColorLevelEncoder
encoder = zapcore.NewConsoleEncoder(encCfg)

@@ -16,8 +16,8 @@ echo "Building k3k... [cli os/arch: $(go env GOOS)/$(go env GOARCH)]"
echo "Current TAG: ${VERSION} "

export CGO_ENABLED=0
GOOS=linux GOARCH=amd64 go build -ldflags="${LDFLAGS}" "${build_args[@]}" -o bin/k3k
GOOS=linux GOARCH=amd64 go build -ldflags="${LDFLAGS}" "${build_args[@]}" -o bin/k3k-kubelet ./k3k-kubelet
GOOS=linux GOARCH=amd64 go build -ldflags="${LDFLAGS}" "${build_args[@]+"${build_args[@]}"}" -o bin/k3k
GOOS=linux GOARCH=amd64 go build -ldflags="${LDFLAGS}" "${build_args[@]+"${build_args[@]}"}" -o bin/k3k-kubelet ./k3k-kubelet

# build the cli for the local OS and ARCH
go build -ldflags="${LDFLAGS}" "${build_args[@]}" -o bin/k3kcli ./cli
go build -ldflags="${LDFLAGS}" "${build_args[@]+"${build_args[@]}"}" -o bin/k3kcli ./cli

@@ -3,7 +3,7 @@
set -eou pipefail


CONTROLLER_TOOLS_VERSION=v0.16.0
CONTROLLER_TOOLS_VERSION=v0.20.0

# This will return non-zero until all of our objects in ./pkg/apis can generate valid crds.
# allowDangerousTypes is needed for struct that use floats
@@ -14,7 +14,6 @@ go run sigs.k8s.io/controller-tools/cmd/controller-gen@${CONTROLLER_TOOLS_VERSIO

# add the 'helm.sh/resource-policy: keep' annotation to the CRDs
for f in ./charts/k3k/templates/crds/*.yaml; do
sed -i '0,/^[[:space:]]*annotations:/s/^[[:space:]]*annotations:/&\n helm.sh\/resource-policy: keep/' "$f"
echo "Validating $f"
yq . "$f" > /dev/null
echo "Annotating $f"
yq -c -i '.metadata.annotations["helm.sh/resource-policy"] = "keep"' "$f"
done

@@ -18,6 +18,7 @@ SUBCOMMAND_FILES=(
"$DOCS_DIR/k3kcli_cluster_create.md"
"$DOCS_DIR/k3kcli_cluster_delete.md"
"$DOCS_DIR/k3kcli_cluster_list.md"
"$DOCS_DIR/k3kcli_cluster_update.md"
"$DOCS_DIR/k3kcli_kubeconfig.md"
"$DOCS_DIR/k3kcli_kubeconfig_generate.md"
"$DOCS_DIR/k3kcli_policy.md"

417
tests/cli/cli_test.go
Normal file
@@ -0,0 +1,417 @@
|
||||
package cli_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"os/exec"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/rand"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
|
||||
"github.com/rancher/k3k/pkg/controller/policy"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
func K3kcli(args ...string) (string, string, error) {
|
||||
return runCmd("k3kcli", args...)
|
||||
}
|
||||
|
||||
func Kubectl(args ...string) (string, string, error) {
|
||||
return runCmd("kubectl", args...)
|
||||
}
|
||||
|
||||
func runCmd(cmdName string, args ...string) (string, string, error) {
|
||||
stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{}
|
||||
|
||||
cmd := exec.CommandContext(context.Background(), cmdName, args...)
|
||||
cmd.Stdout = stdout
|
||||
cmd.Stderr = stderr
|
||||
|
||||
err := cmd.Run()
|
||||
|
||||
return stdout.String(), stderr.String(), err
|
||||
}
|
||||
|
||||
var _ = When("using the k3kcli", Label("cli"), func() {
|
||||
It("can get the version", func() {
|
||||
stdout, _, err := K3kcli("--version")
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
Expect(stdout).To(ContainSubstring("k3kcli version "))
|
||||
})
|
||||
|
||||
When("trying the cluster commands", func() {
|
||||
It("can create, list and delete a cluster", func() {
|
||||
var (
|
||||
stdout string
|
||||
stderr string
|
||||
err error
|
||||
)
|
||||
|
||||
clusterName := "cluster-" + rand.String(5)
|
||||
namespace := NewNamespace()
|
||||
clusterNamespace := namespace.Name
|
||||
|
||||
DeferCleanup(func() {
|
||||
DeleteNamespaces(namespace.Name)
|
||||
})
|
||||
|
||||
_, stderr, err = K3kcli("cluster", "create", "--namespace", clusterNamespace, clusterName)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stderr).To(ContainSubstring("You can start using the cluster"))
|
||||
|
||||
stdout, stderr, err = K3kcli("cluster", "list")
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stderr).To(BeEmpty())
|
||||
Expect(stdout).To(ContainSubstring(clusterNamespace))
|
||||
|
||||
_, stderr, err = K3kcli("cluster", "delete", "--namespace", clusterNamespace, clusterName)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stderr).To(ContainSubstring(`Deleting '%s' cluster in namespace '%s'`, clusterName, clusterNamespace))
|
||||
|
||||
// The deletion could take a bit
|
||||
Eventually(func() string {
|
||||
stdout, stderr, err := K3kcli("cluster", "list", "-n", clusterNamespace)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
|
||||
return stdout + stderr
|
||||
}).
|
||||
WithTimeout(time.Second * 5).
|
||||
WithPolling(time.Second).
|
||||
Should(BeEmpty())
|
||||
})
|
||||
|
||||
It("can create a cluster with the specified kubernetes version", func() {
|
||||
var (
|
||||
stderr string
|
||||
err error
|
||||
)
|
||||
|
||||
clusterName := "cluster-" + rand.String(5)
|
||||
namespace := NewNamespace()
|
||||
clusterNamespace := namespace.Name
|
||||
|
||||
DeferCleanup(func() {
|
||||
DeleteNamespaces(clusterNamespace)
|
||||
})
|
||||
|
||||
_, stderr, err = K3kcli("cluster", "create", "--version", k3sVersion, clusterName)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stderr).To(ContainSubstring("You can start using the cluster"))
|
||||
})
|
||||
})
|
||||
|
||||
When("trying the policy commands", func() {
|
||||
It("can create, list and delete a policy", func() {
|
||||
var (
|
||||
stdout string
|
||||
stderr string
|
||||
err error
|
||||
)
|
||||
|
||||
policyName := "policy-" + rand.String(5)
|
||||
|
||||
_, stderr, err = K3kcli("policy", "create", policyName)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stderr).To(ContainSubstring(`Creating policy '%s'`, policyName))
|
||||
|
||||
stdout, stderr, err = K3kcli("policy", "list")
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stderr).To(BeEmpty())
|
||||
Expect(stdout).To(ContainSubstring(policyName))
|
||||
|
||||
stdout, stderr, err = K3kcli("policy", "delete", policyName)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stdout).To(BeEmpty())
|
||||
Expect(stderr).To(ContainSubstring(`Policy '%s' deleted`, policyName))
|
||||
|
||||
stdout, stderr, err = K3kcli("policy", "list")
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stdout).To(Not(ContainSubstring(policyName)))
|
||||
})
|
||||
|
||||
It("can bound a policy to a namespace", func() {
|
||||
var (
|
||||
stdout string
|
||||
stderr string
|
||||
err error
|
||||
)
|
||||
|
||||
namespace := NewNamespace()
|
||||
namespaceName := namespace.Name
|
||||
|
||||
DeferCleanup(func() {
|
||||
DeleteNamespaces(namespaceName)
|
||||
})
|
||||
|
||||
By("Creating a policy and binding to a namespace")
|
||||
|
||||
policy1Name := "policy-" + rand.String(5)
|
||||
|
||||
_, stderr, err = K3kcli("policy", "create", "--namespace", namespaceName, policy1Name)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stderr).To(ContainSubstring(`Creating policy '%s'`, policy1Name))
|
||||
|
||||
DeferCleanup(func() {
|
||||
stdout, stderr, err = K3kcli("policy", "delete", policy1Name)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stdout).To(BeEmpty())
|
||||
Expect(stderr).To(ContainSubstring(`Policy '%s' deleted`, policy1Name))
|
||||
})
|
||||
|
||||
var ns v1.Namespace
|
||||
|
||||
err = k8sClient.Get(context.Background(), types.NamespacedName{Name: namespaceName}, &ns)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(ns.Name).To(Equal(namespaceName))
|
||||
Expect(ns.Labels).To(HaveKeyWithValue(policy.PolicyNameLabelKey, policy1Name))
|
||||
|
||||
By("Creating another policy and binding to the same namespace without the --overwrite flag")
|
||||
|
||||
policy2Name := "policy-" + rand.String(5)
|
||||
|
||||
stdout, stderr, err = K3kcli("policy", "create", "--namespace", namespaceName, policy2Name)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stderr).To(ContainSubstring(`Creating policy '%s'`, policy2Name))
|
||||
|
||||
DeferCleanup(func() {
|
||||
stdout, stderr, err = K3kcli("policy", "delete", policy2Name)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stdout).To(BeEmpty())
|
||||
Expect(stderr).To(ContainSubstring(`Policy '%s' deleted`, policy2Name))
|
||||
})
|
||||
|
||||
err = k8sClient.Get(context.Background(), types.NamespacedName{Name: namespaceName}, &ns)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(ns.Name).To(Equal(namespaceName))
|
||||
Expect(ns.Labels).To(HaveKeyWithValue(policy.PolicyNameLabelKey, policy1Name))
|
||||
|
||||
By("Forcing the other policy binding with the overwrite flag")
|
||||
|
||||
stdout, stderr, err = K3kcli("policy", "create", "--namespace", namespaceName, "--overwrite", policy2Name)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stderr).To(ContainSubstring(`Creating policy '%s'`, policy2Name))
|
||||
|
||||
err = k8sClient.Get(context.Background(), types.NamespacedName{Name: namespaceName}, &ns)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(ns.Name).To(Equal(namespaceName))
|
||||
Expect(ns.Labels).To(HaveKeyWithValue(policy.PolicyNameLabelKey, policy2Name))
|
||||
})
|
||||
})
|
||||
|
||||
When("trying the cluster update commands", func() {
|
||||
It("can update a cluster's server count", func() {
|
||||
var (
|
||||
stderr string
|
||||
err error
|
||||
)
|
||||
|
||||
clusterName := "cluster-" + rand.String(5)
|
||||
|
||||
namespace := NewNamespace()
|
||||
clusterNamespace := namespace.Name
|
||||
|
||||
DeferCleanup(func() {
|
||||
DeleteNamespaces(clusterNamespace)
|
||||
})
|
||||
|
||||
// Create the cluster first
|
||||
_, stderr, err = K3kcli("cluster", "create", "--namespace", clusterNamespace, clusterName)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stderr).To(ContainSubstring("You can start using the cluster"))
|
||||
|
||||
// Update the cluster server count
|
||||
_, stderr, err = K3kcli("cluster", "update", "-y", "--servers", "2", "--namespace", clusterNamespace, clusterName)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stderr).To(ContainSubstring("Updating cluster"))
|
||||
|
||||
// Verify the cluster state was actually updated
|
||||
var cluster v1beta1.Cluster
|
||||
|
||||
err = k8sClient.Get(context.Background(), types.NamespacedName{Name: clusterName, Namespace: clusterNamespace}, &cluster)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
Expect(cluster.Spec.Servers).To(Not(BeNil()))
|
||||
Expect(*cluster.Spec.Servers).To(Equal(int32(2)))
|
||||
})
|
||||
|
||||
It("can update a cluster's version", func() {
|
||||
var (
|
||||
stderr string
|
||||
err error
|
||||
)
|
||||
|
||||
clusterName := "cluster-" + rand.String(5)
|
||||
|
||||
namespace := NewNamespace()
|
||||
clusterNamespace := namespace.Name
|
||||
|
||||
DeferCleanup(func() {
|
||||
DeleteNamespaces(clusterNamespace)
|
||||
})
|
||||
|
||||
// Create the cluster with initial version
|
||||
_, stderr, err = K3kcli("cluster", "create", "--version", k3sOldVersion, "--namespace", clusterNamespace, clusterName)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stderr).To(ContainSubstring("You can start using the cluster"))
|
||||
|
||||
// Update the cluster version
|
||||
_, stderr, err = K3kcli("cluster", "update", "-y", "--version", k3sVersion, "--namespace", clusterNamespace, clusterName)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stderr).To(ContainSubstring("Updating cluster"))
|
||||
|
||||
// Verify the cluster state was actually updated
|
||||
var cluster v1beta1.Cluster
|
||||
|
||||
err = k8sClient.Get(context.Background(), types.NamespacedName{Name: clusterName, Namespace: clusterNamespace}, &cluster)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
Expect(cluster.Spec.Version).To(Equal(k3sVersion))
|
||||
})
|
||||
|
||||
It("fails to downgrade cluster version", func() {
|
||||
var (
|
||||
stderr string
|
||||
err error
|
||||
)
|
||||
|
||||
clusterName := "cluster-" + rand.String(5)
|
||||
|
||||
namespace := NewNamespace()
|
||||
clusterNamespace := namespace.Name
|
||||
|
||||
DeferCleanup(func() {
|
||||
DeleteNamespaces(clusterNamespace)
|
||||
})
|
||||
|
||||
// Create the cluster with a version
|
||||
_, stderr, err = K3kcli("cluster", "create", "--version", k3sVersion, "--namespace", clusterNamespace, clusterName)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stderr).To(ContainSubstring("You can start using the cluster"))
|
||||
|
||||
// Attempt to downgrade should fail
|
||||
_, stderr, err = K3kcli("cluster", "update", "-y", "--version", k3sOldVersion, "--namespace", clusterNamespace, clusterName)
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(stderr).To(ContainSubstring("downgrading cluster version is not supported"))
|
||||
|
||||
// Verify the cluster version was NOT changed
|
||||
var cluster v1beta1.Cluster
|
||||
|
||||
err = k8sClient.Get(context.Background(), types.NamespacedName{Name: clusterName, Namespace: clusterNamespace}, &cluster)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
Expect(cluster.Spec.Version).To(Equal(k3sVersion))
|
||||
})
|
||||
|
||||
It("fails to update a non-existent cluster", func() {
|
||||
var (
|
||||
stderr string
|
||||
err error
|
||||
)
|
||||
|
||||
// Attempt to update a cluster that doesn't exist
|
||||
_, stderr, err = K3kcli("cluster", "update", "-y", "--servers", "2", "non-existent-cluster")
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(stderr).To(ContainSubstring("failed to fetch cluster"))
|
||||
})
|
||||
|
||||
It("can update a cluster's labels", func() {
|
||||
var (
|
||||
stderr string
|
||||
err error
|
||||
)
|
||||
|
||||
clusterName := "cluster-" + rand.String(5)
|
||||
|
||||
namespace := NewNamespace()
|
||||
clusterNamespace := namespace.Name
|
||||
|
||||
DeferCleanup(func() {
|
||||
DeleteNamespaces(clusterNamespace)
|
||||
})
|
||||
|
||||
// Create the cluster first
|
||||
_, stderr, err = K3kcli("cluster", "create", "--namespace", clusterNamespace, clusterName)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stderr).To(ContainSubstring("You can start using the cluster"))
|
||||
|
||||
// Update the cluster with labels
|
||||
_, stderr, err = K3kcli("cluster", "update", "-y", "--labels", "env=test", "--labels", "team=dev", "--namespace", clusterNamespace, clusterName)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stderr).To(ContainSubstring("Updating cluster"))
|
||||
|
||||
// Verify the cluster labels were actually updated
|
||||
var cluster v1beta1.Cluster
|
||||
|
||||
err = k8sClient.Get(context.Background(), types.NamespacedName{Name: clusterName, Namespace: clusterNamespace}, &cluster)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
Expect(cluster.Labels).To(HaveKeyWithValue("env", "test"))
|
||||
Expect(cluster.Labels).To(HaveKeyWithValue("team", "dev"))
|
||||
})
|
||||
|
||||
It("can update a cluster's annotations", func() {
|
||||
var (
|
||||
stderr string
|
||||
err error
|
||||
)
|
||||
|
||||
clusterName := "cluster-" + rand.String(5)
|
||||
|
||||
namespace := NewNamespace()
|
||||
clusterNamespace := namespace.Name
|
||||
|
||||
DeferCleanup(func() {
|
||||
DeleteNamespaces(clusterNamespace)
|
||||
})
|
||||
|
||||
// Create the cluster first
|
||||
_, stderr, err = K3kcli("cluster", "create", "--namespace", clusterNamespace, clusterName)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stderr).To(ContainSubstring("You can start using the cluster"))
|
||||
|
||||
// Update the cluster with annotations
|
||||
_, stderr, err = K3kcli("cluster", "update", "-y", "--annotations", "description=test-cluster", "--annotations", "owner=qa-team", "--namespace", clusterNamespace, clusterName)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stderr).To(ContainSubstring("Updating cluster"))
|
||||
|
||||
// Verify the cluster annotations were actually updated
|
||||
var cluster v1beta1.Cluster
|
||||
|
||||
err = k8sClient.Get(context.Background(), types.NamespacedName{Name: clusterName, Namespace: clusterNamespace}, &cluster)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
Expect(cluster.Annotations).To(HaveKeyWithValue("description", "test-cluster"))
|
||||
Expect(cluster.Annotations).To(HaveKeyWithValue("owner", "qa-team"))
|
||||
})
|
||||
})
|
||||
|
||||
When("trying the kubeconfig command", func() {
|
||||
It("can generate a kubeconfig", func() {
|
||||
var (
|
||||
stderr string
|
||||
err error
|
||||
)
|
||||
|
||||
clusterName := "cluster-" + rand.String(5)
|
||||
namespace := NewNamespace()
|
||||
clusterNamespace := namespace.Name
|
||||
|
||||
DeferCleanup(func() {
|
||||
DeleteNamespaces(clusterNamespace)
|
||||
})
|
||||
|
||||
_, stderr, err = K3kcli("cluster", "create", "--namespace", clusterNamespace, clusterName)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stderr).To(ContainSubstring("You can start using the cluster"))
|
||||
|
||||
_, stderr, err = K3kcli("kubeconfig", "generate", "--namespace", clusterNamespace, "--name", clusterName)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stderr).To(ContainSubstring("You can start using the cluster"))
|
||||
|
||||
_, stderr, err = K3kcli("cluster", "delete", "--namespace", clusterNamespace, clusterName)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stderr).To(ContainSubstring(`Deleting '%s' cluster in namespace '%s'`, clusterName, clusterNamespace))
|
||||
})
|
||||
})
|
||||
})
|
||||
55
tests/cli/common_test.go
Normal file
@@ -0,0 +1,55 @@
|
||||
package cli_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"sync"
|
||||
|
||||
"k8s.io/utils/ptr"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
func NewNamespace() *v1.Namespace {
|
||||
GinkgoHelper()
|
||||
|
||||
namespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-", Labels: map[string]string{"e2e": "true"}}}
|
||||
namespace, err := k8s.CoreV1().Namespaces().Create(context.Background(), namespace, metav1.CreateOptions{})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
return namespace
|
||||
}
|
||||
|
||||
func DeleteNamespaces(names ...string) {
|
||||
GinkgoHelper()
|
||||
|
||||
if _, found := os.LookupEnv("KEEP_NAMESPACES"); found {
|
||||
By(fmt.Sprintf("Keeping namespace %v", names))
|
||||
return
|
||||
}
|
||||
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(len(names))
|
||||
|
||||
for _, name := range names {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
defer GinkgoRecover()
|
||||
|
||||
By(fmt.Sprintf("Deleting namespace %s", name))
|
||||
|
||||
err := k8s.CoreV1().Namespaces().Delete(context.Background(), name, metav1.DeleteOptions{
|
||||
GracePeriodSeconds: ptr.To[int64](0),
|
||||
})
|
||||
Expect(client.IgnoreNotFound(err)).To(Not(HaveOccurred()))
|
||||
}()
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
}
|
||||
56
tests/cli/k8s_restclientgetter_test.go
Normal file
@@ -0,0 +1,56 @@
|
||||
package cli_test
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
"k8s.io/client-go/discovery"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/restmapper"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
|
||||
memory "k8s.io/client-go/discovery/cached"
|
||||
)
|
||||
|
||||
type RESTClientGetter struct {
|
||||
clientconfig clientcmd.ClientConfig
|
||||
restConfig *rest.Config
|
||||
discoveryClient discovery.CachedDiscoveryInterface
|
||||
}
|
||||
|
||||
func NewRESTClientGetter(kubeconfig []byte) (*RESTClientGetter, error) {
|
||||
clientconfig, err := clientcmd.NewClientConfigFromBytes([]byte(kubeconfig))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
restConfig, err := clientconfig.ClientConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dc, err := discovery.NewDiscoveryClientForConfig(restConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &RESTClientGetter{
|
||||
clientconfig: clientconfig,
|
||||
restConfig: restConfig,
|
||||
discoveryClient: memory.NewMemCacheClient(dc),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (r *RESTClientGetter) ToRESTConfig() (*rest.Config, error) {
|
||||
return r.restConfig, nil
|
||||
}
|
||||
|
||||
func (r *RESTClientGetter) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) {
|
||||
return r.discoveryClient, nil
|
||||
}
|
||||
|
||||
func (r *RESTClientGetter) ToRESTMapper() (meta.RESTMapper, error) {
|
||||
return restmapper.NewDeferredDiscoveryRESTMapper(r.discoveryClient), nil
|
||||
}
|
||||
|
||||
func (r *RESTClientGetter) ToRawKubeConfigLoader() clientcmd.ClientConfig {
|
||||
return r.clientconfig
|
||||
}
|
||||
254
tests/cli/tests_suite_test.go
Normal file
@@ -0,0 +1,254 @@
|
||||
package cli_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"maps"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/go-logr/zapr"
|
||||
"github.com/testcontainers/testcontainers-go"
|
||||
"github.com/testcontainers/testcontainers-go/modules/k3s"
|
||||
"go.uber.org/zap"
|
||||
"helm.sh/helm/v3/pkg/action"
|
||||
"helm.sh/helm/v3/pkg/chart/loader"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
const (
|
||||
k3kNamespace = "k3k-system"
|
||||
|
||||
k3sVersion = "v1.35.2-k3s1"
|
||||
k3sOldVersion = "v1.35.0-k3s1"
|
||||
)
|
||||
|
||||
func TestTests(t *testing.T) {
|
||||
RegisterFailHandler(Fail)
|
||||
RunSpecs(t, "Tests Suite")
|
||||
}
|
||||
|
||||
var (
|
||||
k3sContainer *k3s.K3sContainer
|
||||
restcfg *rest.Config
|
||||
k8s *kubernetes.Clientset
|
||||
k8sClient client.Client
|
||||
kubeconfigPath string
|
||||
helmActionConfig *action.Configuration
|
||||
)
|
||||
|
||||
var _ = BeforeSuite(func() {
|
||||
ctx := context.Background()
|
||||
|
||||
_, dockerInstallEnabled := os.LookupEnv("K3K_DOCKER_INSTALL")
|
||||
|
||||
if dockerInstallEnabled {
|
||||
repo := os.Getenv("REPO")
|
||||
if repo == "" {
|
||||
repo = "rancher"
|
||||
}
|
||||
|
||||
installK3SDocker(ctx, repo+"/k3k", repo+"/k3k-kubelet")
|
||||
initKubernetesClient()
|
||||
installK3kChart(repo+"/k3k", repo+"/k3k-kubelet")
|
||||
} else {
|
||||
initKubernetesClient()
|
||||
}
|
||||
})
|
||||
|
||||
func initKubernetesClient() {
|
||||
var (
|
||||
err error
|
||||
kubeconfig []byte
|
||||
)
|
||||
|
||||
logger, err := zap.NewDevelopment()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
log.SetLogger(zapr.NewLogger(logger))
|
||||
|
||||
kubeconfigPath := os.Getenv("KUBECONFIG")
|
||||
Expect(kubeconfigPath).To(Not(BeEmpty()))
|
||||
|
||||
kubeconfig, err = os.ReadFile(kubeconfigPath)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
restcfg, err = clientcmd.RESTConfigFromKubeConfig(kubeconfig)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
k8s, err = kubernetes.NewForConfig(restcfg)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
scheme := buildScheme()
|
||||
k8sClient, err = client.New(restcfg, client.Options{Scheme: scheme})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
|
||||
func buildScheme() *runtime.Scheme {
|
||||
scheme := runtime.NewScheme()
|
||||
|
||||
err := clientgoscheme.AddToScheme(scheme)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
err = v1beta1.AddToScheme(scheme)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
return scheme
|
||||
}
|
||||
|
||||
func installK3SDocker(ctx context.Context, controllerImage, kubeletImage string) {
|
||||
var (
|
||||
err error
|
||||
kubeconfig []byte
|
||||
)
|
||||
|
||||
k3sHostVersion := os.Getenv("K3S_HOST_VERSION")
|
||||
if k3sHostVersion == "" {
|
||||
k3sHostVersion = k3sVersion
|
||||
}
|
||||
|
||||
k3sHostVersion = strings.ReplaceAll(k3sHostVersion, "+", "-")
|
||||
|
||||
k3sContainer, err = k3s.Run(ctx, "rancher/k3s:"+k3sHostVersion)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
containerIP, err := k3sContainer.ContainerIP(ctx)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
GinkgoWriter.Println("K3s containerIP: " + containerIP)
|
||||
|
||||
kubeconfig, err = k3sContainer.GetKubeConfig(context.Background())
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
tmpFile, err := os.CreateTemp("", "kubeconfig-")
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
_, err = tmpFile.Write(kubeconfig)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
Expect(tmpFile.Close()).To(Succeed())
|
||||
kubeconfigPath = tmpFile.Name()
|
||||
|
||||
err = k3sContainer.LoadImages(ctx, controllerImage+":dev", kubeletImage+":dev")
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
DeferCleanup(os.Remove, kubeconfigPath)
|
||||
|
||||
Expect(os.Setenv("KUBECONFIG", kubeconfigPath)).To(Succeed())
|
||||
GinkgoWriter.Printf("KUBECONFIG set to: %s\n", kubeconfigPath)
|
||||
}
|
||||
|
||||
func installK3kChart(controllerImage, kubeletImage string) {
|
||||
pwd, err := os.Getwd()
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
k3kChart, err := loader.Load(path.Join(pwd, "../../charts/k3k"))
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
helmActionConfig = new(action.Configuration)
|
||||
|
||||
kubeconfig, err := os.ReadFile(kubeconfigPath)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
restClientGetter, err := NewRESTClientGetter(kubeconfig)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
err = helmActionConfig.Init(restClientGetter, k3kNamespace, os.Getenv("HELM_DRIVER"), func(format string, v ...any) {
|
||||
GinkgoWriter.Printf("[Helm] "+format+"\n", v...)
|
||||
})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
iCli := action.NewInstall(helmActionConfig)
|
||||
iCli.ReleaseName = "k3k"
|
||||
iCli.Namespace = k3kNamespace
|
||||
iCli.CreateNamespace = true
|
||||
iCli.Timeout = time.Minute
|
||||
iCli.Wait = true
|
||||
|
||||
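// override the chart values in place: enable debug logging and point the controller and kubelet images at the locally built ":dev" tags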
controllerMap, _ := k3kChart.Values["controller"].(map[string]any)
|
||||
|
||||
extraEnvArray, _ := controllerMap["extraEnv"].([]map[string]any)
|
||||
extraEnvArray = append(extraEnvArray, map[string]any{
|
||||
"name": "DEBUG",
|
||||
"value": "true",
|
||||
})
|
||||
controllerMap["extraEnv"] = extraEnvArray
|
||||
|
||||
imageMap, _ := controllerMap["image"].(map[string]any)
|
||||
maps.Copy(imageMap, map[string]any{
|
||||
"repository": controllerImage,
|
||||
"tag": "dev",
|
||||
"pullPolicy": "IfNotPresent",
|
||||
})
|
||||
|
||||
agentMap, _ := k3kChart.Values["agent"].(map[string]any)
|
||||
sharedAgentMap, _ := agentMap["shared"].(map[string]any)
|
||||
sharedAgentImageMap, _ := sharedAgentMap["image"].(map[string]any)
|
||||
maps.Copy(sharedAgentImageMap, map[string]any{
|
||||
"repository": kubeletImage,
|
||||
"tag": "dev",
|
||||
})
|
||||
|
||||
release, err := iCli.Run(k3kChart, k3kChart.Values)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
GinkgoWriter.Printf("Helm release '%s' installed in '%s' namespace\n", release.Name, release.Namespace)
|
||||
}
|
||||
|
||||
var _ = AfterSuite(func() {
|
||||
ctx := context.Background()
|
||||
|
||||
if k3sContainer != nil {
|
||||
// dump k3s logs
|
||||
k3sLogs, err := k3sContainer.Logs(ctx)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
writeLogs("k3s.log", k3sLogs)
|
||||
|
||||
// dump k3k controller logs
|
||||
k3kLogs := getK3kLogs(ctx)
|
||||
writeLogs("k3k.log", k3kLogs)
|
||||
|
||||
testcontainers.CleanupContainer(GinkgoTB(), k3sContainer)
|
||||
}
|
||||
})
|
||||
|
||||
func getK3kLogs(ctx context.Context) io.ReadCloser {
|
||||
var podList v1.PodList
|
||||
|
||||
err := k8sClient.List(ctx, &podList, &client.ListOptions{Namespace: k3kNamespace})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
Expect(podList.Items).NotTo(BeEmpty())
|
||||
|
||||
k3kPod := podList.Items[0]
|
||||
req := k8s.CoreV1().Pods(k3kPod.Namespace).GetLogs(k3kPod.Name, &v1.PodLogOptions{})
|
||||
podLogs, err := req.Stream(ctx)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
return podLogs
|
||||
}
|
||||
|
||||
func writeLogs(filename string, logs io.ReadCloser) {
|
||||
logsStr, err := io.ReadAll(logs)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
tempfile := path.Join(os.TempDir(), filename)
|
||||
err = os.WriteFile(tempfile, []byte(logsStr), 0o644)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
GinkgoWriter.Println("logs written to: " + filename)
|
||||
|
||||
_ = logs.Close()
|
||||
}
|
||||
@@ -1,231 +0,0 @@
|
||||
package k3k_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"os/exec"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/rand"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
|
||||
"github.com/rancher/k3k/pkg/controller/policy"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
func K3kcli(args ...string) (string, string, error) {
|
||||
return runCmd("k3kcli", args...)
|
||||
}
|
||||
|
||||
func Kubectl(args ...string) (string, string, error) {
|
||||
return runCmd("kubectl", args...)
|
||||
}
|
||||
|
||||
func runCmd(cmdName string, args ...string) (string, string, error) {
|
||||
stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{}
|
||||
|
||||
cmd := exec.CommandContext(context.Background(), cmdName, args...)
|
||||
cmd.Stdout = stdout
|
||||
cmd.Stderr = stderr
|
||||
|
||||
err := cmd.Run()
|
||||
|
||||
return stdout.String(), stderr.String(), err
|
||||
}
|
||||
|
||||
var _ = When("using the k3kcli", Label("cli"), func() {
|
||||
It("can get the version", func() {
|
||||
stdout, _, err := K3kcli("--version")
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
Expect(stdout).To(ContainSubstring("k3kcli version v"))
|
||||
})
|
||||
|
||||
When("trying the cluster commands", func() {
|
||||
It("can create, list and delete a cluster", func() {
|
||||
var (
|
||||
stdout string
|
||||
stderr string
|
||||
err error
|
||||
)
|
||||
|
||||
clusterName := "cluster-" + rand.String(5)
|
||||
clusterNamespace := "k3k-" + clusterName
|
||||
|
||||
DeferCleanup(func() {
|
||||
DeleteNamespaces(clusterNamespace)
|
||||
})
|
||||
|
||||
_, stderr, err = K3kcli("cluster", "create", clusterName)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stderr).To(ContainSubstring("You can start using the cluster"))
|
||||
|
||||
stdout, stderr, err = K3kcli("cluster", "list")
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stderr).To(BeEmpty())
|
||||
Expect(stdout).To(ContainSubstring(clusterNamespace))
|
||||
|
||||
_, stderr, err = K3kcli("cluster", "delete", clusterName)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stderr).To(ContainSubstring(`Deleting '%s' cluster in namespace '%s'`, clusterName, clusterNamespace))
|
||||
|
||||
// The deletion could take a bit
|
||||
Eventually(func() string {
|
||||
stdout, stderr, err := K3kcli("cluster", "list", "-n", clusterNamespace)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
return stdout + stderr
|
||||
}).
|
||||
WithTimeout(time.Second * 5).
|
||||
WithPolling(time.Second).
|
||||
Should(BeEmpty())
|
||||
})
|
||||
|
||||
It("can create a cluster with the specified kubernetes version", func() {
|
||||
var (
|
||||
stderr string
|
||||
err error
|
||||
)
|
||||
|
||||
clusterName := "cluster-" + rand.String(5)
|
||||
clusterNamespace := "k3k-" + clusterName
|
||||
|
||||
DeferCleanup(func() {
|
||||
DeleteNamespaces(clusterNamespace)
|
||||
})
|
||||
|
||||
_, stderr, err = K3kcli("cluster", "create", "--version", "v1.33.6-k3s1", clusterName)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stderr).To(ContainSubstring("You can start using the cluster"))
|
||||
})
|
||||
})
|
||||
|
||||
When("trying the policy commands", func() {
|
||||
It("can create, list and delete a policy", func() {
|
||||
var (
|
||||
stdout string
|
||||
stderr string
|
||||
err error
|
||||
)
|
||||
|
||||
policyName := "policy-" + rand.String(5)
|
||||
|
||||
_, stderr, err = K3kcli("policy", "create", policyName)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stderr).To(ContainSubstring(`Creating policy '%s'`, policyName))
|
||||
|
||||
stdout, stderr, err = K3kcli("policy", "list")
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stderr).To(BeEmpty())
|
||||
Expect(stdout).To(ContainSubstring(policyName))
|
||||
|
||||
stdout, stderr, err = K3kcli("policy", "delete", policyName)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stdout).To(BeEmpty())
|
||||
Expect(stderr).To(ContainSubstring(`Policy '%s' deleted`, policyName))
|
||||
|
||||
stdout, stderr, err = K3kcli("policy", "list")
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stdout).To(Not(ContainSubstring(policyName)))
|
||||
})
|
||||
|
||||
It("can bound a policy to a namespace", func() {
|
||||
var (
|
||||
stdout string
|
||||
stderr string
|
||||
err error
|
||||
)
|
||||
|
||||
namespaceName := "ns-" + rand.String(5)
|
||||
|
||||
_, stderr, err = Kubectl("create", "namespace", namespaceName)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
|
||||
DeferCleanup(func() {
|
||||
DeleteNamespaces(namespaceName)
|
||||
})
|
||||
|
||||
By("Creating a policy and binding to a namespace")
|
||||
|
||||
policy1Name := "policy-" + rand.String(5)
|
||||
|
||||
_, stderr, err = K3kcli("policy", "create", "--namespace", namespaceName, policy1Name)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stderr).To(ContainSubstring(`Creating policy '%s'`, policy1Name))
|
||||
|
||||
DeferCleanup(func() {
|
||||
stdout, stderr, err = K3kcli("policy", "delete", policy1Name)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stdout).To(BeEmpty())
|
||||
Expect(stderr).To(ContainSubstring(`Policy '%s' deleted`, policy1Name))
|
||||
})
|
||||
|
||||
var ns v1.Namespace
|
||||
err = k8sClient.Get(context.Background(), types.NamespacedName{Name: namespaceName}, &ns)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(ns.Name).To(Equal(namespaceName))
|
||||
Expect(ns.Labels).To(HaveKeyWithValue(policy.PolicyNameLabelKey, policy1Name))
|
||||
|
||||
By("Creating another policy and binding to the same namespace without the --overwrite flag")
|
||||
|
||||
policy2Name := "policy-" + rand.String(5)
|
||||
|
||||
stdout, stderr, err = K3kcli("policy", "create", "--namespace", namespaceName, policy2Name)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stderr).To(ContainSubstring(`Creating policy '%s'`, policy2Name))
|
||||
|
||||
DeferCleanup(func() {
|
||||
stdout, stderr, err = K3kcli("policy", "delete", policy2Name)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stdout).To(BeEmpty())
|
||||
Expect(stderr).To(ContainSubstring(`Policy '%s' deleted`, policy2Name))
|
||||
})
|
||||
|
||||
err = k8sClient.Get(context.Background(), types.NamespacedName{Name: namespaceName}, &ns)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(ns.Name).To(Equal(namespaceName))
|
||||
Expect(ns.Labels).To(HaveKeyWithValue(policy.PolicyNameLabelKey, policy1Name))
|
||||
|
||||
By("Forcing the other policy binding with the overwrite flag")
|
||||
|
||||
stdout, stderr, err = K3kcli("policy", "create", "--namespace", namespaceName, "--overwrite", policy2Name)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stderr).To(ContainSubstring(`Creating policy '%s'`, policy2Name))
|
||||
|
||||
err = k8sClient.Get(context.Background(), types.NamespacedName{Name: namespaceName}, &ns)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(ns.Name).To(Equal(namespaceName))
|
||||
Expect(ns.Labels).To(HaveKeyWithValue(policy.PolicyNameLabelKey, policy2Name))
|
||||
})
|
||||
})
|
||||
|
||||
When("trying the kubeconfig command", func() {
|
||||
It("can generate a kubeconfig", func() {
|
||||
var (
|
||||
stderr string
|
||||
err error
|
||||
)
|
||||
|
||||
clusterName := "cluster-" + rand.String(5)
|
||||
clusterNamespace := "k3k-" + clusterName
|
||||
|
||||
DeferCleanup(func() {
|
||||
DeleteNamespaces(clusterNamespace)
|
||||
})
|
||||
|
||||
_, stderr, err = K3kcli("cluster", "create", clusterName)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stderr).To(ContainSubstring("You can start using the cluster"))
|
||||
|
||||
_, stderr, err = K3kcli("kubeconfig", "generate", "--name", clusterName)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stderr).To(ContainSubstring("You can start using the cluster"))
|
||||
|
||||
_, stderr, err = K3kcli("cluster", "delete", clusterName)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stderr).To(ContainSubstring(`Deleting '%s' cluster in namespace '%s'`, clusterName, clusterNamespace))
|
||||
})
|
||||
})
|
||||
})
|
||||
235
tests/e2e/cluster_addons_test.go
Normal file
@@ -0,0 +1,235 @@
|
||||
package k3k_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
const (
|
||||
addonsTestsLabel = "addons"
|
||||
addonsSecretName = "k3s-addons"
|
||||
secretMountManifestMountPath = "/var/lib/rancher/k3s/server/manifests/nginx.yaml"
|
||||
addonManifestMountPath = "/var/lib/rancher/k3s/server/manifests/k3s-addons/nginx.yaml"
|
||||
)
|
||||
|
||||
var _ = When("a cluster with secretMounts configuration is used to load addons", Label("e2e"), Label(addonsTestsLabel), func() {
|
||||
var virtualCluster *VirtualCluster
|
||||
|
||||
BeforeEach(func() {
|
||||
ctx := context.Background()
|
||||
|
||||
namespace := NewNamespace()
|
||||
|
||||
// Create the addon secret
|
||||
err := createAddonSecret(ctx, namespace.Name)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
DeferCleanup(func() {
|
||||
DeleteNamespaces(namespace.Name)
|
||||
})
|
||||
|
||||
cluster := NewCluster(namespace.Name)
|
||||
|
||||
cluster.Spec.SecretMounts = []v1beta1.SecretMount{
|
||||
{
|
||||
SecretVolumeSource: v1.SecretVolumeSource{
|
||||
SecretName: addonsSecretName,
|
||||
},
|
||||
MountPath: secretMountManifestMountPath,
|
||||
SubPath: "nginx.yaml",
|
||||
},
|
||||
}
|
||||
|
||||
CreateCluster(cluster)
|
||||
|
||||
virtualClient, restConfig := NewVirtualK8sClientAndConfig(cluster)
|
||||
|
||||
virtualCluster = &VirtualCluster{
|
||||
Cluster: cluster,
|
||||
RestConfig: restConfig,
|
||||
Client: virtualClient,
|
||||
}
|
||||
})
|
||||
|
||||
It("will load the addon manifest in server pod", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
serverPods := listServerPods(ctx, virtualCluster)
|
||||
|
||||
Expect(len(serverPods)).To(Equal(1))
|
||||
serverPod := serverPods[0]
|
||||
|
||||
addonContent, err := readFileWithinPod(ctx, k8s, restcfg, serverPod.Name, serverPod.Namespace, secretMountManifestMountPath)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
addonTestFile, err := os.ReadFile("testdata/addons/nginx.yaml")
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
Expect(addonContent).To(Equal(addonTestFile))
|
||||
})
|
||||
|
||||
It("will deploy the addon pod in the virtual cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
nginxPod, err := virtualCluster.Client.CoreV1().Pods("default").Get(ctx, "nginx-addon", metav1.GetOptions{})
|
||||
g.Expect(err).To(Not(HaveOccurred()))
|
||||
g.Expect(nginxPod.Status.Phase).To(Equal(v1.PodRunning))
|
||||
}).
|
||||
WithTimeout(time.Minute * 3).
|
||||
WithPolling(time.Second * 5).
|
||||
Should(Succeed())
|
||||
})
|
||||
})
|
||||
|
||||
var _ = When("a cluster with addon configuration is used with addons secret in the same namespace", Label("e2e"), Label(addonsTestsLabel), func() {
|
||||
var virtualCluster *VirtualCluster
|
||||
|
||||
BeforeEach(func() {
|
||||
ctx := context.Background()
|
||||
|
||||
namespace := NewNamespace()
|
||||
|
||||
// Create the addon secret
|
||||
err := createAddonSecret(ctx, namespace.Name)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
DeferCleanup(func() {
|
||||
DeleteNamespaces(namespace.Name)
|
||||
})
|
||||
|
||||
cluster := NewCluster(namespace.Name)
|
||||
|
||||
cluster.Spec.Addons = []v1beta1.Addon{
|
||||
{
|
||||
SecretNamespace: namespace.Name,
|
||||
SecretRef: addonsSecretName,
|
||||
},
|
||||
}
|
||||
|
||||
CreateCluster(cluster)
|
||||
|
||||
virtualClient, restConfig := NewVirtualK8sClientAndConfig(cluster)
|
||||
|
||||
virtualCluster = &VirtualCluster{
|
||||
Cluster: cluster,
|
||||
RestConfig: restConfig,
|
||||
Client: virtualClient,
|
||||
}
|
||||
|
||||
serverPods := listServerPods(ctx, virtualCluster)
|
||||
|
||||
Expect(len(serverPods)).To(Equal(1))
|
||||
serverPod := serverPods[0]
|
||||
|
||||
addonContent, err := readFileWithinPod(ctx, k8s, restcfg, serverPod.Name, serverPod.Namespace, addonManifestMountPath)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
addonTestFile, err := os.ReadFile("testdata/addons/nginx.yaml")
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
Expect(addonContent).To(Equal(addonTestFile))
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
nginxPod, err := virtualCluster.Client.CoreV1().Pods("default").Get(ctx, "nginx-addon", metav1.GetOptions{})
|
||||
g.Expect(err).To(Not(HaveOccurred()))
|
||||
g.Expect(nginxPod.Status.Phase).To(Equal(v1.PodRunning))
|
||||
}).
|
||||
WithTimeout(time.Minute * 3).
|
||||
WithPolling(time.Second * 5).
|
||||
Should(Succeed())
|
||||
})
|
||||
})
|
||||
|
||||
var _ = When("a cluster with addon configuration is used with addons secret in the different namespace", Label("e2e"), Label(addonsTestsLabel), func() {
|
||||
var virtualCluster *VirtualCluster
|
||||
|
||||
BeforeEach(func() {
|
||||
ctx := context.Background()
|
||||
|
||||
namespace := NewNamespace()
|
||||
secretNamespace := NewNamespace()
|
||||
|
||||
// Create the addon secret
|
||||
err := createAddonSecret(ctx, secretNamespace.Name)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
DeferCleanup(func() {
|
||||
DeleteNamespaces(namespace.Name, secretNamespace.Name)
|
||||
})
|
||||
|
||||
cluster := NewCluster(namespace.Name)
|
||||
|
||||
cluster.Spec.Addons = []v1beta1.Addon{
|
||||
{
|
||||
SecretNamespace: secretNamespace.Name,
|
||||
SecretRef: addonsSecretName,
|
||||
},
|
||||
}
|
||||
|
||||
CreateCluster(cluster)
|
||||
|
||||
virtualClient, restConfig := NewVirtualK8sClientAndConfig(cluster)
|
||||
|
||||
virtualCluster = &VirtualCluster{
|
||||
Cluster: cluster,
|
||||
RestConfig: restConfig,
|
||||
Client: virtualClient,
|
||||
}
|
||||
})
|
||||
|
||||
It("will load the addon manifest in server pod and deploys the pod", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
serverPods := listServerPods(ctx, virtualCluster)
|
||||
|
||||
Expect(len(serverPods)).To(Equal(1))
|
||||
serverPod := serverPods[0]
|
||||
|
||||
addonContent, err := readFileWithinPod(ctx, k8s, restcfg, serverPod.Name, serverPod.Namespace, addonManifestMountPath)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
addonTestFile, err := os.ReadFile("testdata/addons/nginx.yaml")
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
Expect(addonContent).To(Equal(addonTestFile))
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
nginxPod, err := virtualCluster.Client.CoreV1().Pods("default").Get(ctx, "nginx-addon", metav1.GetOptions{})
|
||||
g.Expect(err).To(Not(HaveOccurred()))
|
||||
g.Expect(nginxPod.Status.Phase).To(Equal(v1.PodRunning))
|
||||
}).
|
||||
WithTimeout(time.Minute * 3).
|
||||
WithPolling(time.Second * 5).
|
||||
Should(Succeed())
|
||||
})
|
||||
})
|
||||
|
||||
func createAddonSecret(ctx context.Context, namespace string) error {
|
||||
addonContent, err := os.ReadFile("testdata/addons/nginx.yaml")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
secret := &v1.Secret{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Secret",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: addonsSecretName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"nginx.yaml": addonContent,
|
||||
},
|
||||
}
|
||||
|
||||
return k8sClient.Create(ctx, secret)
|
||||
}
|
||||
341
tests/e2e/cluster_app_test.go
Normal file
@@ -0,0 +1,341 @@
|
||||
package k3k_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/utils/ptr"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = Context("In a shared cluster", Label(e2eTestLabel), Ordered, func() {
|
||||
var (
|
||||
virtualCluster *VirtualCluster
|
||||
translator *translate.ToHostTranslator
|
||||
)
|
||||
|
||||
BeforeAll(func() {
|
||||
virtualCluster = NewVirtualCluster()
|
||||
translator = translate.NewHostTranslator(virtualCluster.Cluster)
|
||||
|
||||
DeferCleanup(func() {
|
||||
DeleteNamespaces(virtualCluster.Cluster.Namespace)
|
||||
})
|
||||
})
|
||||
|
||||
When("creating a Deployment with a PVC", func() {
|
||||
var (
|
||||
deployment *appsv1.Deployment
|
||||
pvc *v1.PersistentVolumeClaim
|
||||
|
||||
namespace = "default"
|
||||
labels = map[string]string{
|
||||
"app": "k3k-deployment-test-app",
|
||||
}
|
||||
)
|
||||
|
||||
BeforeAll(func() {
|
||||
var err error
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
By("Creating the PVC")
|
||||
|
||||
pvc = &v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "k3k-test-app-",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1.PersistentVolumeClaimSpec{
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
|
||||
Resources: v1.VolumeResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceStorage: resource.MustParse("1Gi"),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
pvc, err = virtualCluster.Client.CoreV1().PersistentVolumeClaims(namespace).Create(ctx, pvc, metav1.CreateOptions{})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
By("Creating the Deployment")
|
||||
|
||||
deployment = &appsv1.Deployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "k3k-test-app-",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: appsv1.DeploymentSpec{
|
||||
Replicas: ptr.To[int32](3),
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: labels,
|
||||
},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: labels,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "nginx",
|
||||
Image: "nginx",
|
||||
VolumeMounts: []v1.VolumeMount{{
|
||||
Name: "data-volume",
|
||||
MountPath: "/data",
|
||||
}},
|
||||
},
|
||||
},
|
||||
Volumes: []v1.Volume{{
|
||||
Name: "data-volume",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: pvc.Name,
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
deployment, err = virtualCluster.Client.AppsV1().Deployments(namespace).Create(ctx, deployment, metav1.CreateOptions{})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
})
|
||||
|
||||
It("should bound the PVC in the virtual cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
virtualPVC, err := virtualCluster.Client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvc.Name, metav1.GetOptions{})
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
g.Expect(virtualPVC.Status.Phase).To(Equal(v1.ClaimBound))
|
||||
}).
|
||||
WithPolling(time.Second * 3).
|
||||
WithTimeout(time.Minute * 3).
|
||||
Should(Succeed())
|
||||
})
|
||||
|
||||
It("should bound the PVC in the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
hostPVCName := translator.NamespacedName(pvc)
|
||||
|
||||
hostPVC, err := k8s.CoreV1().PersistentVolumeClaims(hostPVCName.Namespace).Get(ctx, hostPVCName.Name, metav1.GetOptions{})
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
g.Expect(hostPVC.Status.Phase).To(Equal(v1.ClaimBound))
|
||||
}).
|
||||
WithPolling(time.Second * 3).
|
||||
WithTimeout(time.Minute * 3).
|
||||
Should(Succeed())
|
||||
})
|
||||
|
||||
It("should have the Pods running in the virtual cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
labelSelector := metav1.FormatLabelSelector(deployment.Spec.Selector)
|
||||
listOpts := metav1.ListOptions{LabelSelector: labelSelector}
|
||||
|
||||
pods, err := virtualCluster.Client.CoreV1().Pods(namespace).List(ctx, listOpts)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
g.Expect(pods.Items).Should(HaveLen(int(*deployment.Spec.Replicas)))
|
||||
|
||||
for _, pod := range pods.Items {
|
||||
g.Expect(pod.Status.Phase).To(Equal(v1.PodRunning))
|
||||
}
|
||||
}).
|
||||
WithPolling(time.Second * 3).
|
||||
WithTimeout(time.Minute * 3).
|
||||
Should(Succeed())
|
||||
})
|
||||
|
||||
It("should have the Pods running in the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
labelSelector := metav1.FormatLabelSelector(deployment.Spec.Selector)
|
||||
listOpts := metav1.ListOptions{LabelSelector: labelSelector}
|
||||
|
||||
pods, err := virtualCluster.Client.CoreV1().Pods(namespace).List(ctx, listOpts)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
g.Expect(pods.Items).Should(HaveLen(int(*deployment.Spec.Replicas)))
|
||||
|
||||
for _, pod := range pods.Items {
|
||||
hostPodName := translator.NamespacedName(&pod)
|
||||
|
||||
pod, err := k8s.CoreV1().Pods(hostPodName.Namespace).Get(ctx, hostPodName.Name, metav1.GetOptions{})
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
g.Expect(pod.Status.Phase).To(Equal(v1.PodRunning))
|
||||
}
|
||||
}).
|
||||
WithPolling(time.Second * 3).
|
||||
WithTimeout(time.Minute * 3).
|
||||
Should(Succeed())
|
||||
})
|
||||
})
|
||||
|
||||
When("creating a StatefulSet with a PVC", func() {
|
||||
var (
|
||||
statefulSet *appsv1.StatefulSet
|
||||
|
||||
namespace = "default"
|
||||
labels = map[string]string{
|
||||
"app": "k3k-sts-test-app",
|
||||
}
|
||||
)
|
||||
|
||||
BeforeAll(func() {
|
||||
var err error
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
namespace := "default"
|
||||
|
||||
By("Creating the StatefulSet")
|
||||
|
||||
statefulSet = &appsv1.StatefulSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "k3k-sts-test-app-",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: appsv1.StatefulSetSpec{
|
||||
Replicas: ptr.To[int32](3),
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: labels,
|
||||
},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: labels,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "nginx",
|
||||
Image: "nginx",
|
||||
VolumeMounts: []v1.VolumeMount{{
|
||||
Name: "www",
|
||||
MountPath: "/usr/share/nginx/html",
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
VolumeClaimTemplates: []v1.PersistentVolumeClaim{{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "www",
|
||||
Labels: labels,
|
||||
},
|
||||
Spec: v1.PersistentVolumeClaimSpec{
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
|
||||
Resources: v1.VolumeResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceStorage: resource.MustParse("1Gi"),
|
||||
},
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
}
|
||||
|
||||
statefulSet, err = virtualCluster.Client.AppsV1().StatefulSets(namespace).Create(ctx, statefulSet, metav1.CreateOptions{})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
})
|
||||
|
||||
It("should bound the PVCs in the virtual cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
labelSelector := metav1.FormatLabelSelector(statefulSet.Spec.Selector)
|
||||
listOpts := metav1.ListOptions{LabelSelector: labelSelector}
|
||||
|
||||
pvcs, err := virtualCluster.Client.CoreV1().PersistentVolumeClaims(namespace).List(ctx, listOpts)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
for _, pvc := range pvcs.Items {
|
||||
g.Expect(pvc.Status.Phase).To(Equal(v1.ClaimBound))
|
||||
}
|
||||
}).
|
||||
WithPolling(time.Second * 3).
|
||||
WithTimeout(time.Minute * 3).
|
||||
Should(Succeed())
|
||||
})
|
||||
|
||||
It("should bound the PVCs in the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
labelSelector := metav1.FormatLabelSelector(statefulSet.Spec.Selector)
|
||||
listOpts := metav1.ListOptions{LabelSelector: labelSelector}
|
||||
|
||||
pvcs, err := virtualCluster.Client.CoreV1().PersistentVolumeClaims(statefulSet.Namespace).List(ctx, listOpts)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
for _, pvc := range pvcs.Items {
|
||||
hostPVCName := translator.NamespacedName(&pvc)
|
||||
|
||||
hostPVC, err := k8s.CoreV1().PersistentVolumeClaims(hostPVCName.Namespace).Get(ctx, hostPVCName.Name, metav1.GetOptions{})
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
g.Expect(hostPVC.Status.Phase).To(Equal(v1.ClaimBound))
|
||||
}
|
||||
}).
|
||||
WithPolling(time.Second * 3).
|
||||
WithTimeout(time.Minute * 3).
|
||||
Should(Succeed())
|
||||
})
|
||||
|
||||
It("should have the Pods running in the virtual cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
labelSelector := metav1.FormatLabelSelector(statefulSet.Spec.Selector)
|
||||
listOpts := metav1.ListOptions{LabelSelector: labelSelector}
|
||||
|
||||
pods, err := virtualCluster.Client.CoreV1().Pods(namespace).List(ctx, listOpts)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
g.Expect(pods.Items).Should(HaveLen(int(*statefulSet.Spec.Replicas)))
|
||||
|
||||
for _, pod := range pods.Items {
|
||||
g.Expect(pod.Status.Phase).To(Equal(v1.PodRunning))
|
||||
}
|
||||
}).
|
||||
WithPolling(time.Second * 3).
|
||||
WithTimeout(time.Minute * 3).
|
||||
Should(Succeed())
|
||||
})
|
||||
|
||||
It("should have the Pods running in the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
labelSelector := metav1.FormatLabelSelector(statefulSet.Spec.Selector)
|
||||
listOpts := metav1.ListOptions{LabelSelector: labelSelector}
|
||||
|
||||
pods, err := virtualCluster.Client.CoreV1().Pods(namespace).List(ctx, listOpts)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
g.Expect(pods.Items).Should(HaveLen(int(*statefulSet.Spec.Replicas)))
|
||||
|
||||
for _, pod := range pods.Items {
|
||||
hostPodName := translator.NamespacedName(&pod)
|
||||
|
||||
pod, err := k8s.CoreV1().Pods(hostPodName.Namespace).Get(ctx, hostPodName.Name, metav1.GetOptions{})
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
g.Expect(pod.Status.Phase).To(Equal(v1.PodRunning))
|
||||
}
|
||||
}).
|
||||
WithPolling(time.Second * 3).
|
||||
WithTimeout(time.Minute * 3).
|
||||
Should(Succeed())
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -5,8 +5,6 @@ import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
@@ -38,18 +36,24 @@ var _ = When("a cluster with custom certificates is installed with individual ce
|
||||
}
|
||||
|
||||
for _, certName := range certList {
|
||||
var cert, key []byte
|
||||
var err error
|
||||
var (
|
||||
cert, key []byte
|
||||
err error
|
||||
)
|
||||
|
||||
filePathPrefix := ""
|
||||
certfile := certName
|
||||
|
||||
if strings.HasPrefix(certName, "etcd") {
|
||||
filePathPrefix = "etcd/"
|
||||
certfile = strings.TrimPrefix(certName, "etcd-")
|
||||
}
|
||||
|
||||
if !strings.Contains(certName, "service") {
|
||||
cert, err = os.ReadFile(customCertDir + filePathPrefix + certfile + ".crt")
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
}
|
||||
|
||||
key, err = os.ReadFile(customCertDir + filePathPrefix + certfile + ".key")
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
@@ -98,12 +102,10 @@ var _ = When("a cluster with custom certificates is installed with individual ce
|
||||
It("will load the custom certs in the server pod", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server"
|
||||
serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
serverPods := listServerPods(ctx, virtualCluster)
|
||||
|
||||
Expect(len(serverPods.Items)).To(Equal(1))
|
||||
serverPod := serverPods.Items[0]
|
||||
Expect(len(serverPods)).To(Equal(1))
|
||||
serverPod := serverPods[0]
|
||||
|
||||
// check server-ca.crt
|
||||
serverCACrtPath := "/var/lib/rancher/k3s/server/tls/server-ca.crt"
|
||||
44
tests/e2e/cluster_create_test.go
Normal file
@@ -0,0 +1,44 @@
|
||||
package k3k_test
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = When("creating a shared mode cluster", Label(e2eTestLabel), Label(slowTestsLabel), func() {
|
||||
var virtualCluster *VirtualCluster
|
||||
|
||||
BeforeEach(func() {
|
||||
namespace := NewNamespace()
|
||||
|
||||
DeferCleanup(func() {
|
||||
DeleteNamespaces(namespace.Name)
|
||||
})
|
||||
|
||||
cluster := NewCluster(namespace.Name)
|
||||
CreateCluster(cluster)
|
||||
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
|
||||
|
||||
virtualCluster = &VirtualCluster{
|
||||
Cluster: cluster,
|
||||
RestConfig: restConfig,
|
||||
Client: client,
|
||||
}
|
||||
})
|
||||
|
||||
It("creates nodes with the worker role", func() {
|
||||
Eventually(func(g Gomega) {
|
||||
nodes, err := virtualCluster.Client.CoreV1().Nodes().List(GinkgoT().Context(), metav1.ListOptions{})
|
||||
g.Expect(err).To(Not(HaveOccurred()))
|
||||
g.Expect(nodes.Items).To(HaveLen(1))
|
||||
g.Expect(nodes.Items[0].Labels).To(HaveKeyWithValue("node-role.kubernetes.io/worker", "true"))
|
||||
}).
|
||||
WithTimeout(time.Minute).
|
||||
WithPolling(time.Second).
|
||||
Should(Succeed())
|
||||
})
|
||||
})
|
||||
@@ -59,12 +59,10 @@ var _ = When("an ephemeral cluster is installed", Label(e2eTestLabel), Label(per
|
||||
_, err := virtualCluster.Client.ServerVersion()
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server"
|
||||
serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
serverPods := listServerPods(ctx, virtualCluster)
|
||||
|
||||
Expect(len(serverPods.Items)).To(Equal(1))
|
||||
serverPod := serverPods.Items[0]
|
||||
Expect(len(serverPods)).To(Equal(1))
|
||||
serverPod := serverPods[0]
|
||||
|
||||
GinkgoWriter.Printf("deleting pod %s/%s\n", serverPod.Namespace, serverPod.Name)
|
||||
|
||||
@@ -75,10 +73,10 @@ var _ = When("an ephemeral cluster is installed", Label(e2eTestLabel), Label(per
|
||||
|
||||
// check that the server pods restarted
|
||||
Eventually(func() any {
|
||||
serverPods, err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
Expect(len(serverPods.Items)).To(Equal(1))
|
||||
return serverPods.Items[0].DeletionTimestamp
|
||||
serverPods := listServerPods(ctx, virtualCluster)
|
||||
Expect(len(serverPods)).To(Equal(1))
|
||||
|
||||
return serverPods[0].DeletionTimestamp
|
||||
}).
|
||||
WithTimeout(time.Minute).
|
||||
WithPolling(time.Second * 5).
|
||||
@@ -90,7 +88,9 @@ var _ = When("an ephemeral cluster is installed", Label(e2eTestLabel), Label(per
|
||||
|
||||
Eventually(func() bool {
|
||||
_, err = virtualCluster.Client.DiscoveryClient.ServerVersion()
|
||||
|
||||
var unknownAuthorityErr x509.UnknownAuthorityError
|
||||
|
||||
return errors.As(err, &unknownAuthorityErr)
|
||||
}).
|
||||
WithTimeout(time.Minute * 2).
|
||||
@@ -102,6 +102,7 @@ var _ = When("an ephemeral cluster is installed", Label(e2eTestLabel), Label(per
|
||||
Eventually(func() error {
|
||||
virtualCluster.Client, virtualCluster.RestConfig = NewVirtualK8sClientAndConfig(virtualCluster.Cluster)
|
||||
_, err = virtualCluster.Client.DiscoveryClient.ServerVersion()
|
||||
|
||||
return err
|
||||
}).
|
||||
WithTimeout(time.Minute * 2).
|
||||
@@ -20,16 +20,132 @@ import (
|
||||
)
|
||||
|
||||
var _ = Context("In a shared cluster", Label(e2eTestLabel), Ordered, func() {
|
||||
var virtualCluster *VirtualCluster
|
||||
var (
|
||||
virtualCluster *VirtualCluster
|
||||
translator *translate.ToHostTranslator
|
||||
)
|
||||
|
||||
BeforeAll(func() {
|
||||
virtualCluster = NewVirtualCluster()
|
||||
translator = translate.NewHostTranslator(virtualCluster.Cluster)
|
||||
|
||||
DeferCleanup(func() {
|
||||
DeleteNamespaces(virtualCluster.Cluster.Namespace)
|
||||
})
|
||||
})
|
||||
|
||||
When("creating a Pod without any Affinity", func() {
|
||||
var pod *v1.Pod
|
||||
|
||||
BeforeAll(func() {
|
||||
var err error
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
pod = &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "nginx-",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{
|
||||
Name: "nginx",
|
||||
Image: "nginx",
|
||||
}},
|
||||
},
|
||||
}
|
||||
|
||||
pod, err = virtualCluster.Client.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
})
|
||||
|
||||
It("should have the default Affinity", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
hostPodName := translator.NamespacedName(pod)
|
||||
|
||||
hostPod, err := k8s.CoreV1().Pods(hostPodName.Namespace).Get(ctx, hostPodName.Name, metav1.GetOptions{})
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
g.Expect(hostPod.Spec.Affinity).To(Not(BeNil()))
|
||||
g.Expect(hostPod.Spec.Affinity.NodeAffinity).To(Not(BeNil()))
|
||||
g.Expect(hostPod.Spec.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution).To(Not(BeNil()))
|
||||
|
||||
preferredScheduling := hostPod.Spec.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution
|
||||
g.Expect(preferredScheduling).To(Not(BeEmpty()))
|
||||
g.Expect(preferredScheduling[0].Weight).To(Equal(int32(100)))
|
||||
g.Expect(preferredScheduling[0].Preference.MatchExpressions).To(Not(BeEmpty()))
|
||||
g.Expect(preferredScheduling[0].Preference.MatchExpressions[0].Key).To(Equal("kubernetes.io/hostname"))
|
||||
}).
|
||||
WithPolling(time.Second).
|
||||
WithTimeout(time.Minute).
|
||||
Should(Succeed())
|
||||
})
|
||||
})
|
||||
|
||||
When("creating a Pod with an Affinity", func() {
|
||||
var pod *v1.Pod
|
||||
|
||||
BeforeAll(func() {
|
||||
var err error
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
pod = &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "nginx-",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{
|
||||
Name: "nginx",
|
||||
Image: "nginx",
|
||||
}},
|
||||
Affinity: &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{{
|
||||
Key: "kubernetes.io/hostname",
|
||||
Operator: v1.NodeSelectorOpNotIn,
|
||||
Values: []string{"fake"},
|
||||
}},
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
pod, err = virtualCluster.Client.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
})
|
||||
|
||||
It("should not have the default Affinity", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
hostPodName := translator.NamespacedName(pod)
|
||||
|
||||
hostPod, err := k8s.CoreV1().Pods(hostPodName.Namespace).Get(ctx, hostPodName.Name, metav1.GetOptions{})
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
g.Expect(hostPod.Spec.Affinity).To(Not(BeNil()))
|
||||
g.Expect(hostPod.Spec.Affinity.NodeAffinity).To(Not(BeNil()))
|
||||
g.Expect(hostPod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution).To(Not(BeNil()))
|
||||
|
||||
requiredScheduling := hostPod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution
|
||||
g.Expect(requiredScheduling).To(Not(BeNil()))
|
||||
g.Expect(requiredScheduling.NodeSelectorTerms).To(Not(BeEmpty()))
|
||||
g.Expect(requiredScheduling.NodeSelectorTerms[0].MatchExpressions).To(Not(BeEmpty()))
|
||||
g.Expect(requiredScheduling.NodeSelectorTerms[0].MatchExpressions[0].Key).To(Equal("kubernetes.io/hostname"))
|
||||
g.Expect(requiredScheduling.NodeSelectorTerms[0].MatchExpressions[0].Values).To(ContainElement("fake"))
|
||||
}).
|
||||
WithPolling(time.Second).
|
||||
WithTimeout(time.Minute).
|
||||
Should(Succeed())
|
||||
})
|
||||
})
|
||||
|
||||
When("creating a Pod with an invalid configuration", func() {
|
||||
var virtualPod *v1.Pod
|
||||
|
||||
@@ -89,6 +205,7 @@ var _ = Context("In a shared cluster", Label(e2eTestLabel), Ordered, func() {
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
var err error
|
||||
|
||||
virtualPod, err = virtualCluster.Client.CoreV1().Pods(p.Namespace).Create(ctx, p, metav1.CreateOptions{})
|
||||
@@ -110,6 +227,7 @@ var _ = Context("In a shared cluster", Label(e2eTestLabel), Ordered, func() {
|
||||
g.Expect(envVars).NotTo(BeEmpty())
|
||||
|
||||
var found bool
|
||||
|
||||
for _, envVar := range envVars {
|
||||
if envVar.Name == "POD_NAME" {
|
||||
found = true
|
||||
@@ -117,9 +235,11 @@ var _ = Context("In a shared cluster", Label(e2eTestLabel), Ordered, func() {
|
||||
g.Expect(envVars[0].ValueFrom).NotTo(BeNil())
|
||||
g.Expect(envVars[0].ValueFrom.FieldRef).NotTo(BeNil())
|
||||
g.Expect(envVars[0].ValueFrom.FieldRef.FieldPath).To(Equal("metadata.name"))
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
g.Expect(found).To(BeTrue())
|
||||
|
||||
containerStatuses := pod.Status.ContainerStatuses
|
||||
@@ -136,7 +256,6 @@ var _ = Context("In a shared cluster", Label(e2eTestLabel), Ordered, func() {
|
||||
By("Checking the container status of the Pod in the Host Cluster")
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
translator := translate.NewHostTranslator(virtualCluster.Cluster)
|
||||
hostPodName := translator.NamespacedName(virtualPod)
|
||||
|
||||
pod, err := k8s.CoreV1().Pods(hostPodName.Namespace).Get(ctx, hostPodName.Name, metav1.GetOptions{})
|
||||
@@ -148,15 +267,18 @@ var _ = Context("In a shared cluster", Label(e2eTestLabel), Ordered, func() {
|
||||
g.Expect(envVars).NotTo(BeEmpty())
|
||||
|
||||
var found bool
|
||||
|
||||
for _, envVar := range envVars {
|
||||
if envVar.Name == "POD_NAME" {
|
||||
found = true
|
||||
|
||||
g.Expect(envVar.ValueFrom).To(BeNil())
|
||||
g.Expect(envVar.Value).To(Equal(virtualPod.Name))
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
g.Expect(found).To(BeTrue())
|
||||
|
||||
containerStatuses := pod.Status.ContainerStatuses
|
||||
@@ -200,7 +322,6 @@ var _ = Context("In a shared cluster", Label(e2eTestLabel), Ordered, func() {
|
||||
By("Checking the status of the Pod in the Host Cluster")
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
translator := translate.NewHostTranslator(virtualCluster.Cluster)
|
||||
hostPodName := translator.NamespacedName(virtualPod)
|
||||
|
||||
hPod, err := k8s.CoreV1().Pods(hostPodName.Namespace).Get(ctx, hostPodName.Name, metav1.GetOptions{})
|
||||
@@ -216,6 +337,82 @@ var _ = Context("In a shared cluster", Label(e2eTestLabel), Ordered, func() {
|
||||
})
|
||||
})
|
||||
|
||||
When("creating a Pod with downward API variables in environment variable", func() {
|
||||
var virtualPod *v1.Pod
|
||||
|
||||
BeforeEach(func() {
|
||||
ctx := context.Background()
|
||||
|
||||
var err error
|
||||
|
||||
p := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "nginx-",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "nginx",
|
||||
Image: "nginx",
|
||||
Env: []v1.EnvVar{
|
||||
{
|
||||
Name: "POD_NAME",
|
||||
ValueFrom: &v1.EnvVarSource{
|
||||
FieldRef: &v1.ObjectFieldSelector{
|
||||
FieldPath: "metadata.name",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "STATUS_POD_IP",
|
||||
ValueFrom: &v1.EnvVarSource{
|
||||
FieldRef: &v1.ObjectFieldSelector{
|
||||
FieldPath: "status.podIP",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
virtualPod, err = virtualCluster.Client.CoreV1().Pods(p.Namespace).Create(ctx, p, metav1.CreateOptions{})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
})
|
||||
|
||||
It("should be scheduled and running in the virtual cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
pod, err := virtualCluster.Client.CoreV1().Pods(virtualPod.Namespace).Get(ctx, virtualPod.Name, metav1.GetOptions{})
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
g.Expect(pod.Status.Phase).To(Equal(v1.PodRunning))
|
||||
g.Expect(pod.Status.PodIP).NotTo(BeEmpty())
|
||||
}).
|
||||
WithPolling(time.Second).
|
||||
WithTimeout(time.Minute).
|
||||
Should(Succeed())
|
||||
})
|
||||
|
||||
It("should be scheduled and running in the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
translator := translate.NewHostTranslator(virtualCluster.Cluster)
|
||||
hostPodName := translator.NamespacedName(virtualPod)
|
||||
|
||||
pod, err := k8s.CoreV1().Pods(hostPodName.Namespace).Get(ctx, hostPodName.Name, metav1.GetOptions{})
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
g.Expect(pod.Status.Phase).To(Equal(v1.PodRunning))
|
||||
}).
|
||||
WithPolling(time.Second).
|
||||
WithTimeout(time.Minute).
|
||||
Should(Succeed())
|
||||
})
|
||||
})
|
||||
|
||||
When("installing the nginx-ingress controller", func() {
|
||||
BeforeAll(func() {
|
||||
By("installing the nginx-ingress controller")
|
||||
200
tests/e2e/cluster_policy_sync_storageclass_test.go
Normal file
@@ -0,0 +1,200 @@
|
||||
package k3k_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"k8s.io/utils/ptr"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
storagev1 "k8s.io/api/storage/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
|
||||
"github.com/rancher/k3k/pkg/controller/cluster"
|
||||
"github.com/rancher/k3k/pkg/controller/policy"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = When("a shared mode cluster is created in a namespace with a policy", Ordered, Label(e2eTestLabel), func() {
|
||||
var (
|
||||
ctx context.Context
|
||||
virtualCluster *VirtualCluster
|
||||
vcp *v1beta1.VirtualClusterPolicy
|
||||
)
|
||||
|
||||
BeforeAll(func() {
|
||||
ctx = context.Background()
|
||||
|
||||
// 1. Create StorageClasses in host
|
||||
storageClassEnabled := &storagev1.StorageClass{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "sc-policy-enabled-",
|
||||
Labels: map[string]string{
|
||||
cluster.SyncEnabledLabelKey: "true",
|
||||
},
|
||||
},
|
||||
Provisioner: "my-provisioner",
|
||||
}
|
||||
|
||||
var err error
|
||||
|
||||
storageClassEnabled, err = k8s.StorageV1().StorageClasses().Create(ctx, storageClassEnabled, metav1.CreateOptions{})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
storageClassDisabled := &storagev1.StorageClass{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "sc-policy-disabled-",
|
||||
Labels: map[string]string{
|
||||
cluster.SyncEnabledLabelKey: "false",
|
||||
},
|
||||
},
|
||||
Provisioner: "my-provisioner",
|
||||
}
|
||||
|
||||
storageClassDisabled, err = k8s.StorageV1().StorageClasses().Create(ctx, storageClassDisabled, metav1.CreateOptions{})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
// 2. Create VirtualClusterPolicy with StorageClass sync enabled
|
||||
vcp = &v1beta1.VirtualClusterPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "vcp-sync-sc-",
|
||||
},
|
||||
Spec: v1beta1.VirtualClusterPolicySpec{
|
||||
Sync: &v1beta1.SyncConfig{
|
||||
StorageClasses: v1beta1.StorageClassSyncConfig{
|
||||
Enabled: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
err = k8sClient.Create(ctx, vcp)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
// 3. Create Namespace with policy label
|
||||
ns := &corev1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "ns-vcp-",
|
||||
Labels: map[string]string{
|
||||
policy.PolicyNameLabelKey: vcp.Name,
|
||||
},
|
||||
},
|
||||
}
|
||||
// We use the k8s clientset for namespace creation to stay consistent with other tests
|
||||
ns, err = k8s.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
// 4. Create VirtualCluster in that namespace
|
||||
// The cluster doesn't have storage class sync enabled in its spec
|
||||
clusterObj := NewCluster(ns.Name)
|
||||
clusterObj.Spec.Sync = &v1beta1.SyncConfig{
|
||||
StorageClasses: v1beta1.StorageClassSyncConfig{
|
||||
Enabled: false,
|
||||
},
|
||||
}
|
||||
clusterObj.Spec.Expose.NodePort.ServerPort = ptr.To[int32](30000)
|
||||
|
||||
CreateCluster(clusterObj)
|
||||
|
||||
client, restConfig, kubeconfig := NewVirtualK8sClientAndKubeconfig(clusterObj)
|
||||
virtualCluster = &VirtualCluster{
|
||||
Cluster: clusterObj,
|
||||
RestConfig: restConfig,
|
||||
Client: client,
|
||||
Kubeconfig: kubeconfig,
|
||||
}
|
||||
|
||||
DeferCleanup(func() {
|
||||
DeleteNamespaces(ns.Name)
|
||||
|
||||
err = k8s.StorageV1().StorageClasses().Delete(ctx, storageClassEnabled.Name, metav1.DeleteOptions{})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
err = k8s.StorageV1().StorageClasses().Delete(ctx, storageClassDisabled.Name, metav1.DeleteOptions{})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
err = k8sClient.Delete(ctx, vcp)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
})
|
||||
})
|
||||
|
||||
It("has the storage classes sync enabled from the policy", func() {
|
||||
Eventually(func(g Gomega) {
|
||||
key := client.ObjectKeyFromObject(virtualCluster.Cluster)
|
||||
g.Expect(k8sClient.Get(ctx, key, virtualCluster.Cluster)).To(Succeed())
|
||||
g.Expect(virtualCluster.Cluster.Status.Policy).To(Not(BeNil()))
|
||||
g.Expect(virtualCluster.Cluster.Status.Policy.Sync).To(Not(BeNil()))
|
||||
g.Expect(virtualCluster.Cluster.Status.Policy.Sync.StorageClasses.Enabled).To(BeTrue())
|
||||
}).
|
||||
WithTimeout(time.Second * 30).
|
||||
WithPolling(time.Second).
|
||||
Should(Succeed())
|
||||
})
|
||||
|
||||
It("will sync host storage classes with the sync enabled in the host", func() {
|
||||
Eventually(func(g Gomega) {
|
||||
hostStorageClasses, err := k8s.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
|
||||
g.Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
for _, hostSC := range hostStorageClasses.Items {
|
||||
// We only care about the storage classes we created for this test to avoid noise
|
||||
if hostSC.Labels[cluster.SyncEnabledLabelKey] == "true" {
|
||||
_, err := virtualCluster.Client.StorageV1().StorageClasses().Get(ctx, hostSC.Name, metav1.GetOptions{})
|
||||
g.Expect(err).To(Not(HaveOccurred()))
|
||||
}
|
||||
}
|
||||
}).
|
||||
WithPolling(time.Second).
|
||||
WithTimeout(time.Second * 60).
|
||||
Should(Succeed())
|
||||
})
|
||||
|
||||
It("will not sync host storage classes with the sync disabled in the host", func() {
|
||||
Eventually(func(g Gomega) {
|
||||
hostStorageClasses, err := k8s.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
|
||||
g.Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
for _, hostSC := range hostStorageClasses.Items {
|
||||
if hostSC.Labels[cluster.SyncEnabledLabelKey] == "false" {
|
||||
_, err := virtualCluster.Client.StorageV1().StorageClasses().Get(ctx, hostSC.Name, metav1.GetOptions{})
|
||||
g.Expect(err).To(HaveOccurred())
|
||||
g.Expect(apierrors.IsNotFound(err)).To(BeTrue())
|
||||
}
|
||||
}
|
||||
}).
|
||||
WithPolling(time.Second).
|
||||
WithTimeout(time.Second * 60).
|
||||
Should(Succeed())
|
||||
})
|
||||
|
||||
When("disabling the storage class sync in the policy", Ordered, func() {
|
||||
BeforeAll(func() {
|
||||
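// patch the policy to disable StorageClass sync; the previously synced classes are expected to be pruned from the virtual cluster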
original := vcp.DeepCopy()
|
||||
vcp.Spec.Sync.StorageClasses.Enabled = false
|
||||
err := k8sClient.Patch(ctx, vcp, client.MergeFrom(original))
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
})
|
||||
|
||||
It("will remove the synced storage classes from the virtual cluster", func() {
|
||||
Eventually(func(g Gomega) {
|
||||
hostStorageClasses, err := k8s.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
|
||||
g.Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
for _, hostSC := range hostStorageClasses.Items {
|
||||
if hostSC.Labels[cluster.SyncEnabledLabelKey] == "true" {
|
||||
_, err := virtualCluster.Client.StorageV1().StorageClasses().Get(ctx, hostSC.Name, metav1.GetOptions{})
|
||||
g.Expect(err).To(HaveOccurred())
|
||||
g.Expect(apierrors.IsNotFound(err)).To(BeTrue())
|
||||
}
|
||||
}
|
||||
}).
|
||||
WithPolling(time.Second).
|
||||
WithTimeout(time.Second * 60).
|
||||
Should(Succeed())
|
||||
})
|
||||
})
|
||||
})
|
||||
163
tests/e2e/cluster_registry_test.go
Normal file
@@ -0,0 +1,163 @@
|
||||
package k3k_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
|
||||
"github.com/rancher/k3k/pkg/controller/policy"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = When("a cluster with private registry configuration is used", Label("e2e"), Label(registryTestsLabel), func() {
|
||||
var virtualCluster *VirtualCluster
|
||||
|
||||
BeforeEach(func() {
|
||||
ctx := context.Background()
|
||||
|
||||
vcp := &v1beta1.VirtualClusterPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "policy-",
|
||||
},
|
||||
Spec: v1beta1.VirtualClusterPolicySpec{
|
||||
AllowedMode: v1beta1.VirtualClusterMode,
|
||||
DisableNetworkPolicy: true,
|
||||
},
|
||||
}
|
||||
Expect(k8sClient.Create(ctx, vcp)).To(Succeed())
|
||||
|
||||
namespace := NewNamespace()
|
||||
|
||||
err := k8sClient.Get(ctx, client.ObjectKeyFromObject(namespace), namespace)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
namespace.Labels = map[string]string{
|
||||
policy.PolicyNameLabelKey: vcp.Name,
|
||||
}
|
||||
Expect(k8sClient.Update(ctx, namespace)).To(Succeed())
|
||||
|
||||
DeferCleanup(func() {
|
||||
DeleteNamespaces(namespace.Name)
|
||||
Expect(k8sClient.Delete(ctx, vcp)).To(Succeed())
|
||||
})
|
||||
|
||||
err = privateRegistry(ctx, namespace.Name)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
cluster := NewCluster(namespace.Name)
|
||||
|
||||
// configure the cluster with the private registry secrets using SecretMounts
|
||||
// Using subPath allows mounting individual files while keeping parent directories writable
|
||||
cluster.Spec.SecretMounts = []v1beta1.SecretMount{
|
||||
{
|
||||
SecretVolumeSource: v1.SecretVolumeSource{
|
||||
SecretName: "k3s-registry-config",
|
||||
},
|
||||
MountPath: "/etc/rancher/k3s/registries.yaml",
|
||||
SubPath: "registries.yaml",
|
||||
},
|
||||
{
|
||||
SecretVolumeSource: v1.SecretVolumeSource{
|
||||
SecretName: "private-registry-ca-cert",
|
||||
},
|
||||
MountPath: "/etc/rancher/k3s/tls/ca.crt",
|
||||
SubPath: "tls.crt",
|
||||
},
|
||||
}
|
||||
|
||||
cluster.Spec.Mode = v1beta1.VirtualClusterMode
|
||||
|
||||
// airgap the k3k-server pod
|
||||
err = buildRegistryNetPolicy(ctx, cluster.Namespace)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
CreateCluster(cluster)
|
||||
|
||||
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
|
||||
|
||||
virtualCluster = &VirtualCluster{
|
||||
Cluster: cluster,
|
||||
RestConfig: restConfig,
|
||||
Client: client,
|
||||
}
|
||||
})
|
||||
|
||||
It("will be load the registries.yaml and crts in server pod", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
serverPods := listServerPods(ctx, virtualCluster)
|
||||
|
||||
Expect(len(serverPods)).To(Equal(1))
|
||||
serverPod := serverPods[0]
|
||||
|
||||
// check registries.yaml
|
||||
registriesConfigPath := "/etc/rancher/k3s/registries.yaml"
|
||||
registriesConfig, err := readFileWithinPod(ctx, k8s, restcfg, serverPod.Name, serverPod.Namespace, registriesConfigPath)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
registriesConfigTestFile, err := os.ReadFile("testdata/registry/registries.yaml")
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
Expect(registriesConfig).To(Equal(registriesConfigTestFile))
|
||||
|
||||
// check ca.crt
|
||||
CACrtPath := "/etc/rancher/k3s/tls/ca.crt"
|
||||
CACrt, err := readFileWithinPod(ctx, k8s, restcfg, serverPod.Name, serverPod.Namespace, CACrtPath)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
CACrtTestFile, err := os.ReadFile("testdata/registry/certs/ca.crt")
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
Expect(CACrt).To(Equal(CACrtTestFile))
|
||||
})
|
||||
It("will only pull images from mirrored docker.io registry", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
// make sure that any pod using docker.io mirror works
|
||||
virtualCluster.NewNginxPod("")
|
||||
|
||||
// creating a pod with image that uses any registry other than docker.io should fail
|
||||
// for example public.ecr.aws/docker/library/alpine:latest
|
||||
alpinePod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "alpine-",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{
|
||||
Name: "alpine",
|
||||
Image: "public.ecr.aws/docker/library/alpine:latest",
|
||||
}},
|
||||
},
|
||||
}
|
||||
|
||||
By("Creating Alpine Pod and making sure its failing to start")
|
||||
|
||||
var err error
|
||||
|
||||
alpinePod, err = virtualCluster.Client.CoreV1().Pods(alpinePod.Namespace).Create(ctx, alpinePod, metav1.CreateOptions{})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
// check that the alpine Pod is failing to pull the image
|
||||
Eventually(func(g Gomega) {
|
||||
alpinePod, err = virtualCluster.Client.CoreV1().Pods(alpinePod.Namespace).Get(ctx, alpinePod.Name, metav1.GetOptions{})
|
||||
g.Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
status, _ := pod.GetContainerStatus(alpinePod.Status.ContainerStatuses, "alpine")
|
||||
state := status.State.Waiting
|
||||
g.Expect(state).NotTo(BeNil())
|
||||
|
||||
g.Expect(state.Reason).To(BeEquivalentTo("ImagePullBackOff"))
|
||||
}).
|
||||
WithTimeout(time.Minute).
|
||||
WithPolling(time.Second).
|
||||
Should(Succeed())
|
||||
})
|
||||
})
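Note: the testdata/registry/registries.yaml and CA material referenced by this test are not included in this diff. For orientation only, a k3s registries.yaml that mirrors docker.io to a private registry and trusts the mounted CA would look roughly like the sketch below. The registry endpoint is a placeholder; the ca_file path matches the SecretMount target used in the test above.

# Illustrative sketch only; the real testdata/registry/registries.yaml is not shown in this diff.
mirrors:
  docker.io:
    endpoint:
      - "https://private-registry.example.svc:5000"   # placeholder endpoint
configs:
  "private-registry.example.svc:5000":
    tls:
      ca_file: /etc/rancher/k3s/tls/ca.crt   # matches the CA cert SecretMount path above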
@@ -8,6 +8,7 @@ import (
	"sigs.k8s.io/controller-runtime/pkg/client"

	corev1 "k8s.io/api/core/v1"
	schedv1 "k8s.io/api/scheduling/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
@@ -99,6 +100,100 @@ var _ = When("a cluster's status is tracked", Label(e2eTestLabel), Label(statusT
			WithPolling(time.Second * 5).
			Should(Succeed())
	})

	It("created with a field controlled by a policy", func() {
		ctx := context.Background()

		priorityClass := &schedv1.PriorityClass{
			ObjectMeta: metav1.ObjectMeta{
				GenerateName: "pc-",
			},
			Value: 100,
		}
		Expect(k8sClient.Create(ctx, priorityClass)).To(Succeed())

		clusterObj := &v1beta1.Cluster{
			ObjectMeta: metav1.ObjectMeta{
				GenerateName: "status-cluster-",
				Namespace:    namespace.Name,
			},
			Spec: v1beta1.ClusterSpec{
				PriorityClass: priorityClass.Name,
			},
		}
		Expect(k8sClient.Create(ctx, clusterObj)).To(Succeed())

		DeferCleanup(func() {
			Expect(k8sClient.Delete(ctx, priorityClass)).To(Succeed())
		})

		clusterKey := client.ObjectKeyFromObject(clusterObj)

		// Check for the initial status to be set
		Eventually(func(g Gomega) {
			err := k8sClient.Get(ctx, clusterKey, clusterObj)
			g.Expect(err).NotTo(HaveOccurred())

			g.Expect(clusterObj.Status.Phase).To(Equal(v1beta1.ClusterProvisioning))

			cond := meta.FindStatusCondition(clusterObj.Status.Conditions, cluster.ConditionReady)
			g.Expect(cond).NotTo(BeNil())
			g.Expect(cond.Status).To(Equal(metav1.ConditionFalse))
			g.Expect(cond.Reason).To(Equal(cluster.ReasonProvisioning))
		}).
			WithPolling(time.Second * 2).
			WithTimeout(time.Second * 20).
			Should(Succeed())

		// Check for the status to be updated to Ready
		Eventually(func(g Gomega) {
			err := k8sClient.Get(ctx, clusterKey, clusterObj)
			g.Expect(err).NotTo(HaveOccurred())

			g.Expect(clusterObj.Status.Phase).To(Equal(v1beta1.ClusterReady))
			g.Expect(clusterObj.Status.Policy).To(Not(BeNil()))
			g.Expect(clusterObj.Status.Policy.Name).To(Equal(vcp.Name))

			cond := meta.FindStatusCondition(clusterObj.Status.Conditions, cluster.ConditionReady)
			g.Expect(cond).NotTo(BeNil())
			g.Expect(cond.Status).To(Equal(metav1.ConditionTrue))
			g.Expect(cond.Reason).To(Equal(cluster.ReasonProvisioned))
		}).
			WithTimeout(time.Minute * 3).
			WithPolling(time.Second * 5).
			Should(Succeed())

		// update the policy with a new default priority class

		priorityClassVCP := &schedv1.PriorityClass{
			ObjectMeta: metav1.ObjectMeta{
				GenerateName: "pc-",
			},
			Value: 100,
		}
		Expect(k8sClient.Create(ctx, priorityClassVCP)).To(Succeed())

		DeferCleanup(func() {
			Expect(k8sClient.Delete(ctx, priorityClassVCP)).To(Succeed())
		})

		vcp.Spec.DefaultPriorityClass = priorityClassVCP.Name
		Expect(k8sClient.Update(ctx, vcp)).To(Succeed())

		// Check that the cluster status picks up the policy's new default priority class
		Eventually(func(g Gomega) {
			err := k8sClient.Get(ctx, clusterKey, clusterObj)
			g.Expect(err).NotTo(HaveOccurred())

			g.Expect(clusterObj.Status.Policy).To(Not(BeNil()))
			g.Expect(clusterObj.Status.Policy.PriorityClass).To(Not(BeNil()))
			g.Expect(*clusterObj.Status.Policy.PriorityClass).To(Equal(priorityClassVCP.Name))
			g.Expect(clusterObj.Spec.PriorityClass).To(Equal(priorityClass.Name))
		}).
			WithTimeout(time.Minute * 3).
			WithPolling(time.Second * 5).
			Should(Succeed())
	})
})

Context("and the cluster has validation errors", func() {
194 tests/e2e/cluster_sync_storageclass_test.go (Normal file)
@@ -0,0 +1,194 @@
package k3k_test

import (
	"context"
	"time"

	"sigs.k8s.io/controller-runtime/pkg/client"

	storagev1 "k8s.io/api/storage/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/rancher/k3k/pkg/controller/cluster"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

var _ = When("a shared mode cluster is created", Ordered, Label(e2eTestLabel), func() {
	var (
		ctx            context.Context
		virtualCluster *VirtualCluster
	)

	BeforeAll(func() {
		ctx = context.Background()
		virtualCluster = NewVirtualCluster()

		storageClassEnabled := &storagev1.StorageClass{
			ObjectMeta: metav1.ObjectMeta{
				GenerateName: "sc-",
				Labels: map[string]string{
					cluster.SyncEnabledLabelKey: "true",
				},
			},
			Provisioner: "my-provisioner",
		}

		storageClassEnabled, err := k8s.StorageV1().StorageClasses().Create(ctx, storageClassEnabled, metav1.CreateOptions{})
		Expect(err).To(Not(HaveOccurred()))

		storageClassDisabled := &storagev1.StorageClass{
			ObjectMeta: metav1.ObjectMeta{
				GenerateName: "sc-",
				Labels: map[string]string{
					cluster.SyncEnabledLabelKey: "false",
				},
			},
			Provisioner: "my-provisioner",
		}

		storageClassDisabled, err = k8s.StorageV1().StorageClasses().Create(ctx, storageClassDisabled, metav1.CreateOptions{})
		Expect(err).To(Not(HaveOccurred()))

		DeferCleanup(func() {
			DeleteNamespaces(virtualCluster.Cluster.Namespace)

			err = k8s.StorageV1().StorageClasses().Delete(ctx, storageClassEnabled.Name, metav1.DeleteOptions{})
			Expect(err).To(Not(HaveOccurred()))

			err = k8s.StorageV1().StorageClasses().Delete(ctx, storageClassDisabled.Name, metav1.DeleteOptions{})
			Expect(err).To(Not(HaveOccurred()))
		})
	})

	It("has disabled the storage classes sync", func() {
		Expect(virtualCluster.Cluster.Spec.Sync).To(Not(BeNil()))
		Expect(virtualCluster.Cluster.Spec.Sync.StorageClasses.Enabled).To(BeFalse())
	})

	It("doesn't have storage classes", func() {
		virtualStorageClasses, err := virtualCluster.Client.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
		Expect(err).To(Not(HaveOccurred()))
		Expect(virtualStorageClasses.Items).To(HaveLen(0))
	})

	It("has some storage classes in the host", func() {
		hostStorageClasses, err := k8s.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
		Expect(err).To(Not(HaveOccurred()))
		Expect(hostStorageClasses.Items).To(Not(HaveLen(0)))
	})

	It("can create storage classes in the virtual cluster", func() {
		storageClass := &storagev1.StorageClass{
			ObjectMeta: metav1.ObjectMeta{
				GenerateName: "sc-",
			},
			Provisioner: "my-provisioner",
		}

		storageClass, err := virtualCluster.Client.StorageV1().StorageClasses().Create(ctx, storageClass, metav1.CreateOptions{})
		Expect(err).To(Not(HaveOccurred()))

		virtualStorageClasses, err := virtualCluster.Client.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
		Expect(err).To(Not(HaveOccurred()))
		Expect(virtualStorageClasses.Items).To(HaveLen(1))
		Expect(virtualStorageClasses.Items[0].Name).To(Equal(storageClass.Name))
	})

	When("enabling the storage class sync", Ordered, func() {
		BeforeAll(func() {
			GinkgoWriter.Println("Enabling the storage class sync")

			original := virtualCluster.Cluster.DeepCopy()

			virtualCluster.Cluster.Spec.Sync.StorageClasses.Enabled = true

			err := k8sClient.Patch(ctx, virtualCluster.Cluster, client.MergeFrom(original))
			Expect(err).To(Not(HaveOccurred()))

			Eventually(func(g Gomega) {
				key := client.ObjectKeyFromObject(virtualCluster.Cluster)
				g.Expect(k8sClient.Get(ctx, key, virtualCluster.Cluster)).To(Succeed())
				g.Expect(virtualCluster.Cluster.Spec.Sync.StorageClasses.Enabled).To(BeTrue())
			}).
				WithTimeout(time.Second * 10).
				WithPolling(time.Second).
				Should(Succeed())
		})

		It("will sync host storage classes with the sync enabled", func() {
			Eventually(func(g Gomega) {
				hostStorageClasses, err := k8s.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
				g.Expect(err).To(Not(HaveOccurred()))

				for _, hostSC := range hostStorageClasses.Items {
					_, err := virtualCluster.Client.StorageV1().StorageClasses().Get(ctx, hostSC.Name, metav1.GetOptions{})

					if syncEnabled, found := hostSC.Labels[cluster.SyncEnabledLabelKey]; !found || syncEnabled == "true" {
						g.Expect(err).To(Not(HaveOccurred()))
					}
				}
			}).
				MustPassRepeatedly(5).
				WithPolling(time.Second).
				WithTimeout(time.Second * 30).
				Should(Succeed())
		})

		It("will not sync host storage classes with the sync disabled", func() {
			Eventually(func(g Gomega) {
				hostStorageClasses, err := k8s.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
				g.Expect(err).To(Not(HaveOccurred()))

				for _, hostSC := range hostStorageClasses.Items {
					_, err := virtualCluster.Client.StorageV1().StorageClasses().Get(ctx, hostSC.Name, metav1.GetOptions{})

					if hostSC.Labels[cluster.SyncEnabledLabelKey] == "false" {
						g.Expect(err).To(HaveOccurred())
						g.Expect(apierrors.IsNotFound(err)).To(BeTrue())
					}
				}
			}).
				MustPassRepeatedly(5).
				WithPolling(time.Second).
				WithTimeout(time.Second * 30).
				Should(Succeed())
		})
	})

	When("editing a synced storage class in the host cluster", Ordered, func() {
		var syncedStorageClass *storagev1.StorageClass

		BeforeAll(func() {
			hostStorageClasses, err := k8s.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
			Expect(err).To(Not(HaveOccurred()))

			for _, hostSC := range hostStorageClasses.Items {
				if syncEnabled, found := hostSC.Labels[cluster.SyncEnabledLabelKey]; !found || syncEnabled == "true" {
					syncedStorageClass = &hostSC
					break
				}
			}

			Expect(syncedStorageClass).To(Not(BeNil()))

			syncedStorageClass.Labels["foo"] = "bar"
			_, err = k8s.StorageV1().StorageClasses().Update(ctx, syncedStorageClass, metav1.UpdateOptions{})
			Expect(err).To(Not(HaveOccurred()))
		})

		It("will update the synced storage class in the virtual cluster", func() {
			Eventually(func(g Gomega) {
				// the label set on the host object should have been synced to the virtual copy
				virtualSC, err := virtualCluster.Client.StorageV1().StorageClasses().Get(ctx, syncedStorageClass.Name, metav1.GetOptions{})
				g.Expect(err).To(Not(HaveOccurred()))
				g.Expect(virtualSC.Labels).Should(HaveKeyWithValue("foo", "bar"))
			}).
				MustPassRepeatedly(5).
				WithPolling(time.Second).
				WithTimeout(time.Second * 30).
				Should(Succeed())
		})
	})
})
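Note: the sync behavior above is keyed off a label on host StorageClasses whose key is the Go constant cluster.SyncEnabledLabelKey, defined in pkg/controller/cluster and not shown in this diff. As a rough sketch, a host StorageClass opted out of syncing would carry that label set to "false", along these lines (the label key below is a placeholder for the real constant value):

# Illustrative sketch only; substitute the actual value of cluster.SyncEnabledLabelKey.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  generateName: sc-
  labels:
    "<value of cluster.SyncEnabledLabelKey>": "false"   # "true", or no label at all, means the class gets synced
provisioner: my-provisioner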
@@ -79,6 +79,7 @@ var _ = When("a shared mode cluster is created", Ordered, Label(e2eTestLabel), f
		}

		var err error

		virtualService, err = virtualCluster.Client.CoreV1().Services("default").Create(ctx, virtualService, metav1.CreateOptions{})
		Expect(err).To(Not(HaveOccurred()))
	})
@@ -20,7 +20,9 @@ import (

var _ = When("a shared mode cluster update its envs", Label(e2eTestLabel), Label(updateTestsLabel), Label(slowTestsLabel), func() {
	var virtualCluster *VirtualCluster

	ctx := context.Background()

	BeforeEach(func() {
		namespace := NewNamespace()

@@ -174,7 +176,9 @@ var _ = When("a shared mode cluster update its envs", Label(e2eTestLabel), Label

var _ = When("a shared mode cluster update its server args", Label(e2eTestLabel), Label(updateTestsLabel), Label(slowTestsLabel), func() {
	var virtualCluster *VirtualCluster

	ctx := context.Background()

	BeforeEach(func() {
		namespace := NewNamespace()

@@ -238,7 +242,9 @@ var _ = When("a shared mode cluster update its server args", Label(e2eTestLabel)

var _ = When("a virtual mode cluster update its envs", Label(e2eTestLabel), Label(updateTestsLabel), Label(slowTestsLabel), func() {
	var virtualCluster *VirtualCluster

	ctx := context.Background()

	BeforeEach(func() {
		namespace := NewNamespace()

@@ -389,7 +395,9 @@ var _ = When("a virtual mode cluster update its envs", Label(e2eTestLabel), Labe

var _ = When("a virtual mode cluster update its server args", Label(e2eTestLabel), Label(updateTestsLabel), Label(slowTestsLabel), func() {
	var virtualCluster *VirtualCluster

	ctx := context.Background()

	BeforeEach(func() {
		namespace := NewNamespace()

@@ -459,6 +467,7 @@ var _ = When("a shared mode cluster update its version", Label(e2eTestLabel), La
		virtualCluster *VirtualCluster
		nginxPod       *v1.Pod
	)

	BeforeEach(func() {
		ctx := context.Background()
		namespace := NewNamespace()
@@ -469,8 +478,8 @@ var _ = When("a shared mode cluster update its version", Label(e2eTestLabel), La

		cluster := NewCluster(namespace.Name)

		// Add initial version
		cluster.Spec.Version = "v1.31.13-k3s1"
		// Add initial old version
		cluster.Spec.Version = k3sOldVersion

		// need to enable persistence for this
		cluster.Spec.Persistence = v1beta1.PersistenceConfig{
@@ -501,13 +510,14 @@ var _ = When("a shared mode cluster update its version", Label(e2eTestLabel), La

	It("will update server version when version spec is updated", func() {
		var cluster v1beta1.Cluster

		ctx := context.Background()

		err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
		Expect(err).NotTo(HaveOccurred())

		// update cluster version
		cluster.Spec.Version = "v1.32.8-k3s1"
		cluster.Spec.Version = k3sVersion

		err = k8sClient.Update(ctx, &cluster)
		Expect(err).NotTo(HaveOccurred())
@@ -530,6 +540,7 @@ var _ = When("a shared mode cluster update its version", Label(e2eTestLabel), La

			nginxPod, err = virtualCluster.Client.CoreV1().Pods(nginxPod.Namespace).Get(ctx, nginxPod.Name, metav1.GetOptions{})
			g.Expect(err).To(BeNil())

			_, cond = pod.GetPodCondition(&nginxPod.Status, v1.PodReady)
			g.Expect(cond).NotTo(BeNil())
			g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
@@ -545,6 +556,7 @@ var _ = When("a virtual mode cluster update its version", Label(e2eTestLabel), L
		virtualCluster *VirtualCluster
		nginxPod       *v1.Pod
	)

	BeforeEach(func() {
		ctx := context.Background()
		namespace := NewNamespace()
@@ -555,8 +567,8 @@ var _ = When("a virtual mode cluster update its version", Label(e2eTestLabel), L

		cluster := NewCluster(namespace.Name)

		// Add initial version
		cluster.Spec.Version = "v1.31.13-k3s1"
		// Add initial old version
		cluster.Spec.Version = k3sOldVersion

		cluster.Spec.Mode = v1beta1.VirtualClusterMode
		cluster.Spec.Agents = ptr.To[int32](1)
@@ -589,15 +601,17 @@ var _ = When("a virtual mode cluster update its version", Label(e2eTestLabel), L

		nginxPod, _ = virtualCluster.NewNginxPod("")
	})

	It("will update server version when version spec is updated", func() {
		var cluster v1beta1.Cluster

		ctx := context.Background()

		err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
		Expect(err).NotTo(HaveOccurred())

		// update cluster version
		cluster.Spec.Version = "v1.32.8-k3s1"
		cluster.Spec.Version = k3sVersion

		err = k8sClient.Update(ctx, &cluster)
		Expect(err).NotTo(HaveOccurred())
@@ -646,6 +660,7 @@ var _ = When("a shared mode cluster scales up servers", Label(e2eTestLabel), Lab
		virtualCluster *VirtualCluster
		nginxPod       *v1.Pod
	)

	BeforeEach(func() {
		ctx := context.Background()
		namespace := NewNamespace()
@@ -688,6 +703,7 @@ var _ = When("a shared mode cluster scales up servers", Label(e2eTestLabel), Lab
	})
	It("will scale up server pods", func() {
		var cluster v1beta1.Cluster

		ctx := context.Background()

		err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
@@ -716,6 +732,7 @@ var _ = When("a shared mode cluster scales up servers", Label(e2eTestLabel), Lab

			nginxPod, err = virtualCluster.Client.CoreV1().Pods(nginxPod.Namespace).Get(ctx, nginxPod.Name, metav1.GetOptions{})
			g.Expect(err).To(BeNil())

			_, cond := pod.GetPodCondition(&nginxPod.Status, v1.PodReady)
			g.Expect(cond).NotTo(BeNil())
			g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
@@ -731,6 +748,7 @@ var _ = When("a shared mode cluster scales down servers", Label(e2eTestLabel), L
		virtualCluster *VirtualCluster
		nginxPod       *v1.Pod
	)

	BeforeEach(func() {
		ctx := context.Background()
		namespace := NewNamespace()
@@ -777,6 +795,7 @@ var _ = When("a shared mode cluster scales down servers", Label(e2eTestLabel), L
	})
	It("will scale down server pods", func() {
		var cluster v1beta1.Cluster

		ctx := context.Background()

		err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
@@ -803,6 +822,7 @@ var _ = When("a shared mode cluster scales down servers", Label(e2eTestLabel), L

			nginxPod, err = virtualCluster.Client.CoreV1().Pods(nginxPod.Namespace).Get(ctx, nginxPod.Name, metav1.GetOptions{})
			g.Expect(err).To(BeNil())

			_, cond = pod.GetPodCondition(&nginxPod.Status, v1.PodReady)
			g.Expect(cond).NotTo(BeNil())
			g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
@@ -818,6 +838,7 @@ var _ = When("a virtual mode cluster scales up servers", Label(e2eTestLabel), La
		virtualCluster *VirtualCluster
		nginxPod       *v1.Pod
	)

	BeforeEach(func() {
		ctx := context.Background()
		namespace := NewNamespace()
@@ -860,6 +881,7 @@ var _ = When("a virtual mode cluster scales up servers", Label(e2eTestLabel), La
	})
	It("will scale up server pods", func() {
		var cluster v1beta1.Cluster

		ctx := context.Background()

		err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
@@ -888,6 +910,7 @@ var _ = When("a virtual mode cluster scales up servers", Label(e2eTestLabel), La

			nginxPod, err = virtualCluster.Client.CoreV1().Pods(nginxPod.Namespace).Get(ctx, nginxPod.Name, metav1.GetOptions{})
			g.Expect(err).To(BeNil())

			_, cond := pod.GetPodCondition(&nginxPod.Status, v1.PodReady)
			g.Expect(cond).NotTo(BeNil())
			g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
@@ -903,6 +926,7 @@ var _ = When("a virtual mode cluster scales down servers", Label(e2eTestLabel),
		virtualCluster *VirtualCluster
		nginxPod       *v1.Pod
	)

	BeforeEach(func() {
		ctx := context.Background()
		namespace := NewNamespace()
@@ -952,6 +976,7 @@ var _ = When("a virtual mode cluster scales down servers", Label(e2eTestLabel),
		By("Scaling down cluster")

		var cluster v1beta1.Cluster

		ctx := context.Background()

		err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
@@ -233,6 +233,7 @@ func NewVirtualK8sClientAndConfig(cluster *v1beta1.Cluster) (*kubernetes.Clients
		kubeletAltName := fmt.Sprintf("k3k-%s-kubelet", cluster.Name)
		vKubeconfig.AltNames = certs.AddSANs([]string{hostIP, kubeletAltName})
		config, err = vKubeconfig.Generate(ctx, k8sClient, cluster, hostIP, 0)

		return err
	}).
		WithTimeout(time.Minute * 2).
@@ -266,6 +267,7 @@ func NewVirtualK8sClientAndKubeconfig(cluster *v1beta1.Cluster) (*kubernetes.Cli
		kubeletAltName := fmt.Sprintf("k3k-%s-kubelet", cluster.Name)
		vKubeconfig.AltNames = certs.AddSANs([]string{hostIP, kubeletAltName})
		config, err = vKubeconfig.Generate(ctx, k8sClient, cluster, hostIP, 0)

		return err
	}).
		WithTimeout(time.Minute * 2).
@@ -397,27 +399,28 @@ func (c *VirtualCluster) ExecCmd(pod *v1.Pod, command string) (string, string, e
func restartServerPod(ctx context.Context, virtualCluster *VirtualCluster) {
	GinkgoHelper()

	labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server"
	serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
	Expect(err).To(Not(HaveOccurred()))
	serverPods := listServerPods(ctx, virtualCluster)

	Expect(len(serverPods.Items)).To(Equal(1))
	serverPod := serverPods.Items[0]
	Expect(len(serverPods)).To(Equal(1))
	serverPod := serverPods[0]

	GinkgoWriter.Printf("deleting pod %s/%s\n", serverPod.Namespace, serverPod.Name)

	err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).Delete(ctx, serverPod.Name, metav1.DeleteOptions{})
	err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).Delete(ctx, serverPod.Name, metav1.DeleteOptions{})
	Expect(err).To(Not(HaveOccurred()))

	By("Deleting server pod")

	// check that the server pods restarted
	Eventually(func() any {
		serverPods, err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
		Expect(err).To(Not(HaveOccurred()))
		Expect(len(serverPods.Items)).To(Equal(1))
		return serverPods.Items[0].DeletionTimestamp
	}).WithTimeout(60 * time.Second).WithPolling(time.Second * 5).Should(BeNil())
	Eventually(func(g Gomega) {
		serverPods := listServerPods(ctx, virtualCluster)

		g.Expect(serverPods).To(HaveLen(1))
		g.Expect(serverPods[0].DeletionTimestamp).To(Not(BeNil()))
	}).
		WithTimeout(time.Minute * 2).
		WithPolling(time.Second * 5).
		Should(Succeed())
}

func listServerPods(ctx context.Context, virtualCluster *VirtualCluster) []v1.Pod {
9 tests/e2e/testdata/addons/nginx.yaml (vendored, Normal file)
@@ -0,0 +1,9 @@
apiVersion: v1
kind: Pod
metadata:
  name: nginx-addon
  namespace: default
spec:
  containers:
    - name: nginx
      image: nginx:latest