Compare commits

..

16 Commits

Author SHA1 Message Date
renovate-rancher[bot]
1d5943d054 chore(deps): update github actions 2026-04-09 07:01:18 +00:00
renovate-rancher[bot]
d9c504283e chore(deps): update rancher/renovate-config digest to 20f34a3 (#749)
Co-authored-by: renovate-rancher[bot] <119870437+renovate-rancher[bot]@users.noreply.github.com>
2026-04-08 23:07:22 +02:00
Enrico Candino
4738ab2c56 Schedule Pods in the same Node with a preferred affinity (#724) (#736)
* Add integration tests for Deployment and StatefulSet creation with PVC in shared cluster

* Add affinity settings for pod scheduling based on agent hostname

* increased timeout

* focus test

* remove cleanup

* check for existing pvc

* remove focus

* add affinity tests for Pods in shared cluster

* refactor restartServerPod to improve pod restart checks and timeout handling

* unfocus

* fix test description
2026-04-02 22:48:33 +02:00
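
A minimal sketch in Go of what such a preferred affinity can look like, using k8s.io/api types; the function name, the weight, and the choice of the kubernetes.io/hostname label are my assumptions, not the actual k3k code:

```go
package affinity

import (
	corev1 "k8s.io/api/core/v1"
)

// withPreferredNodeAffinity adds a soft preference to schedule the pod on
// the node the agent runs on; the scheduler can still fall back to other
// nodes if that one lacks capacity.
func withPreferredNodeAffinity(pod *corev1.Pod, agentHostname string) {
	pod.Spec.Affinity = &corev1.Affinity{
		NodeAffinity: &corev1.NodeAffinity{
			PreferredDuringSchedulingIgnoredDuringExecution: []corev1.PreferredSchedulingTerm{{
				Weight: 100, // preference weight, not a hard requirement
				Preference: corev1.NodeSelectorTerm{
					MatchExpressions: []corev1.NodeSelectorRequirement{{
						Key:      "kubernetes.io/hostname",
						Operator: corev1.NodeSelectorOpIn,
						Values:   []string{agentHostname},
					}},
				},
			}},
		},
	}
}
```
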
Jonathan Crowther
7c1292b262 Pin installed versions to specific hashes (#730) (#732) 2026-03-27 12:21:20 -04:00
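
The workflow diffs below show this pattern as commit-SHA-pinned actions and sha256sum checks on downloaded binaries. A hedged Go sketch of the same verify-before-install idea (the helper name is mine, not from the repo):

```go
package pin

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"os"
)

// verifySHA256 refuses a downloaded artifact unless its digest matches the
// pinned value, mirroring the `sha256sum --check` steps in the workflows.
func verifySHA256(path, wantHex string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		return err
	}

	if got := hex.EncodeToString(h.Sum(nil)); got != wantHex {
		return fmt.Errorf("checksum mismatch for %s: got %s, want %s", path, got, wantHex)
	}
	return nil
}
```
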
Jonathan Crowther
3038276b02 [v1.0] Refactor tests to their own directories (#725)
* Refactor tests to their own directories (#723)

* Move cli tests

* Move e2e tests to their own directory

* Move integration tests

* Fix path within the cli tests

* Move k3k-kubelet tests

* Improve the various make test- options

* Remove dead code from cli tests

* Update development.md with the new make commands

* Remove tests that didn't exist in v1.0
2026-03-26 11:38:51 -04:00
Enrico Candino
6da59f16b9 update golang grpc module (#712) (#713) 2026-03-20 00:31:23 +01:00
Jonathan Crowther
118ed838a7 Backport macOS script compatibility changes (#709) 2026-03-18 14:52:35 -04:00
Enrico Candino
fe924d3b89 Bump some test dependencies and fix lint (#708) (#710) 2026-03-18 19:11:13 +01:00
Enrico Candino
831821206e chore(deps): update github actions (#707)
Co-authored-by: renovate-rancher[bot] <119870437+renovate-rancher[bot]@users.noreply.github.com>
2026-03-18 12:26:20 +01:00
Enrico Candino
16f7a71861 chore(deps): update dependency go to v1.25.8 (#664) (#706)
Co-authored-by: renovate-rancher[bot] <119870437+renovate-rancher[bot]@users.noreply.github.com>
2026-03-18 12:16:13 +01:00
Hussein Galal
b055aeff0f Refactor distribution algorithm to account for host capacity (#695)
* Refactor distribution algorithm to account for host capacity

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2026-03-12 21:28:18 +02:00
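
The commit message doesn't show the algorithm itself; as an illustrative sketch (assumed, not the actual k3k implementation), accounting for host capacity usually means placing by remaining free capacity instead of round-robin:

```go
package distribute

// Host is an illustrative stand-in for a schedulable host node.
type Host struct {
	Name        string
	Allocatable int64 // total capacity units
	Allocated   int64 // units already in use
}

// pickHost returns the host with the most remaining capacity, so larger or
// emptier hosts absorb proportionally more of the load.
func pickHost(hosts []Host) (best Host, ok bool) {
	var bestFree int64 = -1
	for _, h := range hosts {
		if free := h.Allocatable - h.Allocated; free > bestFree {
			best, bestFree, ok = h, free, true
		}
	}
	return best, ok
}
```
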
Enrico Candino
5ef99f83ee refactor: streamline K3S Docker installation and chart setup with dynamic repository handling (#694) 2026-03-12 11:11:17 +01:00
Hussein Galal
f55dd45775 use apireader instead of client for node registration in mirror host node (#687)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2026-03-10 17:41:02 +02:00
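
For context: in controller-runtime, mgr.GetClient() serves reads from the informer cache, which can be stale or not yet synced when a node registers, while mgr.GetAPIReader() queries the API server directly. A minimal sketch (the function name is mine):

```go
package nodes

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	ctrl "sigs.k8s.io/controller-runtime"
)

// getNodeUncached reads a Node straight from the API server, bypassing the
// controller's informer cache, so registration never acts on stale data.
func getNodeUncached(ctx context.Context, mgr ctrl.Manager, name string) (*corev1.Node, error) {
	var node corev1.Node
	if err := mgr.GetAPIReader().Get(ctx, types.NamespacedName{Name: name}, &node); err != nil {
		return nil, err
	}
	return &node, nil
}
```
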
Enrico Candino
f2badbfdb1 Added policy in Cluster Status (#663) (#677)
* initial implementation

restored policyName

* added test, fixed priority scheduling

* requested changes from review

- wrapped errors
- fixed some kube-api-linter issues to match k8s conventions
- moved policy namespace check in the same condition branch
2026-02-18 11:31:29 +01:00
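
A hedged usage sketch of the new status field, based on the types documented in the API-reference diffs below (client setup omitted; the helper name is mine):

```go
package policystatus

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/types"
	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)

// printBoundPolicy reads the policy binding directly from Cluster status
// instead of re-deriving it from the namespace.
func printBoundPolicy(ctx context.Context, c ctrlclient.Client, ns, name string) error {
	var cluster v1beta1.Cluster
	if err := c.Get(ctx, types.NamespacedName{Namespace: ns, Name: name}, &cluster); err != nil {
		return err
	}
	fmt.Println("bound policy:", cluster.Status.PolicyName)
	return nil
}
```
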
Andreas Kupries
e87d879a91 Merge pull request #675 from andreas-kupries/syncer-controller-owner-v1.0-backport
[v1.0 backport] change ControllerReferences over to OwnerReferences
2026-02-17 11:56:54 +01:00
Andreas Kupries
502f4389d2 fix: switch ControllerReferences over to OwnerReferences 2026-02-17 11:11:23 +01:00
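
The practical difference, sketched with controller-runtime's helpers: a controller reference marks a single controlling owner and fails if another controller already claims the object, while a plain owner reference still drives garbage collection without that exclusivity:

```go
package owners

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// setOwner ties obj's lifetime to owner without claiming to be its
// controller, avoiding "object already owned by another controller" errors.
func setOwner(owner, obj metav1.Object, scheme *runtime.Scheme) error {
	// controllerutil.SetControllerReference(owner, obj, scheme) would instead
	// set Controller: true, and only one such owner is allowed per object.
	return controllerutil.SetOwnerReference(owner, obj, scheme)
}
```
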
59 changed files with 984 additions and 8873 deletions

View File

@@ -4,8 +4,7 @@
"github>rancher/renovate-config#release"
],
"baseBranchPatterns": [
"main",
"release/v1.0"
"main"
],
"prHourlyLimit": 2
}

View File

@@ -19,18 +19,18 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- name: Set up Go
uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6
uses: actions/setup-go@40f1582b2485089dde7abd97c1529aa768e1baff # v5
with:
go-version-file: go.mod
- name: Set up QEMU
uses: docker/setup-qemu-action@ce360397dd3f832beb865e1373c09c0e9f86d70a # v4
uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@ec59f474b9834571250b370d4735c50f8e2d1e29 # v7
uses: goreleaser/goreleaser-action@e435ccd777264be153ace6237001ef4d979d3a7a # v6
with:
distribution: goreleaser
version: v2
@@ -40,7 +40,7 @@ jobs:
REGISTRY: ""
- name: Run Trivy vulnerability scanner (k3kcli)
uses: aquasecurity/trivy-action@57a97c7e7821a5776cebc9bb87c984fa69cba8f1 # v0.35.0
uses: aquasecurity/trivy-action@97e0b3872f55f89b95b2f65b3dbab56962816478 # 0.34.2
with:
ignore-unfixed: true
severity: 'MEDIUM,HIGH,CRITICAL'
@@ -56,7 +56,7 @@ jobs:
category: k3kcli
- name: Run Trivy vulnerability scanner (k3k)
uses: aquasecurity/trivy-action@57a97c7e7821a5776cebc9bb87c984fa69cba8f1 # v0.35.0
uses: aquasecurity/trivy-action@97e0b3872f55f89b95b2f65b3dbab56962816478 # 0.34.2
with:
ignore-unfixed: true
severity: 'MEDIUM,HIGH,CRITICAL'
@@ -72,7 +72,7 @@ jobs:
category: k3k
- name: Run Trivy vulnerability scanner (k3k-kubelet)
uses: aquasecurity/trivy-action@57a97c7e7821a5776cebc9bb87c984fa69cba8f1 # v0.35.0
uses: aquasecurity/trivy-action@97e0b3872f55f89b95b2f65b3dbab56962816478 # 0.34.2
with:
ignore-unfixed: true
severity: 'MEDIUM,HIGH,CRITICAL'

View File

@@ -6,16 +6,12 @@ on:
permissions:
contents: write
env:
HELM_VERSION: v4.1.3
HELM_BIN_HASH_AMD64: 02ce9722d541238f81459938b84cf47df2fdf1187493b4bfb2346754d82a4700
jobs:
chart-release:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
with:
fetch-depth: 0
@@ -23,14 +19,11 @@ jobs:
run: |
git config user.name "$GITHUB_ACTOR"
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
- name: Install helm
run: |
curl -sSfL -o helm.tar.gz https://get.helm.sh/helm-${{ env.HELM_VERSION }}-linux-amd64.tar.gz
echo "${{ env.HELM_BIN_HASH_AMD64 }} helm.tar.gz" | sha256sum --check
tar -xvzf helm.tar.gz --strip-components=1 -C /tmp/
sudo mv /tmp/helm /usr/local/bin
sudo chmod +x /usr/local/bin/helm
- name: Install Helm
uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4 # v4
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
- name: Run chart-releaser
uses: helm/chart-releaser-action@cae68fefc6b5f367a0275617c9f83181ba54714f # v1.7.0

View File

@@ -24,7 +24,7 @@ jobs:
run: echo "::error::Missing tag from input" && exit 1
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- name: Check if release is draft
run: |

View File

@@ -21,7 +21,7 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
with:
fetch-depth: 0
fetch-tags: true
@@ -31,17 +31,17 @@ jobs:
run: git checkout ${{ inputs.commit }}
- name: Set up Go
uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6
uses: actions/setup-go@40f1582b2485089dde7abd97c1529aa768e1baff # v5
with:
go-version-file: go.mod
- name: Set up QEMU
uses: docker/setup-qemu-action@ce360397dd3f832beb865e1373c09c0e9f86d70a # v4
uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3
with:
image: tonistiigi/binfmt:qemu-v10.0.4-56
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3
with:
version: v0.30.1
@@ -62,7 +62,7 @@ jobs:
echo "DOCKER_PASSWORD=${{ github.token }}" >> $GITHUB_ENV
- name: Login to container registry
uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # v4
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3
with:
registry: ${{ env.REGISTRY }}
username: ${{ env.DOCKER_USERNAME }}
@@ -85,7 +85,7 @@ jobs:
echo "CURRENT_TAG=${CURRENT_TAG}" >> "$GITHUB_OUTPUT"
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@ec59f474b9834571250b370d4735c50f8e2d1e29 # v7
uses: goreleaser/goreleaser-action@e435ccd777264be153ace6237001ef4d979d3a7a # v6
with:
distribution: goreleaser
version: v2

View File

@@ -51,7 +51,7 @@ permissions:
jobs:
call-workflow:
uses: rancher/renovate-config/.github/workflows/renovate-vault.yml@240174f0ae1994c3f6e94b8b062ea4aceed4a182 # release
uses: rancher/renovate-config/.github/workflows/renovate-vault.yml@20f34a3e3d54ab17f4fd5a3037edd62f58e26c7a # release
with:
configMigration: ${{ inputs.configMigration || 'true' }}
logLevel: ${{ inputs.logLevel || 'info' }}

View File

@@ -4,91 +4,54 @@ on:
schedule:
- cron: "0 1 * * *"
workflow_dispatch:
inputs:
k3k_version:
description: 'K3k version to test (e.g. v1.0.2). Leave empty to build from source.'
required: false
type: string
k8s_version:
description: 'Kubernetes version to test'
required: false
type: choice
options:
- ""
- "v1.34.6"
- "v1.35.3"
permissions:
contents: read
env:
K8S_VERSIONS: "v1.34.6,v1.35.3"
HELM_VERSION: v4.1.3
HELM_BIN_HASH_AMD64: 02ce9722d541238f81459938b84cf47df2fdf1187493b4bfb2346754d82a4700
K3D_VERSION: v5.8.3
K3D_BIN_HASH_AMD64: dbaa79a76ace7f4ca230a1ff41dc7d8a5036a8ad0309e9c54f9bf3836dbe853e
ARCH: amd64
KUBECTL_VERSION: v1.35.3
jobs:
setup:
runs-on: ubuntu-latest
outputs:
k8s_versions: ${{ steps.set-matrix.outputs.k8s_versions }}
steps:
- id: set-matrix
run: |
if [[ -z "${{ inputs.k8s_version }}" ]]; then
JSON_ARRAY=$(jq -nc '"${{ env.K8S_VERSIONS }}" | split(",")')
echo "k8s_versions=${JSON_ARRAY}" >> "$GITHUB_OUTPUT"
else
echo "k8s_versions=[\"${{ inputs.k8s_version }}\"]" >> "$GITHUB_OUTPUT"
fi
conformance:
needs: setup
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
k8s_version: ${{ fromJSON(needs.setup.outputs.k8s_versions) }}
type:
- parallel
- serial
env:
KUBERNETES_VERSION: ${{ matrix.k8s_version }}
K3D_VERSION: v5.8.3
K3D_BIN_HASH_AMD64: "dbaa79a76ace7f4ca230a1ff41dc7d8a5036a8ad0309e9c54f9bf3836dbe853e"
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
with:
fetch-depth: 0
fetch-tags: true
- uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6
- uses: actions/setup-go@40f1582b2485089dde7abd97c1529aa768e1baff # v5
with:
go-version-file: go.mod
- name: Install helm
run: |
curl -sSfL -o helm.tar.gz https://get.helm.sh/helm-${{ env.HELM_VERSION }}-linux-amd64.tar.gz
echo "${{ env.HELM_BIN_HASH_AMD64 }} helm.tar.gz" | sha256sum --check
tar -xvzf helm.tar.gz --strip-components=1 -C /tmp/
sudo mv /tmp/helm /usr/local/bin
sudo chmod +x /usr/local/bin/helm
uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4 # v4.3.1
- name: Install hydrophone
run: go install sigs.k8s.io/hydrophone@3de3e886a2f6f09635d8b981c195490af1584d97 #v0.7.0
- name: Install k3d # taken from github.com/rancher/rancher/.github/workflows/integration-tests.yaml
run: |
curl -sSfL -o k3d "https://github.com/k3d-io/k3d/releases/download/${{ env.K3D_VERSION }}/k3d-linux-amd64"
echo "${{ env.K3D_BIN_HASH_AMD64 }} k3d" | sha256sum --check
curl -sSfL -o k3d "https://github.com/k3d-io/k3d/releases/download/${K3D_VERSION}/k3d-linux-${ARCH}"
echo "${K3D_BIN_HASH_AMD64} k3d" | shasum -a 256 --check
sudo mv k3d /usr/local/bin
sudo chmod +x /usr/local/bin/k3d
- name: Install k3d and kubectl
run: |
curl -LO "https://dl.k8s.io/release/${{ env.KUBERNETES_VERSION }}/bin/linux/amd64/kubectl"
curl -LO "https://dl.k8s.io/release/${{ env.KUBERNETES_VERSION }}/bin/linux/amd64/kubectl.sha256"
echo "$(cat kubectl.sha256) kubectl" | sha256sum --check
curl -LO "https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/linux/amd64/kubectl"
- name: Setup Kubernetes (k3d)
env:
@@ -100,15 +63,13 @@ jobs:
k3d registry create ${REPO_NAME} --port ${REPO_PORT}
k3d cluster create k3k --servers 2 \
--image rancher/k3s:${{ env.KUBERNETES_VERSION }}-k3s1 \
-p "30000-30010:30000-30010@server:0" \
--registry-use k3d-${REPO_NAME}:${REPO_PORT}
kubectl cluster-info
kubectl get nodes
- name: Setup K3k (from source)
if: inputs.k3k_version == ''
- name: Setup K3k
env:
REPO: k3k-registry:12345
run: |
@@ -126,29 +87,9 @@ jobs:
k3d image import ${REPO}/k3k-kubelet:${VERSION} -c k3k --verbose
make install
- name: Setup K3k (from release)
if: inputs.k3k_version != ''
env:
KUBECONFIG: /etc/rancher/k3s/k3s.yaml
run: |
K3K_VERSION="${{ inputs.k3k_version }}"
CHART_VERSION="${K3K_VERSION#v}"
helm repo add k3k https://rancher.github.io/k3k
helm repo update
helm install --namespace k3k-system --create-namespace --version "${CHART_VERSION}" k3k k3k/k3k
wget -qO k3kcli "https://github.com/rancher/k3k/releases/download/${{ inputs.k3k_version }}/k3kcli-linux-amd64"
sudo mv k3kcli /usr/local/bin/k3kcli
sudo chmod +x /usr/local/bin/k3kcli
- name: Wait for K3k controller
env:
KUBECONFIG: /etc/rancher/k3s/k3s.yaml
run: |
echo "Wait for K3k controller deployment to be available"
kubectl wait -n k3k-system deployment -l "app.kubernetes.io/name=k3k" --for=condition=Available --timeout=5m
echo "Wait for K3k controller to be available"
kubectl wait -n k3k-system pod --for condition=Ready -l "app.kubernetes.io/name=k3k" --timeout=5m
- name: Check k3kcli
run: k3kcli -v
@@ -183,30 +124,33 @@ jobs:
kubectl get nodes
kubectl get pods -A
- name: Run conformance tests
- name: Run conformance tests (parallel)
if: matrix.type == 'parallel'
run: |
hydrophone --conformance --parallel 4 \
# Run conformance tests in parallel mode (skipping serial)
hydrophone --conformance --parallel 4 --skip='\[Serial\]' \
--kubeconfig ${{ github.workspace }}/k3k-mycluster-mycluster-kubeconfig.yaml \
--output-dir /tmp
- name: Archive logs
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
if: always()
with:
name: conformance-${{ matrix.k8s_version }}-logs
path: /tmp/e2e.log
- name: Run conformance tests (serial)
if: matrix.type == 'serial'
run: |
# Run serial conformance tests
hydrophone --focus='\[Serial\].*\[Conformance\]' \
--kubeconfig ${{ github.workspace }}/k3k-mycluster-mycluster-kubeconfig.yaml \
--output-dir /tmp
- name: Archive results
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
- name: Archive conformance logs
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
if: always()
with:
name: conformance-${{ matrix.k8s_version }}-results
path: /tmp/junit_01.xml
name: conformance-${{ matrix.type }}-logs
path: /tmp/e2e.log
- name: Job Summary
if: always()
run: |
echo '## 📊 Conformance Tests Results (${{ matrix.k8s_version }})' >> $GITHUB_STEP_SUMMARY
echo '## 📊 Conformance Tests Results (${{ matrix.type }})' >> $GITHUB_STEP_SUMMARY
echo '| Passed | Failed | Pending | Skipped |' >> $GITHUB_STEP_SUMMARY
echo '|---|---|---|---|' >> $GITHUB_STEP_SUMMARY

View File

@@ -4,89 +4,49 @@ on:
schedule:
- cron: "0 1 * * *"
workflow_dispatch:
inputs:
k3k_version:
description: 'K3k version to test (e.g. v1.0.2). Leave empty to build from source.'
required: false
type: string
k8s_version:
description: 'Kubernetes version to test'
required: false
type: choice
options:
- ""
- "v1.34.6"
- "v1.35.3"
permissions:
contents: read
env:
K8S_VERSIONS: "v1.34.6,v1.35.3"
HELM_VERSION: v4.1.3
HELM_BIN_HASH_AMD64: 02ce9722d541238f81459938b84cf47df2fdf1187493b4bfb2346754d82a4700
jobs:
setup:
runs-on: ubuntu-latest
outputs:
k8s_versions: ${{ steps.set-matrix.outputs.k8s_versions }}
steps:
- id: set-matrix
run: |
if [[ -z "${{ inputs.k8s_version }}" ]]; then
JSON_ARRAY=$(jq -nc '"${{ env.K8S_VERSIONS }}" | split(",")')
echo "k8s_versions=${JSON_ARRAY}" >> "$GITHUB_OUTPUT"
else
echo "k8s_versions=[\"${{ inputs.k8s_version }}\"]" >> "$GITHUB_OUTPUT"
fi
conformance:
needs: setup
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
k8s_version: ${{ fromJSON(needs.setup.outputs.k8s_versions) }}
env:
KUBERNETES_VERSION: ${{ matrix.k8s_version }}
type:
- parallel
- serial
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
with:
fetch-depth: 0
fetch-tags: true
- uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6
- uses: actions/setup-go@40f1582b2485089dde7abd97c1529aa768e1baff # v5
with:
go-version-file: go.mod
- name: Install helm
run: |
curl -sSfL -o helm.tar.gz https://get.helm.sh/helm-${{ env.HELM_VERSION }}-linux-amd64.tar.gz
echo "${{ env.HELM_BIN_HASH_AMD64 }} helm.tar.gz" | sha256sum --check
tar -xvzf helm.tar.gz --strip-components=1 -C /tmp/
sudo mv /tmp/helm /usr/local/bin
sudo chmod +x /usr/local/bin/helm
uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4 # v4.3.1
- name: Install hydrophone
run: go install sigs.k8s.io/hydrophone@3de3e886a2f6f09635d8b981c195490af1584d97 #v0.7.0
- name: Install k3s
env:
KUBECONFIG: /etc/rancher/k3s/k3s.yaml
K3S_HOST_VERSION: ${{ env.KUBERNETES_VERSION }}+k3s1
K3S_HOST_VERSION: v1.33.7+k3s1
run: |
curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=${K3S_HOST_VERSION} INSTALL_K3S_EXEC="--write-kubeconfig-mode=777" sh -s -
kubectl cluster-info
kubectl get nodes
- name: Setup K3k (from source)
if: inputs.k3k_version == ''
- name: Build, package and setup K3k
env:
KUBECONFIG: /etc/rancher/k3s/k3s.yaml
run: |
@@ -100,29 +60,9 @@ jobs:
# add k3kcli to $PATH
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
- name: Setup K3k (from release)
if: inputs.k3k_version != ''
env:
KUBECONFIG: /etc/rancher/k3s/k3s.yaml
run: |
K3K_VERSION="${{ inputs.k3k_version }}"
CHART_VERSION="${K3K_VERSION#v}"
helm repo add k3k https://rancher.github.io/k3k
helm repo update
helm install --namespace k3k-system --create-namespace --version "${CHART_VERSION}" k3k k3k/k3k
wget -qO k3kcli "https://github.com/rancher/k3k/releases/download/${{ inputs.k3k_version }}/k3kcli-linux-amd64"
sudo mv k3kcli /usr/local/bin/k3kcli
sudo chmod +x /usr/local/bin/k3kcli
- name: Wait for K3k controller
env:
KUBECONFIG: /etc/rancher/k3s/k3s.yaml
run: |
echo "Wait for K3k controller deployment to be available"
kubectl wait -n k3k-system deployment -l "app.kubernetes.io/name=k3k" --for=condition=Available --timeout=5m
echo "Wait for K3k controller to be available"
kubectl wait -n k3k-system pod --for condition=Ready -l "app.kubernetes.io/name=k3k" --timeout=5m
- name: Check k3kcli
run: k3kcli -v
@@ -139,13 +79,23 @@ jobs:
kubectl get nodes
kubectl get pods -A
- name: Run conformance tests
- name: Run conformance tests (parallel)
if: matrix.type == 'parallel'
run: |
hydrophone --conformance --parallel 4 \
# Run conformance tests in parallel mode (skipping serial)
hydrophone --conformance --parallel 4 --skip='\[Serial\]' \
--kubeconfig ${{ github.workspace }}/k3k-mycluster-mycluster-kubeconfig.yaml \
--output-dir /tmp
- name: Collect logs
- name: Run conformance tests (serial)
if: matrix.type == 'serial'
run: |
# Run serial conformance tests
hydrophone --focus='\[Serial\].*\[Conformance\]' \
--kubeconfig ${{ github.workspace }}/k3k-mycluster-mycluster-kubeconfig.yaml \
--output-dir /tmp
- name: Export logs
if: always()
env:
KUBECONFIG: /etc/rancher/k3s/k3s.yaml
@@ -154,37 +104,30 @@ jobs:
kubectl logs -n k3k-system -l "app.kubernetes.io/name=k3k" --tail=-1 > /tmp/k3k.log
- name: Archive K3s logs
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
if: always()
with:
name: k3s-${{ matrix.k8s_version }}-logs
name: k3s-${{ matrix.type }}-logs
path: /tmp/k3s.log
- name: Archive K3k logs
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
if: always()
with:
name: k3k-${{ matrix.k8s_version }}-logs
name: k3k-${{ matrix.type }}-logs
path: /tmp/k3k.log
- name: Archive conformance logs
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
if: always()
with:
name: conformance-${{ matrix.k8s_version }}-logs
name: conformance-${{ matrix.type }}-logs
path: /tmp/e2e.log
- name: Archive results
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
if: always()
with:
name: conformance-${{ matrix.k8s_version }}-results
path: /tmp/junit_01.xml
- name: Job Summary
if: always()
run: |
echo '## 📊 Conformance Tests Results (${{ matrix.k8s_version }})' >> $GITHUB_STEP_SUMMARY
echo '## 📊 Conformance Tests Results (${{ matrix.type }})' >> $GITHUB_STEP_SUMMARY
echo '| Passed | Failed | Pending | Skipped |' >> $GITHUB_STEP_SUMMARY
echo '|---|---|---|---|' >> $GITHUB_STEP_SUMMARY

View File

@@ -10,21 +10,18 @@ on:
permissions:
contents: read
env:
KUBERNETES_VERSION: v1.35.3
jobs:
tests-e2e:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
with:
fetch-depth: 0
fetch-tags: true
- uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6
- uses: actions/setup-go@40f1582b2485089dde7abd97c1529aa768e1baff # v5
with:
go-version-file: go.mod
@@ -39,11 +36,11 @@ jobs:
echo "GOCOVERDIR=${{ github.workspace }}/covdata" >> $GITHUB_ENV
echo "REPO=ttl.sh/$(uuidgen)" >> $GITHUB_ENV
echo "VERSION=1h" >> $GITHUB_ENV
echo "K3S_HOST_VERSION=${{ env.KUBERNETES_VERSION }}+k3s1" >> $GITHUB_ENV
echo "K3S_HOST_VERSION=v1.32.1+k3s1 >> $GITHUB_ENV"
- name: Install k3s
run: |
curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=${K3S_HOST_VERSION} INSTALL_K3S_EXEC="--write-kubeconfig-mode=777" sh -s -
curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=${{ env.K3S_HOST_VERSION }} INSTALL_K3S_EXEC="--write-kubeconfig-mode=777" sh -s -
- name: Build and package and push dev images
env:
@@ -67,36 +64,28 @@ jobs:
run: go tool covdata textfmt -i=${GOCOVERDIR} -o ${GOCOVERDIR}/cover.out
- name: Upload coverage reports to Codecov (controller)
uses: codecov/codecov-action@57e3a136b779b570ffcdbf80b3bdc90e7fab3de2 # v6
uses: codecov/codecov-action@75cd11691c0faa626561e295848008c8a7dddffe # v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ${GOCOVERDIR}/cover.out
flags: controller
- name: Upload coverage reports to Codecov (e2e)
uses: codecov/codecov-action@57e3a136b779b570ffcdbf80b3bdc90e7fab3de2 # v6
uses: codecov/codecov-action@75cd11691c0faa626561e295848008c8a7dddffe # v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ./cover.out
flags: e2e
- name: Export logs
if: always()
env:
KUBECONFIG: /etc/rancher/k3s/k3s.yaml
run: |
journalctl -u k3s -o cat --no-pager > /tmp/k3s.log
kubectl logs -n k3k-system -l "app.kubernetes.io/name=k3k" --tail=-1 > /tmp/k3k.log
- name: Archive k3s logs
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
if: always()
with:
name: e2e-k3s-logs
path: /tmp/k3s.log
- name: Archive k3k logs
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
if: always()
with:
name: e2e-k3k-logs
@@ -106,12 +95,12 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
with:
fetch-depth: 0
fetch-tags: true
- uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6
- uses: actions/setup-go@40f1582b2485089dde7abd97c1529aa768e1baff # v5
with:
go-version-file: go.mod
@@ -126,11 +115,11 @@ jobs:
echo "GOCOVERDIR=${{ github.workspace }}/covdata" >> $GITHUB_ENV
echo "REPO=ttl.sh/$(uuidgen)" >> $GITHUB_ENV
echo "VERSION=1h" >> $GITHUB_ENV
echo "K3S_HOST_VERSION=${{ env.KUBERNETES_VERSION }}+k3s1" >> $GITHUB_ENV
echo "K3S_HOST_VERSION=v1.32.1+k3s1 >> $GITHUB_ENV"
- name: Install k3s
run: |
curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=${K3S_HOST_VERSION} INSTALL_K3S_EXEC="--write-kubeconfig-mode=777" sh -s -
curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=${{ env.K3S_HOST_VERSION }} INSTALL_K3S_EXEC="--write-kubeconfig-mode=777" sh -s -
- name: Build and package and push dev images
env:
@@ -154,37 +143,29 @@ jobs:
run: go tool covdata textfmt -i=${GOCOVERDIR} -o ${GOCOVERDIR}/cover.out
- name: Upload coverage reports to Codecov (controller)
uses: codecov/codecov-action@57e3a136b779b570ffcdbf80b3bdc90e7fab3de2 # v6
uses: codecov/codecov-action@75cd11691c0faa626561e295848008c8a7dddffe # v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ${GOCOVERDIR}/cover.out
flags: controller
- name: Upload coverage reports to Codecov (e2e)
uses: codecov/codecov-action@57e3a136b779b570ffcdbf80b3bdc90e7fab3de2 # v6
uses: codecov/codecov-action@75cd11691c0faa626561e295848008c8a7dddffe # v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ./cover.out
flags: e2e
- name: Export logs
if: always()
env:
KUBECONFIG: /etc/rancher/k3s/k3s.yaml
run: |
journalctl -u k3s -o cat --no-pager > /tmp/k3s.log
kubectl logs -n k3k-system -l "app.kubernetes.io/name=k3k" --tail=-1 > /tmp/k3k.log
- name: Archive k3s logs
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
if: always()
with:
name: e2e-slow-k3s-logs
name: e2e-k3s-logs
path: /tmp/k3s.log
- name: Archive k3k logs
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
if: always()
with:
name: e2e-slow-k3k-logs
name: e2e-k3k-logs
path: /tmp/k3k.log

View File

@@ -10,18 +10,15 @@ on:
permissions:
contents: read
env:
KUBERNETES_VERSION: v1.35.3
jobs:
tests:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6
- uses: actions/setup-go@40f1582b2485089dde7abd97c1529aa768e1baff # v5
with:
go-version-file: go.mod
@@ -29,7 +26,7 @@ jobs:
run: make test-unit
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@57e3a136b779b570ffcdbf80b3bdc90e7fab3de2 # v6
uses: codecov/codecov-action@75cd11691c0faa626561e295848008c8a7dddffe # v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ./cover.out
@@ -40,12 +37,12 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
with:
fetch-depth: 0
fetch-tags: true
- uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6
- uses: actions/setup-go@40f1582b2485089dde7abd97c1529aa768e1baff # v5
with:
go-version-file: go.mod
@@ -58,7 +55,7 @@ jobs:
echo "COVERAGE=true" >> $GITHUB_ENV
echo "GOCOVERDIR=${{ github.workspace }}/covdata" >> $GITHUB_ENV
echo "K3S_HOST_VERSION=${{ env.KUBERNETES_VERSION }}+k3s1" >> $GITHUB_ENV
echo "K3S_HOST_VERSION=v1.32.1+k3s1 >> $GITHUB_ENV"
- name: Build and package
run: |
@@ -81,21 +78,21 @@ jobs:
run: go tool covdata textfmt -i=${{ github.workspace }}/covdata -o ${{ github.workspace }}/covdata/cover.out
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@57e3a136b779b570ffcdbf80b3bdc90e7fab3de2 # v6
uses: codecov/codecov-action@75cd11691c0faa626561e295848008c8a7dddffe # v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ${{ github.workspace }}/covdata/cover.out
flags: cli
- name: Archive k3s logs
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
if: always()
with:
name: cli-k3s-logs
path: /tmp/k3s.log
- name: Archive k3k logs
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
if: always()
with:
name: cli-k3k-logs

View File

@@ -15,10 +15,10 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- name: Set up Go
uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6
uses: actions/setup-go@40f1582b2485089dde7abd97c1529aa768e1baff # v5
with:
go-version-file: go.mod
cache: true
@@ -29,7 +29,7 @@ jobs:
- name: Run linters
uses: golangci/golangci-lint-action@1e7e51e771db61008b38414a730f564565cf7c20 # v9.2.0
with:
version: v2.11.4
version: v2.11.3
args: -v
only-new-issues: true
skip-cache: false

View File

@@ -5,7 +5,7 @@ VERSION ?= $(shell git describe --tags --always --dirty --match="v[0-9]*")
## Dependencies
GOLANGCI_LINT_VERSION := v2.11.4
GOLANGCI_LINT_VERSION := v2.11.3
GINKGO_VERSION ?= v2.28.1
GINKGO_FLAGS ?= -v -r --coverprofile=cover.out --coverpkg=./...
ENVTEST_VERSION ?= v0.0.0-20250505003155-b6c5897febe5
@@ -81,7 +81,7 @@ test-integration: ## Run the controller tests that use envtest (tests/integratio
.PHONY: test-e2e
test-e2e: ## Run the e2e tests
$(GINKGO) $(GINKGO_FLAGS) --flake-attempts=3 --label-filter="$(E2E_LABEL_FILTER)" tests/e2e
$(GINKGO) $(GINKGO_FLAGS) --label-filter="$(E2E_LABEL_FILTER)" tests/e2e
.PHONY: test-cli
test-cli: ## Run the cli tests

View File

@@ -2,5 +2,5 @@ apiVersion: v2
name: k3k
description: A Helm chart for K3K
type: application
version: 1.1.0-rc3
appVersion: v1.1.0-rc3
version: 1.0.2
appVersion: v1.0.2

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -34,6 +34,7 @@ spec:
- --agent-virtual-image={{- include "agent.virtual.registry" .}}{{ .Values.agent.virtual.image.repository }}
- --agent-virtual-image-pull-policy={{ .Values.agent.virtual.image.pullPolicy }}
- --kubelet-port-range={{ .Values.agent.shared.kubeletPortRange }}
- --webhook-port-range={{ .Values.agent.shared.webhookPortRange }}
{{- range $key, $value := include "image.pullSecrets" (concat .Values.agent.imagePullSecrets .Values.global.imagePullSecrets) | fromYamlArray }}
- --agent-image-pull-secret
- {{ .name }}
@@ -54,4 +55,7 @@ spec:
- containerPort: 8080
name: https
protocol: TCP
- containerPort: 9443
name: https-webhook
protocol: TCP
serviceAccountName: {{ include "k3k.serviceAccountName" . }}

View File

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: k3k-webhook
labels:
{{- include "k3k.labels" . | nindent 4 }}
namespace: {{ .Release.Namespace }}
spec:
ports:
- port: 443
protocol: TCP
name: https-webhook
targetPort: 9443
selector:
{{- include "k3k.selectorLabels" . | nindent 6 }}

View File

@@ -56,7 +56,7 @@ controller:
server:
imagePullSecrets: []
image:
registry:
registry:
repository: "rancher/k3s"
pullPolicy: ""
@@ -74,6 +74,8 @@ agent:
# Specifies the port range that will be used for k3k-kubelet api if mirrorHostNodes is enabled
kubeletPortRange: "50000-51000"
# Specifies the port range that will be used for webhook if mirrorHostNodes is enabled
webhookPortRange: "51001-52000"
# configuration related to agent in virtual mode
virtual:

View File

@@ -16,6 +16,7 @@ import (
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
)
var keepData bool
@@ -61,6 +62,11 @@ func delete(appCtx *AppContext) func(cmd *cobra.Command, args []string) error {
if err := RemoveOwnerReferenceFromSecret(ctx, k3kcluster.TokenSecretName(cluster.Name), client, cluster); err != nil {
return err
}
// skip removing webhook secret
if err := RemoveOwnerReferenceFromSecret(ctx, agent.WebhookSecretName(cluster.Name), client, cluster); err != nil {
return err
}
} else {
matchingLabels := ctrlclient.MatchingLabels(map[string]string{"cluster": cluster.Name, "role": "server"})
listOpts := ctrlclient.ListOptions{Namespace: cluster.Namespace}

View File

@@ -1,6 +1,7 @@
processor:
# RE2 regular expressions describing type fields that should be excluded from the generated documentation.
ignoreFields:
- "status$"
- "TypeMeta$"
render:

View File

@@ -61,15 +61,6 @@ _Appears In:_
| *`priorityClass`* __string__ | priorityClass is the priority class enforced by the active VirtualClusterPolicy. + | |
| *`nodeSelector`* __object (keys:string, values:string)__ | nodeSelector is a node selector enforced by the active VirtualClusterPolicy. + | |
| *`serverAffinity`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#affinity-v1-core[$$Affinity$$]__ | serverAffinity is the affinity rules for server pods enforced by the active VirtualClusterPolicy. +
This includes both node affinity and pod affinity/anti-affinity rules. + | |
| *`agentAffinity`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#affinity-v1-core[$$Affinity$$]__ | agentAffinity is the affinity rules for agent pods enforced by the active VirtualClusterPolicy. +
This includes both node affinity and pod affinity/anti-affinity rules. + | |
| *`sync`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-syncconfig[$$SyncConfig$$]__ | sync is the SyncConfig enforced by the active VirtualClusterPolicy. + | |
| *`runtimeClassName`* __string__ | SecurityContext specifies custom SecurityContext to be added +
to the agent and server pods of the cluster in virtual or shared mode. + | |
| *`securityContext`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#securitycontext-v1-core[$$SecurityContext$$]__ | RuntimeClassName specifies alternative runtime class for the +
agent and server pods of the cluster in virtual or shared mode. + | |
|===
@@ -96,7 +87,6 @@ _Appears In:_
| *`metadata`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#objectmeta-v1-meta[$$ObjectMeta$$]__ | Refer to Kubernetes API documentation for fields of `metadata`.
| |
| *`spec`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterspec[$$ClusterSpec$$]__ | Spec defines the desired state of the Cluster. + | { } |
| *`status`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterstatus[$$ClusterStatus$$]__ | Status reflects the observed state of the Cluster. + | { } |
|===
@@ -211,53 +201,15 @@ Example: ["--node-name=my-agent-node"] + | |
| *`addons`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-addon[$$Addon$$] array__ | Addons specifies secrets containing raw YAML to deploy on cluster startup. + | |
| *`serverLimit`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core[$$ResourceList$$]__ | ServerLimit specifies resource limits for server nodes. + | |
| *`workerLimit`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core[$$ResourceList$$]__ | WorkerLimit specifies resource limits for agent nodes. + | |
| *`serverAffinity`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#affinity-v1-core[$$Affinity$$]__ | ServerAffinity specifies the affinity rules for server pods. +
This includes both node affinity and pod affinity/anti-affinity rules. + | |
| *`agentAffinity`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#affinity-v1-core[$$Affinity$$]__ | AgentAffinity specifies the affinity rules for agent pods. +
This includes both node affinity and pod affinity/anti-affinity rules. + | |
| *`mirrorHostNodes`* __boolean__ | MirrorHostNodes controls whether node objects from the host cluster +
are mirrored into the virtual cluster. + | |
| *`customCAs`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-customcas[$$CustomCAs$$]__ | CustomCAs specifies the cert/key pairs for custom CA certificates. + | |
| *`sync`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-syncconfig[$$SyncConfig$$]__ | Sync specifies the resources types that will be synced from virtual cluster to host cluster. + | { } |
| *`secretMounts`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-secretmount[$$SecretMount$$] array__ | SecretMounts specifies a list of secrets to mount into server and agent pods. +
Each entry defines a secret and its mount path within the pods. + | |
| *`securityContext`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#securitycontext-v1-core[$$SecurityContext$$]__ | SecurityContext specifies custom SecurityContext to be added +
to the agent and server pods of the cluster in virtual or shared mode. +
This option will override the SecurityContext set by default for virtual mode. + | |
| *`runtimeClassName`* __string__ | RuntimeClassName specifies alternative runtime class for the +
agent and server pods of the cluster in virtual or shared mode. + | |
|===
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterstatus"]
=== ClusterStatus
ClusterStatus reflects the observed state of a Cluster.
_Appears In:_
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-cluster[$$Cluster$$]
[cols="25a,55a,10a,10a", options="header"]
|===
| Field | Description | Default | Validation
| *`hostVersion`* __string__ | HostVersion is the Kubernetes version of the host node. + | |
| *`clusterCIDR`* __string__ | ClusterCIDR is the CIDR range for pod IPs. + | |
| *`serviceCIDR`* __string__ | ServiceCIDR is the CIDR range for service IPs. + | |
| *`clusterDNS`* __string__ | ClusterDNS is the IP address for the CoreDNS service. + | |
| *`tlsSANs`* __string array__ | TLSSANs specifies subject alternative names for the K3s server certificate. + | |
| *`policyName`* __string__ | PolicyName specifies the virtual cluster policy name bound to the virtual cluster. + | |
| *`policy`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-appliedpolicy[$$AppliedPolicy$$]__ | policy represents the status of the policy applied to this cluster. +
This field is set by the VirtualClusterPolicy controller. + | |
| *`kubeletPort`* __integer__ | KubeletPort specifies the port used by k3k-kubelet in shared mode. + | |
| *`conditions`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#condition-v1-meta[$$Condition$$] array__ | Conditions are the individual conditions for the cluster set. + | |
| *`phase`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterphase[$$ClusterPhase$$]__ | Phase is a high-level summary of the cluster's current lifecycle state. + | Unknown | Enum: [Pending Provisioning Ready Failed Terminating Unknown] +
|===
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-configmapsyncconfig"]
@@ -265,7 +217,7 @@ This field is set by the VirtualClusterPolicy controller. + | |
ConfigMapSyncConfig specifies the sync options for ConfigMaps.
ConfigMapSyncConfig specifies the sync options for services.
@@ -400,7 +352,7 @@ _Appears In:_
IngressSyncConfig specifies the sync options for Ingresses.
IngressSyncConfig specifies the sync options for services.
@@ -414,8 +366,6 @@ _Appears In:_
| *`enabled`* __boolean__ | Enabled is an on/off switch for syncing resources. + | false |
| *`selector`* __object (keys:string, values:string)__ | Selector specifies set of labels of the resources that will be synced, if empty +
then all resources of the given type will be synced. + | |
| *`disableTLSSecretTranslation`* __boolean__ | DisableTLSSecretTranslation is an on/off switch for translating TLS secrets +
from virtual cluster to host cluster + | false |
|===
@@ -513,7 +463,7 @@ _Appears In:_
PersistentVolumeClaimSyncConfig specifies the sync options for PersistentVolumeClaims.
PersistentVolumeClaimSyncConfig specifies the sync options for services.
@@ -551,7 +501,7 @@ _Appears In:_
PriorityClassSyncConfig specifies the sync options for PriorityClasses.
PriorityClassSyncConfig specifies the sync options for services.
@@ -618,7 +568,7 @@ This can be 'server', 'agent', or 'all' (for both). + | | Enum: [server agent a
SecretSyncConfig specifies the sync options for Secrets.
SecretSyncConfig specifies the sync options for services.
@@ -640,7 +590,7 @@ then all resources of the given type will be synced. + | |
ServiceSyncConfig specifies the sync options for Services.
ServiceSyncConfig specifies the sync options for services.
@@ -657,28 +607,6 @@ then all resources of the given type will be synced. + | |
|===
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-storageclasssyncconfig"]
=== StorageClassSyncConfig
StorageClassSyncConfig specifies the sync options for StorageClasses.
_Appears In:_
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-syncconfig[$$SyncConfig$$]
[cols="25a,55a,10a,10a", options="header"]
|===
| Field | Description | Default | Validation
| *`enabled`* __boolean__ | Enabled is an on/off switch for syncing resources. + | false |
| *`selector`* __object (keys:string, values:string)__ | Selector specifies set of labels of the resources that will be synced, if empty +
then all resources of the given type will be synced. + | |
|===
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-syncconfig"]
=== SyncConfig
@@ -690,7 +618,6 @@ SyncConfig will contain the resources that should be synced from virtual cluster
_Appears In:_
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-appliedpolicy[$$AppliedPolicy$$]
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterspec[$$ClusterSpec$$]
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-virtualclusterpolicyspec[$$VirtualClusterPolicySpec$$]
@@ -703,7 +630,6 @@ _Appears In:_
| *`ingresses`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-ingresssyncconfig[$$IngressSyncConfig$$]__ | Ingresses resources sync configuration. + | { enabled:false } |
| *`persistentVolumeClaims`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-persistentvolumeclaimsyncconfig[$$PersistentVolumeClaimSyncConfig$$]__ | PersistentVolumeClaims resources sync configuration. + | { enabled:true } |
| *`priorityClasses`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-priorityclasssyncconfig[$$PriorityClassSyncConfig$$]__ | PriorityClasses resources sync configuration. + | { enabled:false } |
| *`storageClasses`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-storageclasssyncconfig[$$StorageClassSyncConfig$$]__ | StorageClasses resources sync configuration. + | { enabled:false } |
|===
@@ -729,7 +655,6 @@ _Appears In:_
| *`metadata`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#objectmeta-v1-meta[$$ObjectMeta$$]__ | Refer to Kubernetes API documentation for fields of `metadata`.
| |
| *`spec`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-virtualclusterpolicyspec[$$VirtualClusterPolicySpec$$]__ | Spec defines the desired state of the VirtualClusterPolicy. + | { } |
| *`status`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-virtualclusterpolicystatus[$$VirtualClusterPolicyStatus$$]__ | Status reflects the observed state of the VirtualClusterPolicy. + | |
|===
@@ -776,43 +701,14 @@ _Appears In:_
to set defaults and constraints (min/max) + | |
| *`defaultNodeSelector`* __object (keys:string, values:string)__ | DefaultNodeSelector specifies the node selector that applies to all clusters (server + agent) in the target Namespace. + | |
| *`defaultPriorityClass`* __string__ | DefaultPriorityClass specifies the priorityClassName applied to all pods of all clusters in the target Namespace. + | |
| *`defaultServerAffinity`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#affinity-v1-core[$$Affinity$$]__ | DefaultServerAffinity specifies the affinity rules applied to server pods of all clusters in the target Namespace. +
This includes both node affinity and pod affinity/anti-affinity rules. + | |
| *`defaultAgentAffinity`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#affinity-v1-core[$$Affinity$$]__ | DefaultAgentAffinity specifies the affinity rules applied to agent pods of all clusters in the target Namespace. +
This includes both node affinity and pod affinity/anti-affinity rules. + | |
| *`allowedMode`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clustermode[$$ClusterMode$$]__ | AllowedMode specifies the allowed cluster provisioning mode. Defaults to "shared". + | shared | Enum: [shared virtual] +
| *`disableNetworkPolicy`* __boolean__ | DisableNetworkPolicy indicates whether to disable the creation of a default network policy for cluster isolation. + | |
| *`podSecurityAdmissionLevel`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-podsecurityadmissionlevel[$$PodSecurityAdmissionLevel$$]__ | PodSecurityAdmissionLevel specifies the pod security admission level applied to the pods in the namespace. + | | Enum: [privileged baseline restricted] +
| *`sync`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-syncconfig[$$SyncConfig$$]__ | Sync specifies the resources types that will be synced from virtual cluster to host cluster. + | { } |
| *`runtimeClassName`* __string__ | SecurityContext specifies custom SecurityContext to be added +
to the agent and server pods of the cluster in virtual or shared mode. + | |
| *`securityContext`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#securitycontext-v1-core[$$SecurityContext$$]__ | RuntimeClassName specifies alternative runtime class for the +
agent and server pods of the cluster in virtual or shared mode. + | |
|===
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-virtualclusterpolicystatus"]
=== VirtualClusterPolicyStatus
VirtualClusterPolicyStatus reflects the observed state of a VirtualClusterPolicy.
_Appears In:_
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-virtualclusterpolicy[$$VirtualClusterPolicy$$]
[cols="25a,55a,10a,10a", options="header"]
|===
| Field | Description | Default | Validation
| *`observedGeneration`* __integer__ | ObservedGeneration was the generation at the time the status was updated. + | |
| *`lastUpdateTime`* __string__ | LastUpdate is the timestamp when the status was last updated. + | |
| *`summary`* __string__ | Summary is a summary of the status. + | |
| *`conditions`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#condition-v1-meta[$$Condition$$] array__ | Conditions are the individual conditions for the cluster set. + | |
|===

View File

@@ -48,11 +48,6 @@ _Appears in:_
| `name` _string_ | name is the name of the VirtualClusterPolicy currently applied to this cluster. | | MinLength: 1 <br /> |
| `priorityClass` _string_ | priorityClass is the priority class enforced by the active VirtualClusterPolicy. | | |
| `nodeSelector` _object (keys:string, values:string)_ | nodeSelector is a node selector enforced by the active VirtualClusterPolicy. | | |
| `serverAffinity` _[Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#affinity-v1-core)_ | serverAffinity is the affinity rules for server pods enforced by the active VirtualClusterPolicy.<br />This includes both node affinity and pod affinity/anti-affinity rules. | | |
| `agentAffinity` _[Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#affinity-v1-core)_ | agentAffinity is the affinity rules for agent pods enforced by the active VirtualClusterPolicy.<br />This includes both node affinity and pod affinity/anti-affinity rules. | | |
| `sync` _[SyncConfig](#syncconfig)_ | sync is the SyncConfig enforced by the active VirtualClusterPolicy. | | |
| `runtimeClassName` _string_ | SecurityContext specifies custom SecurityContext to be added<br />to the agent and server pods of the cluster in virtual or shared mode. | | |
| `securityContext` _[SecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#securitycontext-v1-core)_ | RuntimeClassName specifies alternative runtime class for the<br />agent and server pods of the cluster in virtual or shared mode. | | |
#### Cluster
@@ -74,7 +69,6 @@ _Appears in:_
| `kind` _string_ | `Cluster` | | |
| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
| `spec` _[ClusterSpec](#clusterspec)_ | Spec defines the desired state of the Cluster. | \{ \} | |
| `status` _[ClusterStatus](#clusterstatus)_ | Status reflects the observed state of the Cluster. | \{ \} | |
#### ClusterList
@@ -156,46 +150,19 @@ _Appears in:_
| `addons` _[Addon](#addon) array_ | Addons specifies secrets containing raw YAML to deploy on cluster startup. | | |
| `serverLimit` _[ResourceList](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core)_ | ServerLimit specifies resource limits for server nodes. | | |
| `workerLimit` _[ResourceList](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core)_ | WorkerLimit specifies resource limits for agent nodes. | | |
| `serverAffinity` _[Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#affinity-v1-core)_ | ServerAffinity specifies the affinity rules for server pods.<br />This includes both node affinity and pod affinity/anti-affinity rules. | | |
| `agentAffinity` _[Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#affinity-v1-core)_ | AgentAffinity specifies the affinity rules for agent pods.<br />This includes both node affinity and pod affinity/anti-affinity rules. | | |
| `mirrorHostNodes` _boolean_ | MirrorHostNodes controls whether node objects from the host cluster<br />are mirrored into the virtual cluster. | | |
| `customCAs` _[CustomCAs](#customcas)_ | CustomCAs specifies the cert/key pairs for custom CA certificates. | | |
| `sync` _[SyncConfig](#syncconfig)_ | Sync specifies the resources types that will be synced from virtual cluster to host cluster. | \{ \} | |
| `secretMounts` _[SecretMount](#secretmount) array_ | SecretMounts specifies a list of secrets to mount into server and agent pods.<br />Each entry defines a secret and its mount path within the pods. | | |
| `securityContext` _[SecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#securitycontext-v1-core)_ | SecurityContext specifies custom SecurityContext to be added<br />to the agent and server pods of the cluster in virtual or shared mode.<br />This option will override the SecurityContext set by default for virtual mode. | | |
| `runtimeClassName` _string_ | RuntimeClassName specifies alternative runtime class for the<br />agent and server pods of the cluster in virtual or shared mode. | | |
#### ClusterStatus
ClusterStatus reflects the observed state of a Cluster.
_Appears in:_
- [Cluster](#cluster)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `hostVersion` _string_ | HostVersion is the Kubernetes version of the host node. | | |
| `clusterCIDR` _string_ | ClusterCIDR is the CIDR range for pod IPs. | | |
| `serviceCIDR` _string_ | ServiceCIDR is the CIDR range for service IPs. | | |
| `clusterDNS` _string_ | ClusterDNS is the IP address for the CoreDNS service. | | |
| `tlsSANs` _string array_ | TLSSANs specifies subject alternative names for the K3s server certificate. | | |
| `policyName` _string_ | PolicyName specifies the virtual cluster policy name bound to the virtual cluster. | | |
| `policy` _[AppliedPolicy](#appliedpolicy)_ | policy represents the status of the policy applied to this cluster.<br />This field is set by the VirtualClusterPolicy controller. | | |
| `kubeletPort` _integer_ | KubeletPort specifies the port used by k3k-kubelet in shared mode. | | |
| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#condition-v1-meta) array_ | Conditions are the individual conditions for the cluster set. | | |
| `phase` _[ClusterPhase](#clusterphase)_ | Phase is a high-level summary of the cluster's current lifecycle state. | Unknown | Enum: [Pending Provisioning Ready Failed Terminating Unknown] <br /> |
#### ConfigMapSyncConfig
ConfigMapSyncConfig specifies the sync options for ConfigMaps.
ConfigMapSyncConfig specifies the sync options for services.
@@ -303,7 +270,7 @@ _Appears in:_
IngressSyncConfig specifies the sync options for Ingresses.
IngressSyncConfig specifies the sync options for services.
@@ -314,7 +281,6 @@ _Appears in:_
| --- | --- | --- | --- |
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | false | |
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
| `disableTLSSecretTranslation` _boolean_ | DisableTLSSecretTranslation is an on/off switch for translating TLS secrets<br />from virtual cluster to host cluster | false | |
#### LoadBalancerConfig
@@ -386,7 +352,7 @@ _Appears in:_
PersistentVolumeClaimSyncConfig specifies the sync options for PersistentVolumeClaims.
PersistentVolumeClaimSyncConfig specifies the sync options for services.
@@ -417,7 +383,7 @@ _Appears in:_
PriorityClassSyncConfig specifies the sync options for PriorityClasses.
PriorityClassSyncConfig specifies the sync options for services.
@@ -457,7 +423,7 @@ _Appears in:_
SecretSyncConfig specifies the sync options for Secrets.
SecretSyncConfig specifies the sync options for services.
@@ -474,7 +440,7 @@ _Appears in:_
ServiceSyncConfig specifies the sync options for Services.
ServiceSyncConfig specifies the sync options for services.
@@ -487,23 +453,6 @@ _Appears in:_
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
#### StorageClassSyncConfig
StorageClassSyncConfig specifies the sync options for StorageClasses.
_Appears in:_
- [SyncConfig](#syncconfig)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | false | |
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
#### SyncConfig
@@ -513,7 +462,6 @@ SyncConfig will contain the resources that should be synced from virtual cluster
_Appears in:_
- [AppliedPolicy](#appliedpolicy)
- [ClusterSpec](#clusterspec)
- [VirtualClusterPolicySpec](#virtualclusterpolicyspec)
@@ -525,7 +473,6 @@ _Appears in:_
| `ingresses` _[IngressSyncConfig](#ingresssyncconfig)_ | Ingresses resources sync configuration. | \{ enabled:false \} | |
| `persistentVolumeClaims` _[PersistentVolumeClaimSyncConfig](#persistentvolumeclaimsyncconfig)_ | PersistentVolumeClaims resources sync configuration. | \{ enabled:true \} | |
| `priorityClasses` _[PriorityClassSyncConfig](#priorityclasssyncconfig)_ | PriorityClasses resources sync configuration. | \{ enabled:false \} | |
| `storageClasses` _[StorageClassSyncConfig](#storageclasssyncconfig)_ | StorageClasses resources sync configuration. | \{ enabled:false \} | |
#### VirtualClusterPolicy
@@ -546,7 +493,6 @@ _Appears in:_
| `kind` _string_ | `VirtualClusterPolicy` | | |
| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
| `spec` _[VirtualClusterPolicySpec](#virtualclusterpolicyspec)_ | Spec defines the desired state of the VirtualClusterPolicy. | \{ \} | |
| `status` _[VirtualClusterPolicyStatus](#virtualclusterpolicystatus)_ | Status reflects the observed state of the VirtualClusterPolicy. | | |
#### VirtualClusterPolicyList
@@ -584,32 +530,11 @@ _Appears in:_
| `limit` _[LimitRangeSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#limitrangespec-v1-core)_ | Limit specifies the LimitRange that will be applied to all pods within the VirtualClusterPolicy<br />to set defaults and constraints (min/max) | | |
| `defaultNodeSelector` _object (keys:string, values:string)_ | DefaultNodeSelector specifies the node selector that applies to all clusters (server + agent) in the target Namespace. | | |
| `defaultPriorityClass` _string_ | DefaultPriorityClass specifies the priorityClassName applied to all pods of all clusters in the target Namespace. | | |
| `defaultServerAffinity` _[Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#affinity-v1-core)_ | DefaultServerAffinity specifies the affinity rules applied to server pods of all clusters in the target Namespace.<br />This includes both node affinity and pod affinity/anti-affinity rules. | | |
| `defaultAgentAffinity` _[Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#affinity-v1-core)_ | DefaultAgentAffinity specifies the affinity rules applied to agent pods of all clusters in the target Namespace.<br />This includes both node affinity and pod affinity/anti-affinity rules. | | |
| `allowedMode` _[ClusterMode](#clustermode)_ | AllowedMode specifies the allowed cluster provisioning mode. Defaults to "shared". | shared | Enum: [shared virtual] <br /> |
| `disableNetworkPolicy` _boolean_ | DisableNetworkPolicy indicates whether to disable the creation of a default network policy for cluster isolation. | | |
| `podSecurityAdmissionLevel` _[PodSecurityAdmissionLevel](#podsecurityadmissionlevel)_ | PodSecurityAdmissionLevel specifies the pod security admission level applied to the pods in the namespace. | | Enum: [privileged baseline restricted] <br /> |
| `sync` _[SyncConfig](#syncconfig)_ | Sync specifies the resources types that will be synced from virtual cluster to host cluster. | \{ \} | |
| `runtimeClassName` _string_ | RuntimeClassName specifies alternative runtime class for the<br />agent and server pods of the cluster in virtual or shared mode. | | |
| `securityContext` _[SecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#securitycontext-v1-core)_ | SecurityContext specifies custom SecurityContext to be added<br />to the agent and server pods of the cluster in virtual or shared mode. | | |
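For reference, a hedged sketch of a VirtualClusterPolicy exercising the spec fields above; the `apiVersion`, metadata, and label/class names are assumptions for illustration:

```yaml
apiVersion: k3k.io/v1alpha1        # assumed group/version
kind: VirtualClusterPolicy
metadata:
  name: example-policy             # hypothetical
spec:
  allowedMode: shared              # enum: shared | virtual (default: shared)
  podSecurityAdmissionLevel: baseline
  disableNetworkPolicy: false
  defaultNodeSelector:
    kubernetes.io/os: linux
  defaultPriorityClass: k3k-low    # hypothetical PriorityClass name
  sync:
    persistentVolumeClaims:
      enabled: true
```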
#### VirtualClusterPolicyStatus
VirtualClusterPolicyStatus reflects the observed state of a VirtualClusterPolicy.
_Appears in:_
- [VirtualClusterPolicy](#virtualclusterpolicy)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `observedGeneration` _integer_ | ObservedGeneration was the generation at the time the status was updated. | | |
| `lastUpdateTime` _string_ | LastUpdate is the timestamp when the status was last updated. | | |
| `summary` _string_ | Summary is a summary of the status. | | |
| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#condition-v1-meta) array_ | Conditions are the individual conditions for the cluster set. | | |
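The status stanza then takes roughly this shape; all values below are illustrative only, and the condition type and reason are hypothetical:

```yaml
status:
  observedGeneration: 2
  lastUpdateTime: "2026-02-18T10:31:29Z"
  summary: Ready                    # hypothetical summary string
  conditions:
    - type: Ready                   # hypothetical condition type
      status: "True"
      reason: PolicyApplied         # hypothetical reason
      lastTransitionTime: "2026-02-18T10:31:29Z"
```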


@@ -167,7 +167,7 @@ kind: Cluster
metadata:
name: k3kcluster-custom-k8s
spec:
version: "v1.35.2-k3s1"
version: "v1.33.1-k3s1"
```
This sets the virtual cluster's Kubernetes version explicitly.
@@ -178,7 +178,7 @@ This sets the virtual cluster's Kubernetes version explicitly.
```sh
k3kcli cluster create \
--version v1.35.2-k3s1 \
--version v1.33.1-k3s1 \
k3kcluster-custom-k8s
```

go.mod

@@ -2,15 +2,15 @@ module github.com/rancher/k3k
go 1.25.0
toolchain go1.25.9
toolchain go1.25.8
require (
github.com/blang/semver/v4 v4.0.0
github.com/go-logr/logr v1.4.3
github.com/go-logr/zapr v1.3.0
github.com/google/go-cmp v0.7.0
github.com/onsi/ginkgo/v2 v2.28.1
github.com/onsi/gomega v1.39.1
github.com/onsi/ginkgo/v2 v2.21.0
github.com/onsi/gomega v1.36.0
github.com/rancher/dynamiclistener v1.27.5
github.com/sirupsen/logrus v1.9.4
github.com/spf13/cobra v1.10.2
@@ -19,35 +19,35 @@ require (
github.com/stretchr/testify v1.11.1
github.com/testcontainers/testcontainers-go v0.41.0
github.com/testcontainers/testcontainers-go/modules/k3s v0.41.0
github.com/virtual-kubelet/virtual-kubelet v1.12.0
go.etcd.io/etcd/api/v3 v3.6.10
go.etcd.io/etcd/client/v3 v3.6.10
github.com/virtual-kubelet/virtual-kubelet v1.11.1-0.20250530103808-c9f64e872803
go.etcd.io/etcd/api/v3 v3.5.21
go.etcd.io/etcd/client/v3 v3.5.21
go.uber.org/zap v1.27.1
gopkg.in/yaml.v3 v3.0.1
helm.sh/helm/v3 v3.20.1
k8s.io/api v0.35.3
k8s.io/apiextensions-apiserver v0.35.3
k8s.io/apimachinery v0.35.3
k8s.io/apiserver v0.35.3
k8s.io/cli-runtime v0.35.3
k8s.io/client-go v0.35.3
k8s.io/component-base v0.35.3
k8s.io/component-helpers v0.35.3
k8s.io/kubectl v0.35.3
k8s.io/kubelet v0.35.3
k8s.io/kubernetes v1.35.3
k8s.io/utils v0.0.0-20260319190234-28399d86e0b5
sigs.k8s.io/controller-runtime v0.23.3
gopkg.in/yaml.v2 v2.4.0
helm.sh/helm/v3 v3.18.5
k8s.io/api v0.33.7
k8s.io/apiextensions-apiserver v0.33.7
k8s.io/apimachinery v0.33.7
k8s.io/apiserver v0.33.7
k8s.io/cli-runtime v0.33.7
k8s.io/client-go v0.33.7
k8s.io/component-base v0.33.7
k8s.io/component-helpers v0.33.7
k8s.io/kubectl v0.33.7
k8s.io/kubelet v0.33.7
k8s.io/kubernetes v1.33.7
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
sigs.k8s.io/controller-runtime v0.19.4
)
require (
cel.dev/expr v0.25.1 // indirect
dario.cat/mergo v1.0.2 // indirect
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
github.com/BurntSushi/toml v1.6.0 // indirect
github.com/BurntSushi/toml v1.5.0 // indirect
github.com/MakeNowJust/heredoc v1.0.0 // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/semver/v3 v3.4.0 // indirect
github.com/Masterminds/semver/v3 v3.3.0 // indirect
github.com/Masterminds/sprig/v3 v3.3.0 // indirect
github.com/Masterminds/squirrel v1.5.4 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
@@ -67,21 +67,21 @@ require (
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cpuguy83/dockercfg v0.3.2 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
github.com/cyphar/filepath-securejoin v0.6.1 // indirect
github.com/cyphar/filepath-securejoin v0.5.1 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/distribution/reference v0.6.0 // indirect
github.com/docker/docker v28.5.2+incompatible // indirect
github.com/docker/go-connections v0.6.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/ebitengine/purego v0.10.0 // indirect
github.com/emicklei/go-restful/v3 v3.12.2 // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/evanphx/json-patch v5.9.11+incompatible // indirect
github.com/evanphx/json-patch/v5 v5.9.11 // indirect
github.com/evanphx/json-patch/v5 v5.9.0 // indirect
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
github.com/fatih/color v1.13.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.9.0 // indirect
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/go-errors/errors v1.4.2 // indirect
github.com/go-gorp/gorp/v3 v3.1.0 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
@@ -96,16 +96,17 @@ require (
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/btree v1.1.3 // indirect
github.com/google/cel-go v0.26.0 // indirect
github.com/google/gnostic-models v0.7.0 // indirect
github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83 // indirect
github.com/google/cel-go v0.23.2 // indirect
github.com/google/gnostic-models v0.6.9 // indirect
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/mux v1.8.1 // indirect
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
github.com/gosuri/uitable v0.0.4 // indirect
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/huandu/xstrings v1.5.0 // indirect
@@ -137,7 +138,7 @@ require (
github.com/moby/sys/userns v0.1.0 // indirect
github.com/moby/term v0.5.2 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
@@ -149,11 +150,11 @@ require (
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
github.com/prometheus/client_golang v1.23.2 // indirect
github.com/prometheus/client_golang v1.22.0 // indirect
github.com/prometheus/client_model v0.6.2
github.com/prometheus/common v0.67.4 // indirect
github.com/prometheus/procfs v0.16.1 // indirect
github.com/rubenv/sql-migrate v1.8.1 // indirect
github.com/prometheus/common v0.64.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/rubenv/sql-migrate v1.8.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/sagikazarmark/locafero v0.11.0 // indirect
github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 // indirect
@@ -169,50 +170,50 @@ require (
github.com/x448/float16 v0.8.4 // indirect
github.com/xlab/treeprint v1.2.0 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.6.10 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.21 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect
go.opentelemetry.io/otel v1.43.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 // indirect
go.opentelemetry.io/otel/metric v1.43.0 // indirect
go.opentelemetry.io/otel/sdk v1.43.0 // indirect
go.opentelemetry.io/otel/trace v1.43.0 // indirect
go.opentelemetry.io/proto/otlp v1.5.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
go.opentelemetry.io/otel v1.41.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 // indirect
go.opentelemetry.io/otel/metric v1.41.0 // indirect
go.opentelemetry.io/otel/sdk v1.41.0 // indirect
go.opentelemetry.io/otel/trace v1.41.0 // indirect
go.opentelemetry.io/proto/otlp v1.4.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
go.yaml.in/yaml/v2 v2.4.2 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/crypto v0.48.0 // indirect
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
golang.org/x/mod v0.33.0 // indirect
golang.org/x/net v0.51.0 // indirect
golang.org/x/net v0.49.0 // indirect
golang.org/x/oauth2 v0.34.0 // indirect
golang.org/x/sync v0.20.0 // indirect
golang.org/x/sys v0.42.0 // indirect
golang.org/x/sync v0.19.0 // indirect
golang.org/x/sys v0.41.0 // indirect
golang.org/x/term v0.40.0 // indirect
golang.org/x/text v0.35.0 // indirect
golang.org/x/time v0.14.0 // indirect
golang.org/x/tools v0.42.0 // indirect
golang.org/x/text v0.34.0 // indirect
golang.org/x/time v0.12.0 // indirect
golang.org/x/tools v0.41.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 // indirect
google.golang.org/grpc v1.79.3 // indirect
google.golang.org/protobuf v1.36.10 // indirect
gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
k8s.io/controller-manager v0.35.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/controller-manager v0.33.7 // indirect
k8s.io/klog/v2 v2.130.1
k8s.io/kms v0.35.3 // indirect
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect
k8s.io/kms v0.33.7 // indirect
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
oras.land/oras-go/v2 v2.6.0 // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect
sigs.k8s.io/kustomize/api v0.20.1 // indirect
sigs.k8s.io/kustomize/kyaml v0.20.1 // indirect
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
sigs.k8s.io/kustomize/api v0.19.0 // indirect
sigs.k8s.io/kustomize/kyaml v0.19.0 // indirect
sigs.k8s.io/randfill v1.0.0 // indirect
sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482 // indirect
sigs.k8s.io/yaml v1.6.0 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
sigs.k8s.io/yaml v1.5.0 // indirect
)

go.sum

@@ -10,16 +10,16 @@ github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.6.0 h1:dRaEfpa2VI55EwlIW72hMRHdWouJeRF7TPYhI+AUQjk=
github.com/BurntSushi/toml v1.6.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU=
github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU=
github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0=
github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0=
github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs=
github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0=
github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM=
@@ -74,8 +74,8 @@ github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6N
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/cyphar/filepath-securejoin v0.6.1 h1:5CeZ1jPXEiYt3+Z6zqprSAgSWiggmpVyciv8syjIpVE=
github.com/cyphar/filepath-securejoin v0.6.1/go.mod h1:A8hd4EnAeyujCJRrICiOWqjS1AX0a9kM5XL+NwKoYSc=
github.com/cyphar/filepath-securejoin v0.5.1 h1:eYgfMq5yryL4fbWfkLpFFy2ukSELzaJOTaUTuh+oF48=
github.com/cyphar/filepath-securejoin v0.5.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
@@ -104,36 +104,30 @@ github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkp
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/ebitengine/purego v0.10.0 h1:QIw4xfpWT6GWTzaW5XEKy3HXoqrJGx1ijYHzTF0/ISU=
github.com/ebitengine/purego v0.10.0/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v5.9.11+incompatible h1:ixHHqfcGvxhWkniF1tWxBHA0yb4Z+d1UQi45df52xW8=
github.com/evanphx/json-patch v5.9.11+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU=
github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM=
github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4=
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc=
github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/foxcpp/go-mockdns v1.2.0 h1:omK3OrHRD1IWJz1FuFBCFquhXslXoF17OvBS6JPzZF0=
github.com/foxcpp/go-mockdns v1.2.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk=
github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7DlmewI=
github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
github.com/gkampitakis/ciinfo v0.3.2 h1:JcuOPk8ZU7nZQjdUhctuhQofk7BGHuIy0c9Ez8BNhXs=
github.com/gkampitakis/ciinfo v0.3.2/go.mod h1:1NIwaOcFChN4fa/B0hEBdAb6npDlFL8Bwx4dfRLRqAo=
github.com/gkampitakis/go-diff v1.3.2 h1:Qyn0J9XJSDTgnsgHRdz9Zp24RaJeKMUHg2+PDZZdC4M=
github.com/gkampitakis/go-diff v1.3.2/go.mod h1:LLgOrpqleQe26cte8s36HTWcTmMEur6OPYerdAAS9tk=
github.com/gkampitakis/go-snaps v0.5.15 h1:amyJrvM1D33cPHwVrjo9jQxX8g/7E2wYdZ+01KS3zGE=
github.com/gkampitakis/go-snaps v0.5.15/go.mod h1:HNpx/9GoKisdhw9AFOBT1N7DBs9DiHo/hGheFGBZ+mc=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs=
@@ -163,13 +157,11 @@ github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9L
github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw=
github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI=
github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
@@ -188,10 +180,10 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
github.com/google/cel-go v0.26.0 h1:DPGjXackMpJWH680oGY4lZhYjIameYmR+/6RBdDGmaI=
github.com/google/cel-go v0.26.0/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM=
github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
github.com/google/cel-go v0.23.2 h1:UdEe3CvQh3Nv+E/j9r1Y//WO0K0cSyD7/y0bzyLIMI4=
github.com/google/cel-go v0.23.2/go.mod h1:52Pb6QsDbC5kvgxvZhiL9QX1oZEkcUF/ZqaPx1J5Wwo=
github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -199,13 +191,16 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83 h1:z2ogiKUYzX5Is6zr/vP9vJGqPwcdqsWjOt+V8J7+bTc=
github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83/go.mod h1:MxpfABSjhmINe3F1It9d+8exIHFvUqtLIRCdOGNXqiI=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -220,14 +215,13 @@ github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA=
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1 h1:qnpSQwGEnkcRpTqNOIR6bJbR0gAorgP9CSALpRcKoAA=
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1/go.mod h1:lXGCsh6c22WGtjr+qGHj1otzZpV/1kwTMAqkwZsnWRU=
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.0 h1:FbSCl+KggFl+Ocym490i/EyXF4lPgLoUtcSWquBM0Rs=
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.0/go.mod h1:qOchhhIlmRcqk/O9uCo/puJlyo07YINaIqdZfZG3Jkc=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -243,12 +237,10 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o=
github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY=
github.com/jonboulle/clockwork v0.5.0 h1:Hyh9A8u51kptdkR+cqRpT1EebBwTn1oK9YfGYbdFz6I=
github.com/jonboulle/clockwork v0.5.0/go.mod h1:3mZlmanh0g2NDKO5TWZVJAfofYk64M7XN3SzBPjZF60=
github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/joshdk/go-junit v1.0.0 h1:S86cUKIdwBHWwA6xCmFlf3RTLfVXYQfvanM5Uh+K6GE=
github.com/joshdk/go-junit v1.0.0/go.mod h1:TiiV0PqkaNfFXjEiyjWM3XXrhVyCa1K4Zfga6W52ung=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
@@ -278,8 +270,6 @@ github.com/magiconair/properties v1.8.10 h1:s31yESBquKXCV9a/ScB3ESkOjUYYv+X0rg8S
github.com/magiconair/properties v1.8.10/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/maruel/natural v1.1.1 h1:Hja7XhhmvEFhcByqDoHz9QZbkWey+COd9xWfCfn1ioo=
github.com/maruel/natural v1.1.1/go.mod h1:v+Rfd79xlw1AgVBjbO0BEQmptqb5HvL/k9GRHB7ZKEg=
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
@@ -292,8 +282,6 @@ github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/Qd
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/mfridman/tparse v0.18.0 h1:wh6dzOKaIwkUGyKgOntDW4liXSo37qg5AXbIhkMV3vE=
github.com/mfridman/tparse v0.18.0/go.mod h1:gEvqZTuCgEhPbYk/2lS3Kcxg1GmTxxU7kTC8DvP0i/A=
github.com/miekg/dns v1.1.57 h1:Jzi7ApEIzwEPLHWRcafCN9LZSBbqQpxjt/wpgvg7wcM=
github.com/miekg/dns v1.1.57/go.mod h1:uqRjCRUuEAA6qsOiJvDd+CFo/vW+y5WR6SNmHE55hZk=
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
@@ -323,9 +311,8 @@ github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFL
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0=
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
@@ -334,10 +321,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/onsi/ginkgo/v2 v2.28.1 h1:S4hj+HbZp40fNKuLUQOYLDgZLwNUVn19N3Atb98NCyI=
github.com/onsi/ginkgo/v2 v2.28.1/go.mod h1:CLtbVInNckU3/+gC8LzkGUb9oF+e8W8TdUsxPwvdOgE=
github.com/onsi/gomega v1.39.1 h1:1IJLAad4zjPn2PsnhH70V4DKRFlrCzGBNrNaru+Vf28=
github.com/onsi/gomega v1.39.1/go.mod h1:hL6yVALoTOxeWudERyfppUcZXjMwIMLnuSfruD2lcfg=
github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
github.com/onsi/gomega v1.36.0 h1:Pb12RlruUtj4XUuPUqeEWc6j5DkVVVA49Uf6YLfC95Y=
github.com/onsi/gomega v1.36.0/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
@@ -357,15 +344,15 @@ github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY=
github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg=
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc=
github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI=
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4=
github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/rancher/dynamiclistener v1.27.5 h1:FA/s9vbQzGz1Au3BuFvdbBfBBUmHGXGR3xoliwR4qfY=
github.com/rancher/dynamiclistener v1.27.5/go.mod h1:VqBaJNi+bZmre0+gi+2Jb6jbn7ovHzRueW+M7QhVKsk=
github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 h1:EaDatTxkdHG+U3Bk4EUr+DZ7fOGwTfezUiUJMaIcaho=
@@ -376,8 +363,8 @@ github.com/redis/go-redis/v9 v9.7.3 h1:YpPyAayJV+XErNsatSElgRZZVCwXX9QzkKYNvO7x0
github.com/redis/go-redis/v9 v9.7.3/go.mod h1:bGUrSggJ9X9GUmZpZNEOQKaANxSGgOEBRltRTZHSvrA=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/rubenv/sql-migrate v1.8.1 h1:EPNwCvjAowHI3TnZ+4fQu3a915OpnQoPAjTXCGOy2U0=
github.com/rubenv/sql-migrate v1.8.1/go.mod h1:BTIKBORjzyxZDS6dzoiw6eAFYJ1iNlGAtjn4LGeVjS8=
github.com/rubenv/sql-migrate v1.8.0 h1:dXnYiJk9k3wetp7GfQbKJcPHjVJL6YK19tKj8t2Ns0o=
github.com/rubenv/sql-migrate v1.8.0/go.mod h1:F2bGFBwCU+pnmbtNYDeKvSuvL6lBVtXDXUUv5t+u1qw=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc=
@@ -429,22 +416,14 @@ github.com/testcontainers/testcontainers-go v0.41.0 h1:mfpsD0D36YgkxGj2LrIyxuwQ9
github.com/testcontainers/testcontainers-go v0.41.0/go.mod h1:pdFrEIfaPl24zmBjerWTTYaY0M6UHsqA1YSvsoU40MI=
github.com/testcontainers/testcontainers-go/modules/k3s v0.41.0 h1:xvnllztzVajAMUmeb1BD6XQNacoY4gNEN6Gl1fhzUMI=
github.com/testcontainers/testcontainers-go/modules/k3s v0.41.0/go.mod h1:y9YF71J/D1tIoIY09dmtwEXPiHmuvntbK+MWuypq8OQ=
github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY=
github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY=
github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28=
github.com/tklauser/go-sysconf v0.3.16 h1:frioLaCQSsF5Cy1jgRBrzr6t502KIIwQ0MArYICU0nA=
github.com/tklauser/go-sysconf v0.3.16/go.mod h1:/qNL9xxDhc7tx3HSRsLWNnuzbVfh3e7gh/BmM179nYI=
github.com/tklauser/numcpus v0.11.0 h1:nSTwhKH5e1dMNsCdVBukSZrURJRoHbSEQjdEbY+9RXw=
github.com/tklauser/numcpus v0.11.0/go.mod h1:z+LwcLq54uWZTX0u/bGobaV34u6V7KNlTZejzM6/3MQ=
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
github.com/virtual-kubelet/virtual-kubelet v1.12.0 h1:6RnRE3egGnqw3BDL9PBbP5DPV6OaXC2h/nfq5c7VsF4=
github.com/virtual-kubelet/virtual-kubelet v1.12.0/go.mod h1:dVlVEsFfrrwAcj/v0eDGgkTF5r+eAsBnn9gDxx3au2s=
github.com/virtual-kubelet/virtual-kubelet v1.11.1-0.20250530103808-c9f64e872803 h1:0O149bxUoQL69b4+pcGaCbKk2bvA/43AhkczkDuRjMc=
github.com/virtual-kubelet/virtual-kubelet v1.11.1-0.20250530103808-c9f64e872803/go.mod h1:SHfH2bqArcMTBh/JejdbtsyZwmYYqkpJnABOyipjT54=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510 h1:S2dVYn90KE98chqDkyE9Z4N61UnQd+KOfgp5Iu53llk=
@@ -455,20 +434,22 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.etcd.io/bbolt v1.4.3 h1:dEadXpI6G79deX5prL3QRNP6JB8UxVkqo4UPnHaNXJo=
go.etcd.io/bbolt v1.4.3/go.mod h1:tKQlpPaYCVFctUIgFKFnAlvbmB3tpy1vkTnDWohtc0E=
go.etcd.io/etcd/api/v3 v3.6.10 h1:jlwjtELjA8yi2VWpOFH+0w0lGr3K6mVDyn0RDB9aaAY=
go.etcd.io/etcd/api/v3 v3.6.10/go.mod h1:pdV4VeFmvhdNjB4LWRkC8ReLyRBAxUOze3GarMhE2sk=
go.etcd.io/etcd/client/pkg/v3 v3.6.10 h1:tBT7podcPhuVbCVkAEzx8bC5I+aqxfLwBN8/As1arrA=
go.etcd.io/etcd/client/pkg/v3 v3.6.10/go.mod h1:WEy3PpwbbEBVRdh1NVJYsuUe/8eyI21PNJRazeD8z/Y=
go.etcd.io/etcd/client/v3 v3.6.10 h1:J598zJ+C/ZPvImypmq5waj84+bovePrlZERHklf34y0=
go.etcd.io/etcd/client/v3 v3.6.10/go.mod h1:iHhUDUcEwaKs1YFq3MgmI9U4zhTVasp/vgdVbFf1RS8=
go.etcd.io/etcd/pkg/v3 v3.6.5 h1:byxWB4AqIKI4SBmquZUG1WGtvMfMaorXFoCcFbVeoxM=
go.etcd.io/etcd/pkg/v3 v3.6.5/go.mod h1:uqrXrzmMIJDEy5j00bCqhVLzR5jEJIwDp5wTlLwPGOU=
go.etcd.io/etcd/server/v3 v3.6.5 h1:4RbUb1Bd4y1WkBHmuF+cZII83JNQMuNXzyjwigQ06y0=
go.etcd.io/etcd/server/v3 v3.6.5/go.mod h1:PLuhyVXz8WWRhzXDsl3A3zv/+aK9e4A9lpQkqawIaH0=
go.etcd.io/raft/v3 v3.6.0 h1:5NtvbDVYpnfZWcIHgGRk9DyzkBIXOi8j+DDp1IcnUWQ=
go.etcd.io/raft/v3 v3.6.0/go.mod h1:nLvLevg6+xrVtHUmVaTcTz603gQPHfh7kUAwV6YpfGo=
go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0=
go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I=
go.etcd.io/etcd/api/v3 v3.5.21 h1:A6O2/JDb3tvHhiIz3xf9nJ7REHvtEFJJ3veW3FbCnS8=
go.etcd.io/etcd/api/v3 v3.5.21/go.mod h1:c3aH5wcvXv/9dqIw2Y810LDXJfhSYdHQ0vxmP3CCHVY=
go.etcd.io/etcd/client/pkg/v3 v3.5.21 h1:lPBu71Y7osQmzlflM9OfeIV2JlmpBjqBNlLtcoBqUTc=
go.etcd.io/etcd/client/pkg/v3 v3.5.21/go.mod h1:BgqT/IXPjK9NkeSDjbzwsHySX3yIle2+ndz28nVsjUs=
go.etcd.io/etcd/client/v2 v2.305.21 h1:eLiFfexc2mE+pTLz9WwnoEsX5JTTpLCYVivKkmVXIRA=
go.etcd.io/etcd/client/v2 v2.305.21/go.mod h1:OKkn4hlYNf43hpjEM3Ke3aRdUkhSl8xjKjSf8eCq2J8=
go.etcd.io/etcd/client/v3 v3.5.21 h1:T6b1Ow6fNjOLOtM0xSoKNQt1ASPCLWrF9XMHcH9pEyY=
go.etcd.io/etcd/client/v3 v3.5.21/go.mod h1:mFYy67IOqmbRf/kRUvsHixzo3iG+1OF2W2+jVIQRAnU=
go.etcd.io/etcd/pkg/v3 v3.5.21 h1:jUItxeKyrDuVuWhdh0HtjUANwyuzcb7/FAeUfABmQsk=
go.etcd.io/etcd/pkg/v3 v3.5.21/go.mod h1:wpZx8Egv1g4y+N7JAsqi2zoUiBIUWznLjqJbylDjWgU=
go.etcd.io/etcd/raft/v3 v3.5.21 h1:dOmE0mT55dIUsX77TKBLq+RgyumsQuYeiRQnW/ylugk=
go.etcd.io/etcd/raft/v3 v3.5.21/go.mod h1:fmcuY5R2SNkklU4+fKVBQi2biVp5vafMrWUEj4TJ4Cs=
go.etcd.io/etcd/server/v3 v3.5.21 h1:9w0/k12majtgarGmlMVuhwXRI2ob3/d1Ik3X5TKo0yU=
go.etcd.io/etcd/server/v3 v3.5.21/go.mod h1:G1mOzdwuzKT1VRL7SqRchli/qcFrtLBTAQ4lV20sXXo=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
@@ -477,12 +458,12 @@ go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 h1:UW0+QyeyBVhn+COBec3nGh
go.opentelemetry.io/contrib/bridges/prometheus v0.57.0/go.mod h1:ppciCHRLsyCio54qbzQv0E4Jyth/fLWDTJYfvWpcSVk=
go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 h1:jmTVJ86dP60C01K3slFQa2NQ/Aoi7zA+wy7vMOKD9H4=
go.opentelemetry.io/contrib/exporters/autoexport v0.57.0/go.mod h1:EJBheUMttD/lABFyLXhce47Wr6DPWYReCzaZiXadH7g=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q=
go.opentelemetry.io/otel v1.43.0 h1:mYIM03dnh5zfN7HautFE4ieIig9amkNANT+xcVxAj9I=
go.opentelemetry.io/otel v1.43.0/go.mod h1:JuG+u74mvjvcm8vj8pI5XiHy1zDeoCS2LB1spIq7Ay0=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
go.opentelemetry.io/otel v1.41.0 h1:YlEwVsGAlCvczDILpUXpIpPSL/VPugt7zHThEMLce1c=
go.opentelemetry.io/otel v1.41.0/go.mod h1:Yt4UwgEKeT05QbLwbyHXEwhnjxNO6D8L5PQP51/46dE=
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 h1:WzNab7hOOLzdDF/EoWCt4glhrbMPVMOO5JYTmpz36Ls=
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0/go.mod h1:hKvJwTzJdp90Vh7p6q/9PAOd55dI6WA6sWj62a/JvSs=
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 h1:S+LdBGiQXtJdowoJoQPEtI52syEP/JYBUpjO49EQhV8=
@@ -491,10 +472,10 @@ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 h1:j7Z
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0/go.mod h1:WXbYJTUaZXAbYd8lbgGuvih0yuCfOFC5RJoYnoLcGz8=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 h1:t/Qur3vKSkUCcDVaSumWF2PKHt85pc7fRvFuoVT8qFU=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0/go.mod h1:Rl61tySSdcOJWoEgYZVtmnKdA0GeKrSqkHC1t+91CH8=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glBlOBp5+WlYsOElzTNmiPW/x60=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 h1:tgJ0uaNS4c98WRNUEx5U3aDlrDOI5Rs+1Vifcw4DJ8U=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0/go.mod h1:U7HYyW0zt/a9x5J1Kjs+r1f/d4ZHnYFclhYY2+YbeoE=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.41.0 h1:inYW9ZhgqiDqh6BioM7DVHHzEGVq76Db5897WLGZ5Go=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.41.0/go.mod h1:Izur+Wt8gClgMJqO/cZ8wdeeMryJ/xxiOVgFSSfpDTY=
go.opentelemetry.io/otel/exporters/prometheus v0.54.0 h1:rFwzp68QMgtzu9PgP3jm9XaMICI6TsofWWPcBDKwlsU=
@@ -507,26 +488,26 @@ go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 h1:cC2yDI3IQd0Udsu
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0/go.mod h1:2PD5Ex6z8CFzDbTdOlwyNIUywRr1DN0ospafJM1wJ+s=
go.opentelemetry.io/otel/log v0.8.0 h1:egZ8vV5atrUWUbnSsHn6vB8R21G2wrKqNiDt3iWertk=
go.opentelemetry.io/otel/log v0.8.0/go.mod h1:M9qvDdUTRCopJcGRKg57+JSQ9LgLBrwwfC32epk5NX8=
go.opentelemetry.io/otel/metric v1.43.0 h1:d7638QeInOnuwOONPp4JAOGfbCEpYb+K6DVWvdxGzgM=
go.opentelemetry.io/otel/metric v1.43.0/go.mod h1:RDnPtIxvqlgO8GRW18W6Z/4P462ldprJtfxHxyKd2PY=
go.opentelemetry.io/otel/sdk v1.43.0 h1:pi5mE86i5rTeLXqoF/hhiBtUNcrAGHLKQdhg4h4V9Dg=
go.opentelemetry.io/otel/sdk v1.43.0/go.mod h1:P+IkVU3iWukmiit/Yf9AWvpyRDlUeBaRg6Y+C58QHzg=
go.opentelemetry.io/otel/metric v1.41.0 h1:rFnDcs4gRzBcsO9tS8LCpgR0dxg4aaxWlJxCno7JlTQ=
go.opentelemetry.io/otel/metric v1.41.0/go.mod h1:xPvCwd9pU0VN8tPZYzDZV/BMj9CM9vs00GuBjeKhJps=
go.opentelemetry.io/otel/sdk v1.41.0 h1:YPIEXKmiAwkGl3Gu1huk1aYWwtpRLeskpV+wPisxBp8=
go.opentelemetry.io/otel/sdk v1.41.0/go.mod h1:ahFdU0G5y8IxglBf0QBJXgSe7agzjE4GiTJ6HT9ud90=
go.opentelemetry.io/otel/sdk/log v0.8.0 h1:zg7GUYXqxk1jnGF/dTdLPrK06xJdrXgqgFLnI4Crxvs=
go.opentelemetry.io/otel/sdk/log v0.8.0/go.mod h1:50iXr0UVwQrYS45KbruFrEt4LvAdCaWWgIrsN3ZQggo=
go.opentelemetry.io/otel/sdk/metric v1.43.0 h1:S88dyqXjJkuBNLeMcVPRFXpRw2fuwdvfCGLEo89fDkw=
go.opentelemetry.io/otel/sdk/metric v1.43.0/go.mod h1:C/RJtwSEJ5hzTiUz5pXF1kILHStzb9zFlIEe85bhj6A=
go.opentelemetry.io/otel/trace v1.43.0 h1:BkNrHpup+4k4w+ZZ86CZoHHEkohws8AY+WTX09nk+3A=
go.opentelemetry.io/otel/trace v1.43.0/go.mod h1:/QJhyVBUUswCphDVxq+8mld+AvhXZLhe+8WVFxiFff0=
go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4=
go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4=
go.opentelemetry.io/otel/sdk/metric v1.41.0 h1:siZQIYBAUd1rlIWQT2uCxWJxcCO7q3TriaMlf08rXw8=
go.opentelemetry.io/otel/sdk/metric v1.41.0/go.mod h1:HNBuSvT7ROaGtGI50ArdRLUnvRTRGniSUZbxiWxSO8Y=
go.opentelemetry.io/otel/trace v1.41.0 h1:Vbk2co6bhj8L59ZJ6/xFTskY+tGAbOnCtQGVVa9TIN0=
go.opentelemetry.io/otel/trace v1.41.0/go.mod h1:U1NU4ULCoxeDKc09yCWdWe+3QoyweJcISEVa1RBzOis=
go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg=
go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc=
go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@@ -542,8 +523,8 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8=
golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w=
golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c=
golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -553,8 +534,8 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.51.0 h1:94R/GTO7mt3/4wIKpcR5gkGmRLOuE/2hNGeWq/GBIFo=
golang.org/x/net v0.51.0/go.mod h1:aamm+2QF5ogm02fjy5Bb7CQ0WMt1/WVM7FtyaTLlA9Y=
golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
@@ -563,8 +544,8 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4=
golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -576,16 +557,16 @@ golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo=
golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw=
golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=
golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg=
golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.35.0 h1:JOVx6vVDFokkpaq1AEptVzLTpDe9KGpj5tR4/X+ybL8=
golang.org/x/text v0.35.0/go.mod h1:khi/HExzZJ2pGnjenulevKNX1W67CUy0AsXcNubPGCA=
golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk=
golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA=
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -594,8 +575,8 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.42.0 h1:uNgphsn75Tdz5Ji2q36v/nsFSfR/9BRFvqhGBaJGd5k=
golang.org/x/tools v0.42.0/go.mod h1:Ma6lCIwGZvHK6XtgbswSoWroEkhugApmsXyrUmBhfr0=
golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc=
golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -609,6 +590,8 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ=
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro=
google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 h1:fCvbg86sFXwdrl5LgVcTEvNC+2txB5mgROGmRL5mrls=
google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:+rXWjjaukWZun3mLfjmVnQi18E1AsFbDN9QdJ5YXLto=
google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 h1:gRkg/vSppuSQoDjxyiGfN4Upv/h/DQmIR10ZU8dh4Ww=
@@ -634,8 +617,8 @@ google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo=
gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
@@ -649,57 +632,59 @@ gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q=
gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA=
helm.sh/helm/v3 v3.20.1 h1:T8PodUaH1UwNvE+imUA2mIKjJItY8g7CVvLVP5g4NzI=
helm.sh/helm/v3 v3.20.1/go.mod h1:Fl1kBaWCpkUrM6IYXPjQ3bdZQfFrogKArqptvueZ6Ww=
helm.sh/helm/v3 v3.18.5 h1:Cc3Z5vd6kDrZq9wO9KxKLNEickiTho6/H/dBNRVSos4=
helm.sh/helm/v3 v3.18.5/go.mod h1:L/dXDR2r539oPlFP1PJqKAC1CUgqHJDLkxKpDGrWnyg=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
k8s.io/api v0.35.3 h1:pA2fiBc6+N9PDf7SAiluKGEBuScsTzd2uYBkA5RzNWQ=
k8s.io/api v0.35.3/go.mod h1:9Y9tkBcFwKNq2sxwZTQh1Njh9qHl81D0As56tu42GA4=
k8s.io/apiextensions-apiserver v0.35.3 h1:2fQUhEO7P17sijylbdwt0nBdXP0TvHrHj0KeqHD8FiU=
k8s.io/apiextensions-apiserver v0.35.3/go.mod h1:tK4Kz58ykRpwAEkXUb634HD1ZAegEElktz/B3jgETd8=
k8s.io/apimachinery v0.35.3 h1:MeaUwQCV3tjKP4bcwWGgZ/cp/vpsRnQzqO6J6tJyoF8=
k8s.io/apimachinery v0.35.3/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns=
k8s.io/apiserver v0.35.3 h1:D2eIcfJ05hEAEewoSDg+05e0aSRwx8Y4Agvd/wiomUI=
k8s.io/apiserver v0.35.3/go.mod h1:JI0n9bHYzSgIxgIrfe21dbduJ9NHzKJ6RchcsmIKWKY=
k8s.io/cli-runtime v0.35.3 h1:UZq4ipNimtzBmhN7PPKbfAdqo8quK0H0UdGl6qAQnqI=
k8s.io/cli-runtime v0.35.3/go.mod h1:O7MUmCqcKSd5xI+O5X7/pRkB5l0O2NIhOdUVwbHLXu4=
k8s.io/client-go v0.35.3 h1:s1lZbpN4uI6IxeTM2cpdtrwHcSOBML1ODNTCCfsP1pg=
k8s.io/client-go v0.35.3/go.mod h1:RzoXkc0mzpWIDvBrRnD+VlfXP+lRzqQjCmKtiwZ8Q9c=
k8s.io/component-base v0.35.3 h1:mbKbzoIMy7JDWS/wqZobYW1JDVRn/RKRaoMQHP9c4P0=
k8s.io/component-base v0.35.3/go.mod h1:IZ8LEG30kPN4Et5NeC7vjNv5aU73ku5MS15iZyvyMYk=
k8s.io/component-helpers v0.35.3 h1:Rl2p3wNMC0YU21rziLkWXavr7MwkB5Td3lNZ/+gYGm8=
k8s.io/component-helpers v0.35.3/go.mod h1:8BkyfcBA6XsCtFYxDB+mCfZqM6P39Aco12AKigNn0C8=
k8s.io/controller-manager v0.35.1 h1:AKMrGk8sCDa0WtLh+8yfcKck3r/AVw60FOmpak/4fB0=
k8s.io/controller-manager v0.35.1/go.mod h1:ifoFum/gxonT7duRuSrNQxU7bctlStGMXraZP5xbaso=
k8s.io/api v0.33.7 h1:Koh06KurzmXwCwe/DOaIiM1A8vEXTZ6B1tTDnmLLfxw=
k8s.io/api v0.33.7/go.mod h1:pu6qwFzTj0ijPbNYAbMgLFDEWgLFu2VUB6PVvQNtswc=
k8s.io/apiextensions-apiserver v0.33.7 h1:1J3CO3vsa645qKuhN8vdB1x3If5vuyH3uAWtLXZKkuQ=
k8s.io/apiextensions-apiserver v0.33.7/go.mod h1:WVsg48xGoaWz9vAREcbjfJqxFd1tpOcZoFutFBVC4DI=
k8s.io/apimachinery v0.33.7 h1:f1kF3V+Stdr+2IGB8QhrfZ6J9JkXF6e1gWX2wKP5slU=
k8s.io/apimachinery v0.33.7/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
k8s.io/apiserver v0.33.7 h1:A+3bpgxp9PUy8SEqVCrq5BoFxwUujYYwkrTXpv621cU=
k8s.io/apiserver v0.33.7/go.mod h1:d7/iHfHmI7WF+z+xuMi+O1osC1lHv6irtPua/7yVPto=
k8s.io/cli-runtime v0.33.7 h1:WeWuUlmE8qB0g2vq1wTr5vYaVO2745VJZ3aP/U95OF0=
k8s.io/cli-runtime v0.33.7/go.mod h1:9QK4Lcj/nm2vM61pRLinzXbNsxvOZ0XC7dVGoMhm85I=
k8s.io/client-go v0.33.7 h1:sEcU4syZnbwaiGDctJE6G/IKsuays3wjEWGuyrD7M8c=
k8s.io/client-go v0.33.7/go.mod h1:0MEM10zY5dGdc3FdkyNCTKXiTr8P+2Vj65njzvE0Vhw=
k8s.io/component-base v0.33.7 h1:r3xd2l2lngeiOrQhpnD7CYtgbbrTDBnO3qyDUUfwTXw=
k8s.io/component-base v0.33.7/go.mod h1:3v7hH1NvNLID9BUBAR/FqM9StQ/Sa4yBDxEzE1yvGFg=
k8s.io/component-helpers v0.33.7 h1:m23GzIX36RHfKbumTQig8eobBMK7JG0iSekRGEFa1bs=
k8s.io/component-helpers v0.33.7/go.mod h1:RZw7qlcJdIYnoN7KPKGsVSaSgFPKM7xN2IcAdMX0uZ0=
k8s.io/controller-manager v0.33.7 h1:AATKJiqBhRc7IMH7KAWdem2xa2VRduAcVArdp5D04A8=
k8s.io/controller-manager v0.33.7/go.mod h1:IUJSup98WchXED3L/29z+WJAz2sEi10TC5s0obajkT0=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kms v0.35.3 h1:jaxr/7dNqcztGldnfCEZg8DegEOnHV6cfoBC2ACMWEg=
k8s.io/kms v0.35.3/go.mod h1:VT+4ekZAdrZDMgShK37vvlyHUVhwI9t/9tvh0AyCWmQ=
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE=
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ=
k8s.io/kubectl v0.35.3 h1:1KqSYXk/sodU7VeDvK6atX2kAGUZd2QTeR5K7Hb9r9w=
k8s.io/kubectl v0.35.3/go.mod h1:GPHxZqRe+u/i3gTBoVQHeIyq2NilfNPj9hDWeuN3x5s=
k8s.io/kubelet v0.35.3 h1:Y6b9+U/aTBmou9JZ6qv18O4dpFbJOfl7cBe+ZksT7RY=
k8s.io/kubelet v0.35.3/go.mod h1:aWoMogtyUEf/mTl8VjqHbSkW5ZZkB8vTkrg9Fi6TKwE=
k8s.io/kubernetes v1.35.3 h1:J3dk2wybKFHwoH4eydDUGHJo4HAD+9CZbSlvk/YQuao=
k8s.io/kubernetes v1.35.3/go.mod h1:AaPpCpiS8oAqRbEwpY5r3RitLpwpVp5lVXKFkJril58=
k8s.io/utils v0.0.0-20260319190234-28399d86e0b5 h1:kBawHLSnx/mYHmRnNUf9d4CpjREbeZuxoSGOX/J+aYM=
k8s.io/utils v0.0.0-20260319190234-28399d86e0b5/go.mod h1:xDxuJ0whA3d0I4mf/C4ppKHxXynQ+fxnkmQH0vTHnuk=
k8s.io/kms v0.33.7 h1:ckQz1NkobzSVXaiDi064exY+G5lUseiwsq8m/bXTcPo=
k8s.io/kms v0.33.7/go.mod h1:C1I8mjFFBNzfUZXYt9FZVJ8MJl7ynFbGgZFbBzkBJ3E=
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
k8s.io/kubectl v0.33.7 h1:qsiKBslMDfcSkvBsEpLKju7n9ZyCFcUkZ8lAq+jexVA=
k8s.io/kubectl v0.33.7/go.mod h1:4gHw8yanjdPbUGgEO0c9UVvLvOOY1UJ2la8T7Aq7EPc=
k8s.io/kubelet v0.33.7 h1:huNa5PQjUpFskjD2Q9Q+96Hk2+nzkY+T6EiB5k6p5sY=
k8s.io/kubelet v0.33.7/go.mod h1:QHPXSFQ4zeU2cvlxE3LliKcU0Mvy7ZcDTzbAPzonscc=
k8s.io/kubernetes v1.33.7 h1:Qhp1gwCPSOqt3du6A0uTGrrTcZDtShdSCIR5IZag16Y=
k8s.io/kubernetes v1.33.7/go.mod h1:eJiHC143tnNSvmDkCRwGNKA80yXqBvYC3U8L/i67nAY=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc=
oras.land/oras-go/v2 v2.6.0/go.mod h1:magiQDfG6H1O9APp+rOsvCPcW1GD2MM7vgnKY0Y+u1o=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
sigs.k8s.io/controller-runtime v0.23.3 h1:VjB/vhoPoA9l1kEKZHBMnQF33tdCLQKJtydy4iqwZ80=
sigs.k8s.io/controller-runtime v0.23.3/go.mod h1:B6COOxKptp+YaUT5q4l6LqUJTRpizbgf9KSRNdQGns0=
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg=
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/kustomize/api v0.20.1 h1:iWP1Ydh3/lmldBnH/S5RXgT98vWYMaTUL1ADcr+Sv7I=
sigs.k8s.io/kustomize/api v0.20.1/go.mod h1:t6hUFxO+Ph0VxIk1sKp1WS0dOjbPCtLJ4p8aADLwqjM=
sigs.k8s.io/kustomize/kyaml v0.20.1 h1:PCMnA2mrVbRP3NIB6v9kYCAc38uvFLVs8j/CD567A78=
sigs.k8s.io/kustomize/kyaml v0.20.1/go.mod h1:0EmkQHRUsJxY8Ug9Niig1pUMSCGHxQ5RklbpV/Ri6po=
sigs.k8s.io/controller-runtime v0.19.4 h1:SUmheabttt0nx8uJtoII4oIP27BVVvAKFvdvGFwV/Qo=
sigs.k8s.io/controller-runtime v0.19.4/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
sigs.k8s.io/kustomize/api v0.19.0 h1:F+2HB2mU1MSiR9Hp1NEgoU2q9ItNOaBJl0I4Dlus5SQ=
sigs.k8s.io/kustomize/api v0.19.0/go.mod h1:/BbwnivGVcBh1r+8m3tH1VNxJmHSk1PzP5fkP6lbL1o=
sigs.k8s.io/kustomize/kyaml v0.19.0 h1:RFge5qsO1uHhwJsu3ipV7RNolC7Uozc0jUBC/61XSlA=
sigs.k8s.io/kustomize/kyaml v0.19.0/go.mod h1:FeKD5jEOH+FbZPpqUghBP8mrLjJ3+zD3/rf9NNu1cwY=
sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482 h1:2WOzJpHUBVrrkDjU4KBT8n5LDcj824eX0I5UKcgeRUs=
sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
sigs.k8s.io/yaml v1.5.0 h1:M10b2U7aEUY6hRtU870n2VTPgR5RZiL/I6Lcc2F4NUQ=
sigs.k8s.io/yaml v1.5.0/go.mod h1:wZs27Rbxoai4C0f8/9urLZtZtF3avA3gKvGyPdDqTO4=


@@ -14,6 +14,7 @@ type config struct {
HostKubeconfig string `mapstructure:"hostKubeconfig"`
VirtKubeconfig string `mapstructure:"virtKubeconfig"`
KubeletPort int `mapstructure:"kubeletPort"`
WebhookPort int `mapstructure:"webhookPort"`
ServerIP string `mapstructure:"serverIP"`
Version string `mapstructure:"version"`
MirrorHostNodes bool `mapstructure:"mirrorHostNodes"`
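
For illustration, a standalone sketch (not part of this diff) of how these mapstructure tags drive decoding; it assumes the github.com/mitchellh/mapstructure package, which viper uses under the hood, and mirrors only a subset of the fields:

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// sketch: subset of the kubelet config struct above
type config struct {
	KubeletPort int    `mapstructure:"kubeletPort"`
	WebhookPort int    `mapstructure:"webhookPort"`
	ServerIP    string `mapstructure:"serverIP"`
}

func main() {
	// raw values as they might arrive from a parsed config file
	raw := map[string]any{"kubeletPort": 10250, "webhookPort": 9443, "serverIP": "10.0.0.1"}

	var c config
	if err := mapstructure.Decode(raw, &c); err != nil {
		panic(err)
	}

	fmt.Printf("%+v\n", c) // {KubeletPort:10250 WebhookPort:9443 ServerIP:10.0.0.1}
}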


@@ -92,20 +92,11 @@ func (r *IngressReconciler) Reconcile(ctx context.Context, req reconcile.Request
return reconcile.Result{}, err
}
appliedSync := cluster.Spec.Sync.DeepCopy()
// If a policy is applied to the virtual cluster we need to use its SyncConfig, if available
if cluster.Status.Policy != nil && cluster.Status.Policy.Sync != nil {
appliedSync = cluster.Status.Policy.Sync
}
syncConfig := appliedSync.Ingresses
if err := r.VirtualClient.Get(ctx, req.NamespacedName, &virtIngress); err != nil {
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
syncedIngress := r.ingress(&virtIngress, syncConfig.DisableTLSSecretTranslation)
syncedIngress := r.ingress(&virtIngress)
if err := controllerutil.SetOwnerReference(&cluster, syncedIngress, r.HostClient.Scheme()); err != nil {
return reconcile.Result{}, err
@@ -152,7 +143,7 @@ func (r *IngressReconciler) Reconcile(ctx context.Context, req reconcile.Request
return reconcile.Result{}, r.HostClient.Update(ctx, syncedIngress)
}
func (s *IngressReconciler) ingress(obj *networkingv1.Ingress, disableTLSSecretTranslation bool) *networkingv1.Ingress {
func (s *IngressReconciler) ingress(obj *networkingv1.Ingress) *networkingv1.Ingress {
hostIngress := obj.DeepCopy()
s.Translator.TranslateTo(hostIngress)
@@ -166,17 +157,6 @@ func (s *IngressReconciler) ingress(obj *networkingv1.Ingress, disableTLSSecretT
}
}
}
// TLS Secret translation disabled, return early without translating TLS secrets in the ingress spec
if disableTLSSecretTranslation {
return hostIngress
}
// ensure tls secrets are also translated
for i := range hostIngress.Spec.TLS {
if hostIngress.Spec.TLS[i].SecretName != "" {
hostIngress.Spec.TLS[i].SecretName = s.Translator.TranslateName(obj.GetNamespace(), hostIngress.Spec.TLS[i].SecretName)
}
}
// don't sync finalizers to the host
return hostIngress
}


@@ -2,35 +2,120 @@ package webhook
import (
"context"
"errors"
"fmt"
"strconv"
"strings"
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/manager"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
)
const webhookName = "podmutating.k3k.io"
const (
webhookName = "podmutating.k3k.io"
webhookTimeout = int32(10)
webhookPath = "/mutate--v1-pod"
FieldpathField = "k3k.io/fieldpath"
)
func RemovePodMutatingWebhook(ctx context.Context, virtualClient, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace string) error {
webhookSecret := &v1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: controller.SafeConcatNameWithPrefix(clusterName, "webhook"),
Namespace: clusterNamespace,
},
type webhookHandler struct {
client ctrlruntimeclient.Client
scheme *runtime.Scheme
serviceName string
clusterName string
clusterNamespace string
logger logr.Logger
webhookPort int
}
// AddPodMutatingWebhook adds a mutating webhook to the virtual cluster that
// rewrites the nodeName of created pods to the virtual kubelet node name and
// strips status.* downward API fields from container env vars
func AddPodMutatingWebhook(ctx context.Context, mgr manager.Manager, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, serviceName string, logger logr.Logger, webhookPort int) error {
handler := webhookHandler{
client: mgr.GetClient(),
scheme: mgr.GetScheme(),
logger: logger,
serviceName: serviceName,
clusterName: clusterName,
clusterNamespace: clusterNamespace,
webhookPort: webhookPort,
}
if err := hostClient.Delete(ctx, webhookSecret); !apierrors.IsNotFound(err) {
// create mutating webhook configuration to the cluster
config, err := handler.configuration(ctx, hostClient)
if err != nil {
return err
}
webhook := &admissionregistrationv1.MutatingWebhookConfiguration{
if err := handler.client.Create(ctx, config); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
}
// register webhook with the manager
return ctrl.NewWebhookManagedBy(mgr).For(&v1.Pod{}).WithDefaulter(&handler).Complete()
}
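// Note: WithDefaulter above expects controller-runtime's admission.CustomDefaulter,
// which the Default method below satisfies.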
func (w *webhookHandler) Default(ctx context.Context, obj runtime.Object) error {
pod, ok := obj.(*v1.Pod)
if !ok {
return fmt.Errorf("invalid request: object was type %t not cluster", obj)
}
w.logger.Info("mutating webhook request", "pod", pod.Name, "namespace", pod.Namespace)
// look for status.* fields in the env
if pod.Annotations == nil {
pod.Annotations = make(map[string]string)
}
for i, container := range pod.Spec.Containers {
for j, env := range container.Env {
if env.ValueFrom == nil || env.ValueFrom.FieldRef == nil {
continue
}
fieldPath := env.ValueFrom.FieldRef.FieldPath
if strings.Contains(fieldPath, "status.") {
annotationKey := fmt.Sprintf("%s_%d_%s", FieldpathField, i, env.Name)
pod.Annotations[annotationKey] = fieldPath
pod.Spec.Containers[i].Env = removeEnv(pod.Spec.Containers[i].Env, j)
}
}
}
return nil
}
func (w *webhookHandler) configuration(ctx context.Context, hostClient ctrlruntimeclient.Client) (*admissionregistrationv1.MutatingWebhookConfiguration, error) {
w.logger.Info("extracting webhook tls from host cluster")
var webhookTLSSecret v1.Secret
if err := hostClient.Get(ctx, types.NamespacedName{Name: agent.WebhookSecretName(w.clusterName), Namespace: w.clusterNamespace}, &webhookTLSSecret); err != nil {
return nil, err
}
caBundle, ok := webhookTLSSecret.Data["ca.crt"]
if !ok {
return nil, errors.New("webhook CABundle does not exist in secret")
}
webhookURL := fmt.Sprintf("https://%s:%d%s", w.serviceName, w.webhookPort, webhookPath)
return &admissionregistrationv1.MutatingWebhookConfiguration{
TypeMeta: metav1.TypeMeta{
APIVersion: "admissionregistration.k8s.io/v1",
Kind: "MutatingWebhookConfiguration",
@@ -38,11 +123,51 @@ func RemovePodMutatingWebhook(ctx context.Context, virtualClient, hostClient ctr
ObjectMeta: metav1.ObjectMeta{
Name: webhookName + "-configuration",
},
}
if err := virtualClient.Delete(ctx, webhook); !apierrors.IsNotFound(err) {
return err
}
return nil
Webhooks: []admissionregistrationv1.MutatingWebhook{
{
Name: webhookName,
AdmissionReviewVersions: []string{"v1"},
SideEffects: ptr.To(admissionregistrationv1.SideEffectClassNone),
TimeoutSeconds: ptr.To(webhookTimeout),
ClientConfig: admissionregistrationv1.WebhookClientConfig{
URL: ptr.To(webhookURL),
CABundle: caBundle,
},
Rules: []admissionregistrationv1.RuleWithOperations{
{
Operations: []admissionregistrationv1.OperationType{
"CREATE",
},
Rule: admissionregistrationv1.Rule{
APIGroups: []string{""},
APIVersions: []string{"v1"},
Resources: []string{"pods"},
Scope: ptr.To(admissionregistrationv1.NamespacedScope),
},
},
},
},
},
}, nil
}
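// removeEnv drops the element at index i by swapping in the last element,
// so the relative order of the remaining env vars is not preserved.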
func removeEnv(envs []v1.EnvVar, i int) []v1.EnvVar {
envs[i] = envs[len(envs)-1]
return envs[:len(envs)-1]
}
func ParseFieldPathAnnotationKey(annotationKey string) (int, string, error) {
s := strings.SplitN(annotationKey, "_", 3)
if len(s) != 3 {
return -1, "", errors.New("fieldpath annotation is not set correctly")
}
containerIndex, err := strconv.Atoi(s[1])
if err != nil {
return -1, "", err
}
envName := s[2]
return containerIndex, envName, nil
}
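
For illustration, a standalone round-trip sketch (not part of this diff) of the annotation key format used above: Default records each stripped downward API env under FieldpathField + "_" + containerIndex + "_" + envName, and ParseFieldPathAnnotationKey reverses it.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

const fieldpathField = "k3k.io/fieldpath" // mirrors FieldpathField above

func main() {
	// webhook side: container 0, env POD_IP whose fieldPath was status.podIP
	key := fmt.Sprintf("%s_%d_%s", fieldpathField, 0, "POD_IP")
	fmt.Println(key) // k3k.io/fieldpath_0_POD_IP

	// provider side: recover the container index and env name
	parts := strings.SplitN(key, "_", 3)
	idx, _ := strconv.Atoi(parts[1])
	fmt.Println(idx, parts[2]) // 0 POD_IP
}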


@@ -2,8 +2,13 @@ package main
import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"net"
"net/http"
"os"
"time"
"github.com/go-logr/logr"
@@ -21,7 +26,9 @@ import (
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/webhook"
certutil "github.com/rancher/dynamiclistener/cert"
v1 "k8s.io/api/core/v1"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
@@ -64,7 +71,7 @@ type kubelet struct {
token string
}
func newKubelet(ctx context.Context, c *config) (*kubelet, error) {
func newKubelet(ctx context.Context, c *config, logger logr.Logger) (*kubelet, error) {
hostConfig, err := clientcmd.BuildConfigFromFlags("", c.HostKubeconfig)
if err != nil {
return nil, err
@@ -77,7 +84,7 @@ func newKubelet(ctx context.Context, c *config) (*kubelet, error) {
return nil, err
}
virtConfig, err := virtRestConfig(ctx, c.VirtKubeconfig, hostClient, c.ClusterName, c.ClusterNamespace, c.Token)
virtConfig, err := virtRestConfig(ctx, c.VirtKubeconfig, hostClient, c.ClusterName, c.ClusterNamespace, c.Token, logger)
if err != nil {
return nil, err
}
@@ -87,6 +94,8 @@ func newKubelet(ctx context.Context, c *config) (*kubelet, error) {
return nil, err
}
ctrl.SetLogger(logger)
hostMetricsBindAddress := ":8083"
virtualMetricsBindAddress := ":8084"
@@ -119,8 +128,14 @@ func newKubelet(ctx context.Context, c *config) (*kubelet, error) {
return nil, errors.New("unable to add client go types to virtual cluster scheme: " + err.Error())
}
webhookServer := webhook.NewServer(webhook.Options{
CertDir: "/opt/rancher/k3k-webhook",
Port: c.WebhookPort,
})
virtualMgr, err := ctrl.NewManager(virtConfig, manager.Options{
Scheme: virtualScheme,
WebhookServer: webhookServer,
LeaderElection: true,
LeaderElectionNamespace: "kube-system",
LeaderElectionID: c.ClusterName,
@@ -132,10 +147,10 @@ func newKubelet(ctx context.Context, c *config) (*kubelet, error) {
return nil, errors.New("unable to create controller-runtime mgr for virtual cluster: " + err.Error())
}
logger.Info("removing pod mutating webhook")
logger.Info("adding pod mutating webhook")
if err := k3kwebhook.RemovePodMutatingWebhook(ctx, virtualMgr.GetClient(), hostClient, c.ClusterName, c.ClusterNamespace); err != nil {
return nil, errors.New("unable to remove pod mutating webhook for virtual cluster: " + err.Error())
if err := k3kwebhook.AddPodMutatingWebhook(ctx, virtualMgr, hostClient, c.ClusterName, c.ClusterNamespace, c.ServiceName, logger, c.WebhookPort); err != nil {
return nil, errors.New("unable to add pod mutating webhook for virtual cluster: " + err.Error())
}
if err := addControllers(ctx, hostMgr, virtualMgr, c, hostClient); err != nil {
@@ -193,6 +208,20 @@ func clusterIP(ctx context.Context, serviceName, clusterNamespace string, hostCl
return service.Spec.ClusterIP, nil
}
func (k *kubelet) registerNode(agentIP string, cfg config) error {
providerFunc := k.newProviderFunc(cfg)
nodeOpts := k.nodeOpts(cfg.KubeletPort, cfg.ClusterNamespace, cfg.ClusterName, cfg.AgentHostname, agentIP)
var err error
k.node, err = nodeutil.NewNode(k.name, providerFunc, nodeutil.WithClient(k.virtClient), nodeOpts)
if err != nil {
return errors.New("unable to start kubelet: " + err.Error())
}
return nil
}
func (k *kubelet) start(ctx context.Context) {
// any one of the following 3 tasks (host manager, virtual manager, node) crashing will stop the
// program, and all 3 of them block on start, so we start them here in goroutines
@@ -241,24 +270,46 @@ func (k *kubelet) newProviderFunc(cfg config) nodeutil.NewProviderFunc {
return nil, nil, errors.New("unable to make nodeutil provider: " + err.Error())
}
err = provider.ConfigureNode(
provider.ConfigureNode(
k.logger,
pc.Node,
cfg.AgentHostname,
k.port,
k.agentIP,
utilProvider.Host.Manager,
utilProvider.Virtual.Client,
k.hostMgr,
utilProvider.VirtualClient,
k.virtualCluster,
cfg.Version,
cfg.MirrorHostNodes,
)
return utilProvider, &provider.Node{}, err
return utilProvider, &provider.Node{}, nil
}
}
func virtRestConfig(ctx context.Context, virtualConfigPath string, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, token string) (*rest.Config, error) {
func (k *kubelet) nodeOpts(srvPort int, namespace, name, hostname, agentIP string) nodeutil.NodeOpt {
return func(c *nodeutil.NodeConfig) error {
c.HTTPListenAddr = fmt.Sprintf(":%d", srvPort)
// set up the routes
mux := http.NewServeMux()
if err := nodeutil.AttachProviderRoutes(mux)(c); err != nil {
return errors.New("unable to attach routes: " + err.Error())
}
c.Handler = mux
tlsConfig, err := loadTLSConfig(name, namespace, k.name, hostname, k.token, agentIP)
if err != nil {
return errors.New("unable to get tls config: " + err.Error())
}
c.TLSConfig = tlsConfig
return nil
}
}
func virtRestConfig(ctx context.Context, virtualConfigPath string, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, token string, logger logr.Logger) (*rest.Config, error) {
if virtualConfigPath != "" {
return clientcmd.BuildConfigFromFlags("", virtualConfigPath)
}
@@ -330,6 +381,60 @@ func kubeconfigBytes(url string, serverCA, clientCert, clientKey []byte) ([]byte
return clientcmd.Write(*config)
}
func loadTLSConfig(clusterName, clusterNamespace, nodeName, hostname, token, agentIP string) (*tls.Config, error) {
var b *bootstrap.ControlRuntimeBootstrap
endpoint := fmt.Sprintf("%s.%s", server.ServiceName(clusterName), clusterNamespace)
if err := retry.OnError(controller.Backoff, func(err error) bool {
return err != nil
}, func() error {
var err error
b, err = bootstrap.DecodedBootstrap(token, endpoint)
return err
}); err != nil {
return nil, errors.New("unable to decode bootstrap: " + err.Error())
}
// POD IP
podIP := net.ParseIP(os.Getenv("POD_IP"))
ip := net.ParseIP(agentIP)
altNames := certutil.AltNames{
DNSNames: []string{hostname},
IPs: []net.IP{ip, podIP},
}
cert, key, err := certs.CreateClientCertKey(nodeName, nil, &altNames, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, 0, b.ServerCA.Content, b.ServerCAKey.Content)
if err != nil {
return nil, errors.New("unable to get cert and key: " + err.Error())
}
clientCert, err := tls.X509KeyPair(cert, key)
if err != nil {
return nil, errors.New("unable to get key pair: " + err.Error())
}
// create rootCA CertPool
certs, err := certutil.ParseCertsPEM([]byte(b.ServerCA.Content))
if err != nil {
return nil, errors.New("unable to create ca certs: " + err.Error())
}
if len(certs) < 1 {
return nil, errors.New("ca cert is not parsed correctly")
}
pool := x509.NewCertPool()
pool.AddCert(certs[0])
return &tls.Config{
RootCAs: pool,
Certificates: []tls.Certificate{clientCert},
}, nil
}
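
The pool handling above adds only the first certificate parsed from the server CA; for reference, a standalone stdlib-only sketch (placeholder PEM, not this repo's CA) that appends every certificate found in the bundle:

package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
)

func main() {
	// placeholder; a real CA certificate bundle goes here
	caPEM := []byte("-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----")

	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(caPEM) {
		fmt.Println("no CA certs parsed from PEM")
		return
	}

	cfg := &tls.Config{RootCAs: pool}
	_ = cfg // hand to an http.Server or tls.Dial as needed
}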
func addControllers(ctx context.Context, hostMgr, virtualMgr manager.Manager, c *config, hostClient ctrlruntimeclient.Client) error {
var cluster v1beta1.Cluster


@@ -1,6 +1,7 @@
package main
import (
"context"
"errors"
"fmt"
"os"
@@ -13,7 +14,7 @@ import (
"github.com/spf13/pflag"
"github.com/spf13/viper"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimelog "sigs.k8s.io/controller-runtime/pkg/log"
"github.com/rancher/k3k/pkg/log"
)
@@ -36,7 +37,7 @@ func main() {
}
logger = zapr.NewLogger(log.New(debug, logFormat))
ctrl.SetLogger(logger)
ctrlruntimelog.SetLogger(logger)
return nil
},
@@ -51,6 +52,7 @@ func main() {
rootCmd.PersistentFlags().StringVar(&cfg.HostKubeconfig, "host-kubeconfig", "", "Path to the host kubeconfig, if empty then virtual-kubelet will use incluster config")
rootCmd.PersistentFlags().StringVar(&cfg.VirtKubeconfig, "virt-kubeconfig", "", "Path to the k3k cluster kubeconfig, if empty then virtual-kubelet will create its own config from k3k cluster")
rootCmd.PersistentFlags().IntVar(&cfg.KubeletPort, "kubelet-port", 0, "kubelet API port number")
rootCmd.PersistentFlags().IntVar(&cfg.WebhookPort, "webhook-port", 0, "Webhook port number")
rootCmd.PersistentFlags().StringVar(&cfg.ServiceName, "service-name", "", "The service name deployed by the k3k controller")
rootCmd.PersistentFlags().StringVar(&cfg.AgentHostname, "agent-hostname", "", "Agent Hostname used for TLS SAN for the kubelet server")
rootCmd.PersistentFlags().StringVar(&cfg.ServerIP, "server-ip", "", "Server IP used for registering the virtual kubelet to the cluster")
@@ -64,20 +66,18 @@ func main() {
}
func run(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
ctx := context.Background()
if err := cfg.validate(); err != nil {
return fmt.Errorf("failed to validate config: %w", err)
}
k, err := newKubelet(ctx, &cfg)
k, err := newKubelet(ctx, &cfg, logger)
if err != nil {
return fmt.Errorf("failed to create new virtual kubelet instance: %w", err)
}
podIP := os.Getenv("POD_IP")
if err := k.registerNode(k.agentIP, podIP, cfg); err != nil {
if err := k.registerNode(k.agentIP, cfg); err != nil {
return fmt.Errorf("failed to register new node: %w", err)
}


@@ -1,114 +0,0 @@
package main
import (
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"net"
"net/http"
"github.com/virtual-kubelet/virtual-kubelet/node/nodeutil"
"k8s.io/client-go/util/retry"
certutil "github.com/rancher/dynamiclistener/cert"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/certs"
"github.com/rancher/k3k/pkg/controller/cluster/server"
"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
)
func (k *kubelet) registerNode(agentIP, podIP string, cfg config) error {
tlsConfig, err := loadTLSConfig(cfg, k.name, k.token, agentIP, podIP)
if err != nil {
return errors.New("unable to get tls config: " + err.Error())
}
mux := http.NewServeMux()
node, err := nodeutil.NewNode(
k.name,
k.newProviderFunc(cfg),
nodeutil.WithClient(k.virtClient),
nodeutil.AttachProviderRoutes(mux),
nodeOpt(mux, tlsConfig, cfg.KubeletPort),
)
if err != nil {
return errors.New("unable to start kubelet: " + err.Error())
}
k.node = node
return nil
}
func nodeOpt(mux *http.ServeMux, tlsConfig *tls.Config, port int) nodeutil.NodeOpt {
return func(c *nodeutil.NodeConfig) error {
c.Handler = mux
c.TLSConfig = tlsConfig
c.HTTPListenAddr = fmt.Sprintf(":%d", port)
c.NodeSpec.Labels["kubernetes.io/role"] = "worker"
c.NodeSpec.Labels["node-role.kubernetes.io/worker"] = "true"
c.SkipDownwardAPIResolution = true
return nil
}
}
func loadTLSConfig(cfg config, nodeName, token, agentIP, podIP string) (*tls.Config, error) {
var b *bootstrap.ControlRuntimeBootstrap
endpoint := fmt.Sprintf("%s.%s", server.ServiceName(cfg.ClusterName), cfg.ClusterNamespace)
if err := retry.OnError(controller.Backoff, func(err error) bool {
return err != nil
}, func() error {
var err error
b, err = bootstrap.DecodedBootstrap(token, endpoint)
return err
}); err != nil {
return nil, errors.New("unable to decode bootstrap: " + err.Error())
}
altNames := certutil.AltNames{
DNSNames: []string{cfg.AgentHostname},
IPs: []net.IP{
net.ParseIP(agentIP),
net.ParseIP(podIP),
},
}
cert, key, err := certs.CreateClientCertKey(nodeName, nil, &altNames, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, 0, b.ServerCA.Content, b.ServerCAKey.Content)
if err != nil {
return nil, errors.New("unable to get cert and key: " + err.Error())
}
clientCert, err := tls.X509KeyPair(cert, key)
if err != nil {
return nil, errors.New("unable to get key pair: " + err.Error())
}
// create rootCA CertPool
certs, err := certutil.ParseCertsPEM([]byte(b.ServerCA.Content))
if err != nil {
return nil, errors.New("unable to create ca certs: " + err.Error())
}
if len(certs) < 1 {
return nil, errors.New("ca cert is not parsed correctly")
}
pool := x509.NewCertPool()
pool.AddCert(certs[0])
return &tls.Config{
RootCAs: pool,
Certificates: []tls.Certificate{clientCert},
}, nil
}


@@ -14,14 +14,13 @@ import (
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
func ConfigureNode(logger logr.Logger, node *corev1.Node, hostname string, servicePort int, ip string, hostMgr manager.Manager, virtualClient client.Client, virtualCluster v1beta1.Cluster, version string, mirrorHostNodes bool) error {
func ConfigureNode(logger logr.Logger, node *corev1.Node, hostname string, servicePort int, ip string, hostMgr manager.Manager, virtualClient client.Client, virtualCluster v1beta1.Cluster, version string, mirrorHostNodes bool) {
ctx := context.Background()
if mirrorHostNodes {
var hostNode corev1.Node
if err := hostMgr.GetAPIReader().Get(ctx, types.NamespacedName{Name: node.Name}, &hostNode); err != nil {
logger.Error(err, "error getting host node for mirroring", "node", node.Name)
return err
logger.Error(err, "error getting host node for mirroring")
}
node.Spec = *hostNode.Spec.DeepCopy()
@@ -52,8 +51,6 @@ func ConfigureNode(logger logr.Logger, node *corev1.Node, hostname string, servi
startNodeCapacityUpdater(ctx, logger, hostMgr.GetClient(), virtualClient, virtualCluster, node.Name)
}
return nil
}
// nodeConditions returns the basic conditions which mark the node as ready


@@ -37,6 +37,7 @@ import (
compbasemetrics "k8s.io/component-base/metrics"
v1alpha1stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
"github.com/rancher/k3k/k3k-kubelet/controller/webhook"
"github.com/rancher/k3k/k3k-kubelet/provider/collectors"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
@@ -46,20 +47,15 @@ import (
// check at compile time if the Provider implements the nodeutil.Provider interface
var _ nodeutil.Provider = (*Provider)(nil)
// ClusterContext includes the controller runtime manager and clients
type ClusterContext struct {
Config rest.Config
Client client.Client
CoreClient cv1.CoreV1Interface
Manager manager.Manager
}
// Provider implements nodeutil.Provider from Virtual Kubelet.
// TODO: Implement NotifyPods and the required usage so that this can be an async provider
type Provider struct {
Host ClusterContext
Virtual ClusterContext
Translator translate.ToHostTranslator
HostClient client.Client
VirtualClient client.Client
VirtualManager manager.Manager
ClientConfig rest.Config
CoreClient cv1.CoreV1Interface
ClusterNamespace string
ClusterName string
serverIP string
@@ -76,29 +72,18 @@ func New(hostConfig rest.Config, hostMgr, virtualMgr manager.Manager, logger log
return nil, err
}
virtualCoreClient, err := cv1.NewForConfig(virtualMgr.GetConfig())
if err != nil {
return nil, err
}
translator := translate.ToHostTranslator{
ClusterName: name,
ClusterNamespace: namespace,
}
p := Provider{
Host: ClusterContext{
Manager: hostMgr,
Client: hostMgr.GetClient(),
CoreClient: coreClient,
Config: hostConfig,
},
Virtual: ClusterContext{
Manager: virtualMgr,
Client: virtualMgr.GetClient(),
CoreClient: virtualCoreClient,
},
HostClient: hostMgr.GetClient(),
VirtualClient: virtualMgr.GetClient(),
VirtualManager: virtualMgr,
Translator: translator,
ClientConfig: hostConfig,
CoreClient: coreClient,
ClusterNamespace: namespace,
ClusterName: name,
logger: logger.WithValues("cluster", name),
@@ -144,7 +129,7 @@ func (p *Provider) GetContainerLogs(ctx context.Context, namespace, name, contai
options.SinceTime = &sinceTime
}
closer, err := p.Host.CoreClient.Pods(p.ClusterNamespace).GetLogs(hostPodName, &options).Stream(ctx)
closer, err := p.CoreClient.Pods(p.ClusterNamespace).GetLogs(hostPodName, &options).Stream(ctx)
if err != nil {
logger.Error(err, "Error getting logs from container")
}
@@ -160,7 +145,7 @@ func (p *Provider) RunInContainer(ctx context.Context, namespace, name, containe
logger := p.logger.WithValues("namespace", namespace, "name", name, "pod", hostPodName, "container", containerName)
logger.V(1).Info("RunInContainer")
req := p.Host.CoreClient.RESTClient().Post().
req := p.CoreClient.RESTClient().Post().
Resource("pods").
Name(hostPodName).
Namespace(p.ClusterNamespace).
@@ -175,7 +160,7 @@ func (p *Provider) RunInContainer(ctx context.Context, namespace, name, containe
Stderr: attach.Stderr() != nil,
}, scheme.ParameterCodec)
exec, err := remotecommand.NewSPDYExecutor(&p.Host.Config, http.MethodPost, req.URL())
exec, err := remotecommand.NewSPDYExecutor(&p.ClientConfig, http.MethodPost, req.URL())
if err != nil {
logger.Error(err, "Error creating SPDY executor")
return err
@@ -205,7 +190,7 @@ func (p *Provider) AttachToContainer(ctx context.Context, namespace, name, conta
logger := p.logger.WithValues("namespace", namespace, "name", name, "pod", hostPodName, "container", containerName)
logger.V(1).Info("AttachToContainer")
req := p.Host.CoreClient.RESTClient().Post().
req := p.CoreClient.RESTClient().Post().
Resource("pods").
Name(hostPodName).
Namespace(p.ClusterNamespace).
@@ -219,7 +204,7 @@ func (p *Provider) AttachToContainer(ctx context.Context, namespace, name, conta
Stderr: attach.Stderr() != nil,
}, scheme.ParameterCodec)
exec, err := remotecommand.NewSPDYExecutor(&p.Host.Config, http.MethodPost, req.URL())
exec, err := remotecommand.NewSPDYExecutor(&p.ClientConfig, http.MethodPost, req.URL())
if err != nil {
logger.Error(err, "Error creating SPDY executor")
return err
@@ -245,13 +230,13 @@ func (p *Provider) AttachToContainer(ctx context.Context, namespace, name, conta
func (p *Provider) GetStatsSummary(ctx context.Context) (*v1alpha1stats.Summary, error) {
p.logger.V(1).Info("GetStatsSummary")
node, err := p.Host.CoreClient.Nodes().Get(ctx, p.agentHostname, metav1.GetOptions{})
node, err := p.CoreClient.Nodes().Get(ctx, p.agentHostname, metav1.GetOptions{})
if err != nil {
p.logger.Error(err, "Unable to get nodes of cluster")
return nil, err
}
res, err := p.Host.CoreClient.RESTClient().
res, err := p.CoreClient.RESTClient().
Get().
Resource("nodes").
Name(node.Name).
@@ -338,13 +323,13 @@ func (p *Provider) PortForward(ctx context.Context, namespace, name string, port
logger := p.logger.WithValues("namespace", namespace, "name", name, "pod", hostPodName, "port", port)
logger.V(1).Info("PortForward")
req := p.Host.CoreClient.RESTClient().Post().
req := p.CoreClient.RESTClient().Post().
Resource("pods").
Name(hostPodName).
Namespace(p.ClusterNamespace).
SubResource("portforward")
transport, upgrader, err := spdy.RoundTripperFor(&p.Host.Config)
transport, upgrader, err := spdy.RoundTripperFor(&p.ClientConfig)
if err != nil {
logger.Error(err, "Error creating RoundTripper for PortForward")
return err
@@ -388,7 +373,7 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
}
var cluster v1beta1.Cluster
if err := p.Host.Client.Get(ctx, clusterKey, &cluster); err != nil {
if err := p.HostClient.Get(ctx, clusterKey, &cluster); err != nil {
logger.Error(err, "Error getting Virtual Cluster definition")
return err
}
@@ -400,7 +385,7 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
}
var virtualPod corev1.Pod
if err := p.Virtual.Client.Get(ctx, key, &virtualPod); err != nil {
if err := p.VirtualClient.Get(ctx, key, &virtualPod); err != nil {
logger.Error(err, "Error getting Pod from Virtual Cluster")
return err
}
@@ -476,6 +461,12 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
p.configurePodEnvs(hostPod, &virtualPod)
// fieldpath annotations
if err := p.configureFieldPathEnv(&virtualPod, hostPod); err != nil {
logger.Error(err, "Unable to fetch fieldpath annotations for pod")
return err
}
// volumes will often refer to resources in the virtual cluster
// but instead need to refer to the synced host cluster version
p.transformVolumes(pod.Namespace, hostPod.Spec.Volumes)
@@ -494,12 +485,12 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
configureNetworking(hostPod, virtualPod.Name, virtualPod.Namespace, p.serverIP, p.dnsIP)
// set ownerReference to the cluster object
if err := controllerutil.SetControllerReference(&cluster, hostPod, p.Host.Client.Scheme()); err != nil {
if err := controllerutil.SetControllerReference(&cluster, hostPod, p.HostClient.Scheme()); err != nil {
logger.Error(err, "Unable to set owner reference for pod")
return err
}
if err := p.Host.Client.Create(ctx, hostPod); err != nil {
if err := p.HostClient.Create(ctx, hostPod); err != nil {
logger.Error(err, "Error creating pod on host cluster")
return err
}
@@ -604,14 +595,14 @@ func (p *Provider) updatePod(ctx context.Context, pod *corev1.Pod) error {
}
var hostPod corev1.Pod
if err := p.Host.Client.Get(ctx, hostKey, &hostPod); err != nil {
if err := p.HostClient.Get(ctx, hostKey, &hostPod); err != nil {
logger.Error(err, "Unable to get Pod to update from host cluster")
return err
}
updatePod(&hostPod, pod)
if err := p.Host.Client.Update(ctx, &hostPod); err != nil {
if err := p.HostClient.Update(ctx, &hostPod); err != nil {
logger.Error(err, "Unable to update Pod in host cluster")
return err
}
@@ -622,7 +613,7 @@ func (p *Provider) updatePod(ctx context.Context, pod *corev1.Pod) error {
hostPod.Spec.EphemeralContainers = pod.Spec.EphemeralContainers
if _, err := p.Host.CoreClient.Pods(p.ClusterNamespace).UpdateEphemeralContainers(ctx, hostPod.Name, &hostPod, metav1.UpdateOptions{}); err != nil {
if _, err := p.CoreClient.Pods(p.ClusterNamespace).UpdateEphemeralContainers(ctx, hostPod.Name, &hostPod, metav1.UpdateOptions{}); err != nil {
logger.Error(err, "Error when updating ephemeral containers in host pod")
return err
}
@@ -640,14 +631,14 @@ func (p *Provider) updatePod(ctx context.Context, pod *corev1.Pod) error {
}
var virtualPod corev1.Pod
if err := p.Virtual.Client.Get(ctx, key, &virtualPod); err != nil {
if err := p.VirtualClient.Get(ctx, key, &virtualPod); err != nil {
logger.Error(err, "Unable to get pod to update from virtual cluster")
return err
}
updatePod(&virtualPod, pod)
if err := p.Virtual.Client.Update(ctx, &virtualPod); err != nil {
if err := p.VirtualClient.Update(ctx, &virtualPod); err != nil {
logger.Error(err, "Unable to update Pod in virtual cluster")
return err
}
@@ -658,7 +649,7 @@ func (p *Provider) updatePod(ctx context.Context, pod *corev1.Pod) error {
virtualPod.Spec.EphemeralContainers = pod.Spec.EphemeralContainers
if _, err := p.Host.CoreClient.Pods(p.ClusterNamespace).UpdateEphemeralContainers(ctx, virtualPod.Name, &virtualPod, metav1.UpdateOptions{}); err != nil {
if _, err := p.CoreClient.Pods(p.ClusterNamespace).UpdateEphemeralContainers(ctx, virtualPod.Name, &virtualPod, metav1.UpdateOptions{}); err != nil {
logger.Error(err, "Error when updating ephemeral containers in virtual pod")
return err
}
@@ -707,7 +698,7 @@ func (p *Provider) deletePod(ctx context.Context, pod *corev1.Pod) error {
logger := p.logger.WithValues("namespace", pod.Namespace, "name", pod.Name, "pod", hostPodName)
logger.V(1).Info("DeletePod")
err := p.Host.CoreClient.Pods(p.ClusterNamespace).Delete(ctx, hostPodName, metav1.DeleteOptions{})
err := p.CoreClient.Pods(p.ClusterNamespace).Delete(ctx, hostPodName, metav1.DeleteOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
logger.Info("Pod to delete not found in host cluster")
@@ -769,7 +760,7 @@ func (p *Provider) getPodFromHostCluster(ctx context.Context, hostPodName string
}
var pod corev1.Pod
if err := p.Host.Client.Get(ctx, key, &pod); err != nil {
if err := p.HostClient.Get(ctx, key, &pod); err != nil {
return nil, err
}
@@ -797,7 +788,7 @@ func (p *Provider) GetPods(ctx context.Context) ([]*corev1.Pod, error) {
var podList corev1.PodList
err = p.Host.Client.List(ctx, &podList, &client.ListOptions{LabelSelector: selector})
err = p.HostClient.List(ctx, &podList, &client.ListOptions{LabelSelector: selector})
if err != nil {
p.logger.Error(err, "Error listing pods from host cluster")
return nil, err
@@ -974,3 +965,32 @@ func (p *Provider) configureEnvFrom(virtualPod *corev1.Pod, envs []corev1.EnvFro
return resultingEnvVars
}
// configureFieldPathEnv retrieves the annotations created by the pod mutating webhook
// and re-adds the recorded downward API env fieldpaths to the matching containers,
// removing each annotation once it has been applied
func (p *Provider) configureFieldPathEnv(pod, tPod *corev1.Pod) error {
for name, value := range pod.Annotations {
if strings.Contains(name, webhook.FieldpathField) {
containerIndex, envName, err := webhook.ParseFieldPathAnnotationKey(name)
if err != nil {
return err
}
// re-adding these envs to the pod
tPod.Spec.Containers[containerIndex].Env = append(tPod.Spec.Containers[containerIndex].Env, corev1.EnvVar{
Name: envName,
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: value,
},
},
})
// removing the annotation from the pod
delete(tPod.Annotations, name)
}
}
return nil
}


@@ -3,14 +3,11 @@ package provider
import (
"context"
"fmt"
"strconv"
"strings"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
authv1 "k8s.io/api/authentication/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -23,36 +20,22 @@ const (
serviceAccountTokenMountPath = "/var/run/secrets/kubernetes.io/serviceaccount"
)
// transformTokens copies the serviceaccount tokens used by virtualPod's serviceaccount to a secret on the host cluster and mount it
// transformTokens copies the serviceaccount tokens used by the pod's serviceaccount to a secret on the host cluster and mounts it
// to look like the serviceaccount token
func (p *Provider) transformTokens(ctx context.Context, virtualPod, hostPod *corev1.Pod) error {
logger := p.logger.WithValues("namespace", virtualPod.Namespace, "name", virtualPod.Name, "serviceAccountName", virtualPod.Spec.ServiceAccountName)
logger.V(1).Info("Transforming service account tokens")
func (p *Provider) transformTokens(ctx context.Context, pod, tPod *corev1.Pod) error {
logger := p.logger.WithValues("namespace", pod.Namespace, "name", pod.Name, "serviceAccountName", pod.Spec.ServiceAccountName)
logger.V(1).Info("Transforming token")
// transform projected service account token
if err := p.transformProjectedTokens(ctx, virtualPod, hostPod); err != nil {
return err
}
// transform kube-api-access token for all containers in virtualPod
if err := p.transformKubeAccessToken(ctx, virtualPod, hostPod); err != nil {
return err
}
return nil
}
func (p *Provider) transformKubeAccessToken(ctx context.Context, virtualPod, hostPod *corev1.Pod) error {
// skip this process if the kube-api-access volume is already removed from the pod
// this is needed in case users already add their own custom tokens, as in Rancher-imported clusters
if !hasKubeAccessVolume(virtualPod) {
if !isKubeAccessVolumeFound(pod) {
return nil
}
virtualSecretName := k3kcontroller.SafeConcatNameWithPrefix(virtualPod.Spec.ServiceAccountName, "token")
virtualSecretName := k3kcontroller.SafeConcatNameWithPrefix(pod.Spec.ServiceAccountName, "token")
virtualSecret := virtualSecret(virtualSecretName, virtualPod.Namespace, virtualPod.Spec.ServiceAccountName)
if err := p.Virtual.Client.Create(ctx, virtualSecret); err != nil {
virtualSecret := virtualSecret(virtualSecretName, pod.Namespace, pod.Spec.ServiceAccountName)
if err := p.VirtualClient.Create(ctx, virtualSecret); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
@@ -63,7 +46,7 @@ func (p *Provider) transformKubeAccessToken(ctx context.Context, virtualPod, hos
Name: virtualSecret.Name,
Namespace: virtualSecret.Namespace,
}
if err := p.Virtual.Client.Get(ctx, virtualSecretKey, virtualSecret); err != nil {
if err := p.VirtualClient.Get(ctx, virtualSecretKey, virtualSecret); err != nil {
return err
}
// To avoid race conditions we need to check if the secret's data has been populated
@@ -72,107 +55,23 @@ func (p *Provider) transformKubeAccessToken(ctx context.Context, virtualPod, hos
return fmt.Errorf("token secret %s/%s data is empty", virtualSecret.Namespace, virtualSecret.Name)
}
hostSecret, err := p.translateAndCreateHostTokenSecret(ctx, virtualSecret)
if err != nil {
return err
hostSecret := virtualSecret.DeepCopy()
hostSecret.Type = ""
hostSecret.Annotations = make(map[string]string)
p.Translator.TranslateTo(hostSecret)
if err := p.HostClient.Create(ctx, hostSecret); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
}
hostPod.Spec.ServiceAccountName = ""
hostPod.Spec.DeprecatedServiceAccount = ""
hostPod.Spec.AutomountServiceAccountToken = ptr.To(false)
removeKubeAccessVolume(hostPod)
addKubeAccessVolume(hostPod, hostSecret.Name)
p.translateToken(tPod, hostSecret.Name)
return nil
}
// transformProjectedTokens will iterate over the host pod projected volume sources
// and transform projected tokens to use a requested token secret from the virtual cluster
// instead of the automatically generated secret on the host cluster.
func (p *Provider) transformProjectedTokens(ctx context.Context, virtualPod, hostPod *corev1.Pod) error {
for i, volume := range hostPod.Spec.Volumes {
if strings.HasPrefix(volume.Name, kubeAPIAccessPrefix) {
continue
}
if volume.Projected == nil {
continue
}
for j, source := range volume.Projected.Sources {
if source.ServiceAccountToken == nil {
continue
}
projectedSecret, err := p.requestTokenSecret(ctx, source.ServiceAccountToken, virtualPod)
if err != nil {
return err
}
hostSecret, err := p.translateAndCreateHostTokenSecret(ctx, projectedSecret)
if err != nil {
return err
}
// replace the projected token volume with a projected secret
hostPod.Spec.Volumes[i].Projected.Sources[j].ServiceAccountToken = nil
hostPod.Spec.Volumes[i].Projected.Sources[j].Secret = &corev1.SecretProjection{
LocalObjectReference: corev1.LocalObjectReference{
Name: hostSecret.Name,
},
}
}
}
return nil
}
func (p *Provider) requestTokenSecret(ctx context.Context, token *corev1.ServiceAccountTokenProjection, virtualPod *corev1.Pod) (*corev1.Secret, error) {
namespace := virtualPod.Namespace
serviceAccountName := virtualPod.Spec.ServiceAccountName
var audiences []string
if token.Audience != "" {
audiences = []string{token.Audience}
}
tokenRequest := &authv1.TokenRequest{
ObjectMeta: metav1.ObjectMeta{
Name: serviceAccountName,
Namespace: namespace,
},
Spec: authv1.TokenRequestSpec{
Audiences: audiences,
ExpirationSeconds: token.ExpirationSeconds,
BoundObjectRef: &authv1.BoundObjectReference{
Name: virtualPod.Name,
UID: virtualPod.UID,
Kind: "Pod",
APIVersion: "v1",
},
},
}
tokenResp, err := p.Virtual.CoreClient.ServiceAccounts(namespace).CreateToken(ctx, serviceAccountName, tokenRequest, metav1.CreateOptions{})
if err != nil {
return nil, err
}
// create a virtual secret with that token
virtualSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
// creating unique name for the virtual secret based on the request attributes
Name: generateTokenSecretName(serviceAccountName, token.Path, tokenResp),
Namespace: namespace,
},
Data: map[string][]byte{
token.Path: []byte(tokenResp.Status.Token),
},
}
return virtualSecret, nil
}
func virtualSecret(name, namespace, serviceAccountName string) *corev1.Secret {
return &corev1.Secret{
TypeMeta: metav1.TypeMeta{
@@ -190,25 +89,17 @@ func virtualSecret(name, namespace, serviceAccountName string) *corev1.Secret {
}
}
func (p *Provider) translateAndCreateHostTokenSecret(ctx context.Context, projectedToken *corev1.Secret) (*corev1.Secret, error) {
hostSecret := projectedToken.DeepCopy()
hostSecret.Type = ""
hostSecret.Annotations = make(map[string]string)
p.Translator.TranslateTo(hostSecret)
data := hostSecret.Data
if _, err := controllerutil.CreateOrUpdate(ctx, p.Host.Client, hostSecret, func() error {
hostSecret.Data = data
return nil
}); err != nil {
return nil, err
}
return hostSecret, nil
// translateToken will remove the serviceaccount from the pod and replace the kube-api-access volume
// with a custom token volume and mount it to all containers within the pod
func (p *Provider) translateToken(pod *corev1.Pod, hostSecretName string) {
pod.Spec.ServiceAccountName = ""
pod.Spec.DeprecatedServiceAccount = ""
pod.Spec.AutomountServiceAccountToken = ptr.To(false)
removeKubeAccessVolume(pod)
addKubeAccessVolume(pod, hostSecretName)
}
func hasKubeAccessVolume(pod *corev1.Pod) bool {
func isKubeAccessVolumeFound(pod *corev1.Pod) bool {
for _, volume := range pod.Spec.Volumes {
if strings.HasPrefix(volume.Name, kubeAPIAccessPrefix) {
return true
@@ -280,29 +171,4 @@ func addKubeAccessVolume(pod *corev1.Pod, hostSecretName string) {
MountPath: serviceAccountTokenMountPath,
})
}
for i := range pod.Spec.EphemeralContainers {
pod.Spec.EphemeralContainers[i].VolumeMounts = append(pod.Spec.EphemeralContainers[i].VolumeMounts, corev1.VolumeMount{
Name: tokenVolumeName,
MountPath: serviceAccountTokenMountPath,
})
}
}
func generateTokenSecretName(serviceAccountName, tokenPath string, tokenReq *authv1.TokenRequest) string {
nameComponents := []string{serviceAccountName}
if tokenReq.Spec.Audiences != nil {
nameComponents = append(nameComponents, tokenReq.Spec.Audiences...)
}
if exp := tokenReq.Spec.ExpirationSeconds; exp != nil {
nameComponents = append(nameComponents, strconv.FormatInt(*exp, 10))
}
if tokenPath != "" {
nameComponents = append(nameComponents, tokenPath)
}
return k3kcontroller.SafeConcatNameWithPrefix(nameComponents...)
}


@@ -1,327 +0,0 @@
package provider
import (
"testing"
"github.com/stretchr/testify/assert"
"k8s.io/utils/ptr"
authv1 "k8s.io/api/authentication/v1"
corev1 "k8s.io/api/core/v1"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
)
func Test_hasKubeAccessVolume(t *testing.T) {
tests := []struct {
name string
pod *corev1.Pod
want bool
}{
{
name: "no volumes",
pod: &corev1.Pod{},
want: false,
},
{
name: "volume with kube-api-access prefix",
pod: &corev1.Pod{
Spec: corev1.PodSpec{
Volumes: []corev1.Volume{
{Name: "kube-api-access-abc123"},
},
},
},
want: true,
},
{
name: "exact kube-api-access name",
pod: &corev1.Pod{
Spec: corev1.PodSpec{
Volumes: []corev1.Volume{
{Name: "kube-api-access"},
},
},
},
want: true,
},
{
name: "volume without kube-api-access prefix",
pod: &corev1.Pod{
Spec: corev1.PodSpec{
Volumes: []corev1.Volume{
{Name: "my-volume"},
},
},
},
want: false,
},
{
name: "multiple volumes with one kube-api-access",
pod: &corev1.Pod{
Spec: corev1.PodSpec{
Volumes: []corev1.Volume{
{Name: "config-volume"},
{Name: "kube-api-access-xyz"},
{Name: "data-volume"},
},
},
},
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
assert.Equal(t, tt.want, hasKubeAccessVolume(tt.pod))
})
}
}
func Test_removeKubeAccessVolume(t *testing.T) {
t.Run("removes volume and all volume mounts from containers", func(t *testing.T) {
pod := &corev1.Pod{
Spec: corev1.PodSpec{
Volumes: []corev1.Volume{
{Name: "config-volume"},
{Name: "kube-api-access-abc"},
{Name: "data-volume"},
},
InitContainers: []corev1.Container{
{
Name: "init",
VolumeMounts: []corev1.VolumeMount{
{Name: "config-volume", MountPath: "/config"},
{Name: "kube-api-access-abc", MountPath: serviceAccountTokenMountPath},
},
},
},
Containers: []corev1.Container{
{
Name: "main",
VolumeMounts: []corev1.VolumeMount{
{Name: "kube-api-access-abc", MountPath: serviceAccountTokenMountPath},
{Name: "data-volume", MountPath: "/data"},
},
},
},
EphemeralContainers: []corev1.EphemeralContainer{
{
EphemeralContainerCommon: corev1.EphemeralContainerCommon{
Name: "debug",
VolumeMounts: []corev1.VolumeMount{
{Name: "kube-api-access-abc", MountPath: serviceAccountTokenMountPath},
},
},
},
},
},
}
removeKubeAccessVolume(pod)
// Verify volume was removed
assert.Equal(t, 2, len(pod.Spec.Volumes))
assert.Equal(t, "config-volume", pod.Spec.Volumes[0].Name)
assert.Equal(t, "data-volume", pod.Spec.Volumes[1].Name)
// Verify init container mount was removed
assert.Equal(t, 1, len(pod.Spec.InitContainers[0].VolumeMounts))
assert.Equal(t, "config-volume", pod.Spec.InitContainers[0].VolumeMounts[0].Name)
// Verify container mount was removed
assert.Equal(t, 1, len(pod.Spec.Containers[0].VolumeMounts))
assert.Equal(t, "data-volume", pod.Spec.Containers[0].VolumeMounts[0].Name)
// Verify ephemeral container mount was removed
assert.Equal(t, 0, len(pod.Spec.EphemeralContainers[0].VolumeMounts))
})
t.Run("no kube-api-access volume present", func(t *testing.T) {
pod := &corev1.Pod{
Spec: corev1.PodSpec{
Volumes: []corev1.Volume{
{Name: "config-volume"},
},
Containers: []corev1.Container{
{
Name: "main",
VolumeMounts: []corev1.VolumeMount{
{Name: "config-volume", MountPath: "/config"},
},
},
},
},
}
removeKubeAccessVolume(pod)
assert.Equal(t, 1, len(pod.Spec.Volumes))
assert.Equal(t, "config-volume", pod.Spec.Volumes[0].Name)
assert.Equal(t, 1, len(pod.Spec.Containers[0].VolumeMounts))
})
}
func Test_addKubeAccessVolume(t *testing.T) {
tokenVolumeName := k3kcontroller.SafeConcatNameWithPrefix(kubeAPIAccessPrefix)
hostSecretName := "host-secret-token"
pod := &corev1.Pod{
Spec: corev1.PodSpec{
Volumes: []corev1.Volume{
{Name: "existing-volume"},
},
InitContainers: []corev1.Container{
{Name: "init"},
},
Containers: []corev1.Container{
{Name: "main"},
{Name: "sidecar"},
},
EphemeralContainers: []corev1.EphemeralContainer{
{
EphemeralContainerCommon: corev1.EphemeralContainerCommon{
Name: "debug",
},
},
},
},
}
addKubeAccessVolume(pod, hostSecretName)
// Verify volume was added
assert.Equal(t, 2, len(pod.Spec.Volumes))
addedVol := pod.Spec.Volumes[1]
assert.Equal(t, tokenVolumeName, addedVol.Name)
assert.Equal(t, hostSecretName, addedVol.Secret.SecretName)
// Verify init container mount was added
assert.Equal(t, 1, len(pod.Spec.InitContainers[0].VolumeMounts))
assert.Equal(t, tokenVolumeName, pod.Spec.InitContainers[0].VolumeMounts[0].Name)
assert.Equal(t, serviceAccountTokenMountPath, pod.Spec.InitContainers[0].VolumeMounts[0].MountPath)
// Verify all container mounts were added
for _, c := range pod.Spec.Containers {
assert.Equal(t, 1, len(c.VolumeMounts), "container %s should have mount", c.Name)
assert.Equal(t, tokenVolumeName, c.VolumeMounts[0].Name)
assert.Equal(t, serviceAccountTokenMountPath, c.VolumeMounts[0].MountPath)
}
// Verify ephemeral container mounts were added
for _, c := range pod.Spec.EphemeralContainers {
assert.Equal(t, 1, len(c.VolumeMounts), "ephemeral container %s should have mount", c.Name)
assert.Equal(t, tokenVolumeName, c.VolumeMounts[0].Name)
assert.Equal(t, serviceAccountTokenMountPath, c.VolumeMounts[0].MountPath)
}
}
func Test_virtualSecret(t *testing.T) {
s := virtualSecret("my-secret", "my-ns", "my-sa")
assert.Equal(t, "my-secret", s.Name)
assert.Equal(t, "my-ns", s.Namespace)
assert.Equal(t, corev1.SecretTypeServiceAccountToken, s.Type)
assert.Equal(t, "my-sa", s.Annotations[corev1.ServiceAccountNameKey])
assert.Equal(t, "Secret", s.Kind)
assert.Equal(t, "v1", s.APIVersion)
}
func Test_generateTokenSecretName(t *testing.T) {
tests := []struct {
name string
serviceAccountName string
tokenPath string
tokenReq *authv1.TokenRequest
want string
}{
{
name: "no audiences, no expiration, no path",
serviceAccountName: "default",
tokenReq: &authv1.TokenRequest{
Spec: authv1.TokenRequestSpec{},
},
want: "k3k-default",
},
{
name: "no audiences, with expiration",
serviceAccountName: "default",
tokenPath: "token",
tokenReq: &authv1.TokenRequest{
Spec: authv1.TokenRequestSpec{
ExpirationSeconds: ptr.To(int64(3600)),
},
},
want: "k3k-default-3600-token",
},
{
name: "with single audience and expiration",
serviceAccountName: "my-sa",
tokenPath: "token",
tokenReq: &authv1.TokenRequest{
Spec: authv1.TokenRequestSpec{
Audiences: []string{"api"},
ExpirationSeconds: ptr.To(int64(3600)),
},
},
want: "k3k-my-sa-api-3600-token",
},
{
name: "with multiple audiences and expiration",
serviceAccountName: "my-sa",
tokenPath: "token",
tokenReq: &authv1.TokenRequest{
Spec: authv1.TokenRequestSpec{
Audiences: []string{"api", "vault"},
ExpirationSeconds: ptr.To(int64(3600)),
},
},
want: "k3k-my-sa-api-vault-3600-token",
},
{
name: "with audiences, no expiration",
serviceAccountName: "my-sa",
tokenPath: "vault-token",
tokenReq: &authv1.TokenRequest{
Spec: authv1.TokenRequestSpec{
Audiences: []string{"api"},
},
},
want: "k3k-my-sa-api-vault-token",
},
{
name: "different paths produce different names",
serviceAccountName: "my-sa",
tokenPath: "other-path",
tokenReq: &authv1.TokenRequest{
Spec: authv1.TokenRequestSpec{
Audiences: []string{"api"},
ExpirationSeconds: ptr.To(int64(3600)),
},
},
want: "k3k-my-sa-api-3600-other-path",
},
{
name: "long name gets truncated with hash",
serviceAccountName: "my-very-long-service-account-name",
tokenPath: "some-very-long-token-path-value",
tokenReq: &authv1.TokenRequest{
Spec: authv1.TokenRequestSpec{
Audiences: []string{"some-very-long-audience-string"},
ExpirationSeconds: ptr.To(int64(3600)),
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := generateTokenSecretName(tt.serviceAccountName, tt.tokenPath, tt.tokenReq)
if tt.want != "" {
assert.Equal(t, tt.want, got)
}
assert.Less(t, len(got), 64, "name should be under 64 characters")
})
}
}


@@ -35,6 +35,7 @@ var (
config cluster.Config
kubeconfig string
kubeletPortRange string
webhookPortRange string
maxConcurrentReconciles int
debug bool
logFormat string
@@ -71,6 +72,7 @@ func main() {
rootCmd.PersistentFlags().StringVar(&config.VirtualAgentImage, "agent-virtual-image", "rancher/k3s", "K3S Virtual Agent image")
rootCmd.PersistentFlags().StringVar(&config.VirtualAgentImagePullPolicy, "agent-virtual-image-pull-policy", "", "K3S Virtual Agent image pull policy, must be one of Always, IfNotPresent or Never")
rootCmd.PersistentFlags().StringVar(&kubeletPortRange, "kubelet-port-range", "50000-51000", "Port Range for k3k kubelet in shared mode")
rootCmd.PersistentFlags().StringVar(&webhookPortRange, "webhook-port-range", "51001-52000", "Port Range for k3k kubelet webhook in shared mode")
rootCmd.PersistentFlags().StringVar(&config.K3SServerImage, "k3s-server-image", "rancher/k3s", "K3K server image")
rootCmd.PersistentFlags().StringVar(&config.K3SServerImagePullPolicy, "k3s-server-image-pull-policy", "", "K3K server image pull policy")
rootCmd.PersistentFlags().StringSliceVar(&config.ServerImagePullSecrets, "server-image-pull-secret", nil, "Image pull secret used for servers")
@@ -108,7 +110,7 @@ func run(cmd *cobra.Command, args []string) error {
return err
}
runnable := portAllocator.InitPortAllocatorConfig(ctx, mgr.GetClient(), kubeletPortRange)
runnable := portAllocator.InitPortAllocatorConfig(ctx, mgr.GetClient(), kubeletPortRange, webhookPortRange)
if err := mgr.Add(runnable); err != nil {
return err
}
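
Both flags take the same "start-end" string; the actual parsing is delegated to the upstream portallocator machinery, so the helper below is only an illustrative standalone equivalent:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parsePortRange splits a "start-end" range string and validates
// its ordering; assumed behavior, for illustration only.
func parsePortRange(s string) (int, int, error) {
	lo, hi, ok := strings.Cut(s, "-")
	if !ok {
		return 0, 0, fmt.Errorf("expected start-end, got %q", s)
	}
	start, err := strconv.Atoi(lo)
	if err != nil {
		return 0, 0, err
	}
	end, err := strconv.Atoi(hi)
	if err != nil {
		return 0, 0, err
	}
	if start > end {
		return 0, 0, fmt.Errorf("start %d greater than end %d", start, end)
	}
	return start, end, nil
}

func main() {
	start, end, err := parsePortRange("51001-52000")
	fmt.Println(start, end, err) // 51001 52000 <nil>
}
```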

View File

@@ -169,18 +169,6 @@ type ClusterSpec struct {
// +optional
WorkerLimit corev1.ResourceList `json:"workerLimit,omitempty"`
// ServerAffinity specifies the affinity rules for server pods.
// This includes both node affinity and pod affinity/anti-affinity rules.
//
// +optional
ServerAffinity *corev1.Affinity `json:"serverAffinity,omitempty"`
// AgentAffinity specifies the affinity rules for agent pods.
// This includes both node affinity and pod affinity/anti-affinity rules.
//
// +optional
AgentAffinity *corev1.Affinity `json:"agentAffinity,omitempty"`
// MirrorHostNodes controls whether node objects from the host cluster
// are mirrored into the virtual cluster.
//
@@ -203,19 +191,6 @@ type ClusterSpec struct {
//
// +optional
SecretMounts []SecretMount `json:"secretMounts,omitempty"`
// SecurityContext specifies custom SecurityContext to be added
// to the agent and server pods of the cluster in virtual or shared mode.
// This option will override the SecurityContext set by default for virtual mode.
//
// +optional
SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty"`
// RuntimeClassName specifies alternative runtime class for the
// agent and server pods of the cluster in virtual or shared mode.
//
// +optional
RuntimeClassName *string `json:"runtimeClassName,omitempty"`
}
// SecretMount defines a secret to be mounted into server or agent pods,
@@ -274,14 +249,9 @@ type SyncConfig struct {
// +kubebuilder:default={"enabled": false}
// +optional
PriorityClasses PriorityClassSyncConfig `json:"priorityClasses"`
// StorageClasses resources sync configuration.
//
// +kubebuilder:default={"enabled": false}
// +optional
StorageClasses StorageClassSyncConfig `json:"storageClasses"`
}
// SecretSyncConfig specifies the sync options for Secrets.
// SecretSyncConfig specifies the sync options for services.
type SecretSyncConfig struct {
// Enabled is an on/off switch for syncing resources.
//
@@ -296,7 +266,7 @@ type SecretSyncConfig struct {
Selector map[string]string `json:"selector,omitempty"`
}
// ServiceSyncConfig specifies the sync options for Services.
// ServiceSyncConfig specifies the sync options for services.
type ServiceSyncConfig struct {
// Enabled is an on/off switch for syncing resources.
//
@@ -311,7 +281,7 @@ type ServiceSyncConfig struct {
Selector map[string]string `json:"selector,omitempty"`
}
// ConfigMapSyncConfig specifies the sync options for ConfigMaps.
// ConfigMapSyncConfig specifies the sync options for services.
type ConfigMapSyncConfig struct {
// Enabled is an on/off switch for syncing resources.
//
@@ -326,7 +296,7 @@ type ConfigMapSyncConfig struct {
Selector map[string]string `json:"selector,omitempty"`
}
// IngressSyncConfig specifies the sync options for Ingresses.
// IngressSyncConfig specifies the sync options for services.
type IngressSyncConfig struct {
// Enabled is an on/off switch for syncing resources.
//
@@ -339,16 +309,9 @@ type IngressSyncConfig struct {
//
// +optional
Selector map[string]string `json:"selector,omitempty"`
// DisableTLSSecretTranslation is an on/off switch for translating TLS secrets
// from virtual cluster to host cluster
//
// +kubebuilder:default=false
// +optional
DisableTLSSecretTranslation bool `json:"disableTLSSecretTranslation,omitempty"`
}
// PersistentVolumeClaimSyncConfig specifies the sync options for PersistentVolumeClaims.
// PersistentVolumeClaimSyncConfig specifies the sync options for services.
type PersistentVolumeClaimSyncConfig struct {
// Enabled is an on/off switch for syncing resources.
//
@@ -363,7 +326,7 @@ type PersistentVolumeClaimSyncConfig struct {
Selector map[string]string `json:"selector,omitempty"`
}
// PriorityClassSyncConfig specifies the sync options for PriorityClasses.
// PriorityClassSyncConfig specifies the sync options for services.
type PriorityClassSyncConfig struct {
// Enabled is an on/off switch for syncing resources.
//
@@ -378,21 +341,6 @@ type PriorityClassSyncConfig struct {
Selector map[string]string `json:"selector,omitempty"`
}
// StorageClassSyncConfig specifies the sync options for StorageClasses.
type StorageClassSyncConfig struct {
// Enabled is an on/off switch for syncing resources.
//
// +kubebuilder:default=false
// +required
Enabled bool `json:"enabled"`
// Selector specifies set of labels of the resources that will be synced, if empty
// then all resources of the given type will be synced.
//
// +optional
Selector map[string]string `json:"selector,omitempty"`
}
// ClusterMode is the possible provisioning mode of a Cluster.
//
// +kubebuilder:validation:Enum=shared;virtual
@@ -601,6 +549,11 @@ type ClusterStatus struct {
// +optional
KubeletPort int `json:"kubeletPort,omitempty"`
// WebhookPort specifies the port used by the webhook in k3k-kubelet in shared mode.
//
// +optional
WebhookPort int `json:"webhookPort,omitempty"`
// Conditions are the individual conditions set for the cluster.
//
// +optional
@@ -631,35 +584,6 @@ type AppliedPolicy struct {
//
// +optional
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
// serverAffinity is the affinity rules for server pods enforced by the active VirtualClusterPolicy.
// This includes both node affinity and pod affinity/anti-affinity rules.
//
// +optional
ServerAffinity *corev1.Affinity `json:"serverAffinity,omitempty"`
// agentAffinity is the affinity rules for agent pods enforced by the active VirtualClusterPolicy.
// This includes both node affinity and pod affinity/anti-affinity rules.
//
// +optional
AgentAffinity *corev1.Affinity `json:"agentAffinity,omitempty"`
// sync is the SyncConfig enforced by the active VirtualClusterPolicy.
//
// +optional
Sync *SyncConfig `json:"sync,omitempty"`
// SecurityContext specifies custom SecurityContext to be added
// to the agent and server pods of the cluster in virtual or shared mode.
//
// +optional
RuntimeClassName *string `json:"runtimeClassName,omitempty"`
// RuntimeClassName specifies alternative runtime class for the
// agent and server pods of the cluster in virtual or shared mode.
//
// +optional
SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty"`
}
// ClusterPhase is a high-level summary of the cluster's current lifecycle state.
@@ -733,18 +657,6 @@ type VirtualClusterPolicySpec struct {
// +optional
DefaultPriorityClass string `json:"defaultPriorityClass,omitempty"`
// DefaultServerAffinity specifies the affinity rules applied to server pods of all clusters in the target Namespace.
// This includes both node affinity and pod affinity/anti-affinity rules.
//
// +optional
DefaultServerAffinity *corev1.Affinity `json:"defaultServerAffinity,omitempty"`
// DefaultAgentAffinity specifies the affinity rules applied to agent pods of all clusters in the target Namespace.
// This includes both node affinity and pod affinity/anti-affinity rules.
//
// +optional
DefaultAgentAffinity *corev1.Affinity `json:"defaultAgentAffinity,omitempty"`
// AllowedMode specifies the allowed cluster provisioning mode. Defaults to "shared".
//
// +kubebuilder:default=shared
@@ -767,18 +679,6 @@ type VirtualClusterPolicySpec struct {
// +kubebuilder:default={}
// +optional
Sync *SyncConfig `json:"sync,omitempty"`
// SecurityContext specifies custom SecurityContext to be added
// to the agent and server pods of the cluster in virtual or shared mode.
//
// +optional
RuntimeClassName *string `json:"runtimeClassName,omitempty"`
// RuntimeClassName specifies alternative runtime class for the
// agent and server pods of the cluster in virtual or shared mode.
//
// +optional
SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty"`
}
// PodSecurityAdmissionLevel is the policy level applied to the pods in the namespace.

View File

@@ -40,31 +40,6 @@ func (in *AppliedPolicy) DeepCopyInto(out *AppliedPolicy) {
(*out)[key] = val
}
}
if in.ServerAffinity != nil {
in, out := &in.ServerAffinity, &out.ServerAffinity
*out = new(v1.Affinity)
(*in).DeepCopyInto(*out)
}
if in.AgentAffinity != nil {
in, out := &in.AgentAffinity, &out.AgentAffinity
*out = new(v1.Affinity)
(*in).DeepCopyInto(*out)
}
if in.Sync != nil {
in, out := &in.Sync, &out.Sync
*out = new(SyncConfig)
(*in).DeepCopyInto(*out)
}
if in.RuntimeClassName != nil {
in, out := &in.RuntimeClassName, &out.RuntimeClassName
*out = new(string)
**out = **in
}
if in.SecurityContext != nil {
in, out := &in.SecurityContext, &out.SecurityContext
*out = new(v1.SecurityContext)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppliedPolicy.
@@ -215,16 +190,6 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
(*out)[key] = val.DeepCopy()
}
}
if in.ServerAffinity != nil {
in, out := &in.ServerAffinity, &out.ServerAffinity
*out = new(v1.Affinity)
(*in).DeepCopyInto(*out)
}
if in.AgentAffinity != nil {
in, out := &in.AgentAffinity, &out.AgentAffinity
*out = new(v1.Affinity)
(*in).DeepCopyInto(*out)
}
if in.CustomCAs != nil {
in, out := &in.CustomCAs, &out.CustomCAs
*out = new(CustomCAs)
@@ -242,16 +207,6 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.SecurityContext != nil {
in, out := &in.SecurityContext, &out.SecurityContext
*out = new(v1.SecurityContext)
(*in).DeepCopyInto(*out)
}
if in.RuntimeClassName != nil {
in, out := &in.RuntimeClassName, &out.RuntimeClassName
*out = new(string)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec.
@@ -623,28 +578,6 @@ func (in *ServiceSyncConfig) DeepCopy() *ServiceSyncConfig {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StorageClassSyncConfig) DeepCopyInto(out *StorageClassSyncConfig) {
*out = *in
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageClassSyncConfig.
func (in *StorageClassSyncConfig) DeepCopy() *StorageClassSyncConfig {
if in == nil {
return nil
}
out := new(StorageClassSyncConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SyncConfig) DeepCopyInto(out *SyncConfig) {
*out = *in
@@ -654,7 +587,6 @@ func (in *SyncConfig) DeepCopyInto(out *SyncConfig) {
in.Ingresses.DeepCopyInto(&out.Ingresses)
in.PersistentVolumeClaims.DeepCopyInto(&out.PersistentVolumeClaims)
in.PriorityClasses.DeepCopyInto(&out.PriorityClasses)
in.StorageClasses.DeepCopyInto(&out.StorageClasses)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncConfig.
@@ -746,16 +678,6 @@ func (in *VirtualClusterPolicySpec) DeepCopyInto(out *VirtualClusterPolicySpec)
(*out)[key] = val
}
}
if in.DefaultServerAffinity != nil {
in, out := &in.DefaultServerAffinity, &out.DefaultServerAffinity
*out = new(v1.Affinity)
(*in).DeepCopyInto(*out)
}
if in.DefaultAgentAffinity != nil {
in, out := &in.DefaultAgentAffinity, &out.DefaultAgentAffinity
*out = new(v1.Affinity)
(*in).DeepCopyInto(*out)
}
if in.PodSecurityAdmissionLevel != nil {
in, out := &in.PodSecurityAdmissionLevel, &out.PodSecurityAdmissionLevel
*out = new(PodSecurityAdmissionLevel)
@@ -766,16 +688,6 @@ func (in *VirtualClusterPolicySpec) DeepCopyInto(out *VirtualClusterPolicySpec)
*out = new(SyncConfig)
(*in).DeepCopyInto(*out)
}
if in.RuntimeClassName != nil {
in, out := &in.RuntimeClassName, &out.RuntimeClassName
*out = new(string)
**out = **in
}
if in.SecurityContext != nil {
in, out := &in.SecurityContext, &out.SecurityContext
*out = new(v1.SecurityContext)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualClusterPolicySpec.

View File

@@ -5,7 +5,7 @@ import (
"fmt"
"os"
"gopkg.in/yaml.v3"
"gopkg.in/yaml.v2"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/registry/core/service/portallocator"
@@ -19,6 +19,7 @@ import (
const (
kubeletPortRangeConfigMapName = "k3k-kubelet-port-range"
webhookPortRangeConfigMapName = "k3k-webhook-port-range"
rangeKey = "range"
allocatedPortsKey = "allocatedPorts"
@@ -29,6 +30,7 @@ type PortAllocator struct {
ctrlruntimeclient.Client
KubeletCM *v1.ConfigMap
WebhookCM *v1.ConfigMap
}
func NewPortAllocator(ctx context.Context, client ctrlruntimeclient.Client) (*PortAllocator, error) {
@@ -40,20 +42,32 @@ func NewPortAllocator(ctx context.Context, client ctrlruntimeclient.Client) (*Po
return nil, fmt.Errorf("failed to find k3k controller namespace")
}
var kubeletPortRangeCM v1.ConfigMap
var kubeletPortRangeCM, webhookPortRangeCM v1.ConfigMap
kubeletPortRangeCM.Name = kubeletPortRangeConfigMapName
kubeletPortRangeCM.Namespace = portRangeConfigMapNamespace
webhookPortRangeCM.Name = webhookPortRangeConfigMapName
webhookPortRangeCM.Namespace = portRangeConfigMapNamespace
return &PortAllocator{
Client: client,
KubeletCM: &kubeletPortRangeCM,
WebhookCM: &webhookPortRangeCM,
}, nil
}
func (a *PortAllocator) InitPortAllocatorConfig(ctx context.Context, client ctrlruntimeclient.Client, kubeletPortRange string) manager.Runnable {
func (a *PortAllocator) InitPortAllocatorConfig(ctx context.Context, client ctrlruntimeclient.Client, kubeletPortRange, webhookPortRange string) manager.Runnable {
return manager.RunnableFunc(func(ctx context.Context) error {
return a.getOrCreate(ctx, a.KubeletCM, kubeletPortRange)
if err := a.getOrCreate(ctx, a.KubeletCM, kubeletPortRange); err != nil {
return err
}
if err := a.getOrCreate(ctx, a.WebhookCM, webhookPortRange); err != nil {
return err
}
return nil
})
}
@@ -85,6 +99,14 @@ func (a *PortAllocator) getOrCreate(ctx context.Context, configmap *v1.ConfigMap
return nil
}
func (a *PortAllocator) AllocateWebhookPort(ctx context.Context, clusterName, clusterNamespace string) (int, error) {
return a.allocatePort(ctx, clusterName, clusterNamespace, a.WebhookCM)
}
func (a *PortAllocator) DeallocateWebhookPort(ctx context.Context, clusterName, clusterNamespace string, webhookPort int) error {
return a.deallocatePort(ctx, clusterName, clusterNamespace, a.WebhookCM, webhookPort)
}
func (a *PortAllocator) AllocateKubeletPort(ctx context.Context, clusterName, clusterNamespace string) (int, error) {
return a.allocatePort(ctx, clusterName, clusterNamespace, a.KubeletCM)
}
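
The new webhook helpers mirror the existing kubelet pair, so callers follow the same allocate-on-create, release-on-finalize discipline. A hedged, self-contained sketch of that caller contract (the interface and fake below are stand-ins, not part of k3k):

```go
package main

import (
	"context"
	"fmt"
)

type webhookPorts interface {
	AllocateWebhookPort(ctx context.Context, name, namespace string) (int, error)
	DeallocateWebhookPort(ctx context.Context, name, namespace string, port int) error
}

// fakeAlloc hands out a fixed port; the real allocator is backed by
// the k3k-webhook-port-range ConfigMap.
type fakeAlloc struct{}

func (fakeAlloc) AllocateWebhookPort(ctx context.Context, name, ns string) (int, error) {
	return 51001, nil
}

func (fakeAlloc) DeallocateWebhookPort(ctx context.Context, name, ns string, port int) error {
	return nil
}

func main() {
	ctx := context.Background()
	var alloc webhookPorts = fakeAlloc{}

	// on cluster creation: allocate and record the port in status
	port, err := alloc.AllocateWebhookPort(ctx, "mycluster", "ns-1")
	if err != nil {
		panic(err)
	}
	fmt.Println("status.webhookPort =", port)

	// in the finalizer path: release the recorded port
	if err := alloc.DeallocateWebhookPort(ctx, "mycluster", "ns-1", port); err != nil {
		panic(err)
	}
}
```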

View File

@@ -2,21 +2,26 @@ package agent
import (
"context"
"crypto"
"crypto/x509"
"errors"
"fmt"
"time"
"k8s.io/apimachinery/pkg/util/intstr"
certutil "github.com/rancher/dynamiclistener/cert"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/certs"
)
const (
@@ -32,10 +37,11 @@ type SharedAgent struct {
imageRegistry string
token string
kubeletPort int
webhookPort int
imagePullSecrets []string
}
func NewSharedAgent(config *Config, serviceIP, image, imagePullPolicy, token string, kubeletPort int, imagePullSecrets []string) *SharedAgent {
func NewSharedAgent(config *Config, serviceIP, image, imagePullPolicy, token string, kubeletPort, webhookPort int, imagePullSecrets []string) *SharedAgent {
return &SharedAgent{
Config: config,
serviceIP: serviceIP,
@@ -43,6 +49,7 @@ func NewSharedAgent(config *Config, serviceIP, image, imagePullPolicy, token str
imagePullPolicy: imagePullPolicy,
token: token,
kubeletPort: kubeletPort,
webhookPort: webhookPort,
imagePullSecrets: imagePullSecrets,
}
}
@@ -60,6 +67,7 @@ func (s *SharedAgent) EnsureResources(ctx context.Context) error {
s.service(ctx),
s.daemonset(ctx),
s.dnsService(ctx),
s.webhookTLS(ctx),
); err != nil {
return fmt.Errorf("failed to ensure some resources: %w", err)
}
@@ -72,7 +80,7 @@ func (s *SharedAgent) ensureObject(ctx context.Context, obj ctrlruntimeclient.Ob
}
func (s *SharedAgent) config(ctx context.Context) error {
config := sharedAgentData(s.cluster, s.Name(), s.token, s.serviceIP, s.kubeletPort)
config := sharedAgentData(s.cluster, s.Name(), s.token, s.serviceIP, s.kubeletPort, s.webhookPort)
configSecret := &v1.Secret{
TypeMeta: metav1.TypeMeta{
@@ -91,7 +99,7 @@ func (s *SharedAgent) config(ctx context.Context) error {
return s.ensureObject(ctx, configSecret)
}
func sharedAgentData(cluster *v1beta1.Cluster, serviceName, token, ip string, kubeletPort int) string {
func sharedAgentData(cluster *v1beta1.Cluster, serviceName, token, ip string, kubeletPort, webhookPort int) string {
version := cluster.Spec.Version
if cluster.Spec.Version == "" {
version = cluster.Status.HostVersion
@@ -104,8 +112,9 @@ serviceName: %s
token: %v
mirrorHostNodes: %t
version: %s
webhookPort: %d
kubeletPort: %d`,
cluster.Name, cluster.Namespace, ip, serviceName, token, cluster.Spec.MirrorHostNodes, version, kubeletPort)
cluster.Name, cluster.Namespace, ip, serviceName, token, cluster.Spec.MirrorHostNodes, version, webhookPort, kubeletPort)
}
func (s *SharedAgent) daemonset(ctx context.Context) error {
@@ -133,7 +142,7 @@ func (s *SharedAgent) daemonset(ctx context.Context) error {
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
},
Spec: s.podSpec(ctx),
Spec: s.podSpec(),
},
},
}
@@ -141,9 +150,7 @@ func (s *SharedAgent) daemonset(ctx context.Context) error {
return s.ensureObject(ctx, deploy)
}
func (s *SharedAgent) podSpec(ctx context.Context) v1.PodSpec {
log := ctrl.LoggerFrom(ctx)
func (s *SharedAgent) podSpec() v1.PodSpec {
hostNetwork := false
dnsPolicy := v1.DNSClusterFirst
@@ -158,15 +165,7 @@ func (s *SharedAgent) podSpec(ctx context.Context) v1.PodSpec {
image = s.imageRegistry + "/" + s.image
}
// Use the agent affinity from the policy status if it exists, otherwise fall back to the spec.
agentAffinity := s.cluster.Spec.AgentAffinity
if s.cluster.Status.Policy != nil && s.cluster.Status.Policy.AgentAffinity != nil {
log.V(1).Info("Using agent affinity from policy", "policyName", s.cluster.Status.PolicyName, "clusterName", s.cluster.Name)
agentAffinity = s.cluster.Status.Policy.AgentAffinity
}
podSpec := v1.PodSpec{
Affinity: agentAffinity,
HostNetwork: hostNetwork,
DNSPolicy: dnsPolicy,
ServiceAccountName: s.Name(),
@@ -186,6 +185,28 @@ func (s *SharedAgent) podSpec(ctx context.Context) v1.PodSpec {
},
},
},
{
Name: "webhook-certs",
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: WebhookSecretName(s.cluster.Name),
Items: []v1.KeyToPath{
{
Key: "tls.crt",
Path: "tls.crt",
},
{
Key: "tls.key",
Path: "tls.key",
},
{
Key: "ca.crt",
Path: "ca.crt",
},
},
},
},
},
},
Containers: []v1.Container{
{
@@ -221,6 +242,11 @@ func (s *SharedAgent) podSpec(ctx context.Context) v1.PodSpec {
MountPath: "/opt/rancher/k3k/",
ReadOnly: false,
},
{
Name: "webhook-certs",
MountPath: "/opt/rancher/k3k-webhook",
ReadOnly: false,
},
},
Ports: []v1.ContainerPort{
{
@@ -228,6 +254,11 @@ func (s *SharedAgent) podSpec(ctx context.Context) v1.PodSpec {
Protocol: v1.ProtocolTCP,
ContainerPort: int32(s.kubeletPort),
},
{
Name: "webhook-port",
Protocol: v1.ProtocolTCP,
ContainerPort: int32(s.webhookPort),
},
},
},
},
@@ -236,23 +267,6 @@ func (s *SharedAgent) podSpec(ctx context.Context) v1.PodSpec {
podSpec.ImagePullSecrets = append(podSpec.ImagePullSecrets, v1.LocalObjectReference{Name: imagePullSecret})
}
securityContext := s.cluster.Spec.SecurityContext
if s.cluster.Status.Policy != nil && s.cluster.Status.Policy.SecurityContext != nil {
log.V(1).Info("Using securityContext configuration from policy", "policyName", s.cluster.Status.PolicyName, "clusterName", s.cluster.Name)
securityContext = s.cluster.Status.Policy.SecurityContext
}
if securityContext != nil {
podSpec.Containers[0].SecurityContext = securityContext
}
runtimeClassName := s.cluster.Spec.RuntimeClassName
if s.cluster.Status.Policy != nil && s.cluster.Status.Policy.RuntimeClassName != nil {
log.V(1).Info("Using runtimeClassName from policy", "policyName", s.cluster.Status.PolicyName, "clusterName", s.cluster.Name)
runtimeClassName = s.cluster.Status.Policy.RuntimeClassName
}
podSpec.RuntimeClassName = runtimeClassName
return podSpec
}
@@ -279,6 +293,12 @@ func (s *SharedAgent) service(ctx context.Context) error {
Protocol: v1.ProtocolTCP,
Port: int32(s.kubeletPort),
},
{
Name: "webhook-server",
Protocol: v1.ProtocolTCP,
Port: int32(s.webhookPort),
TargetPort: intstr.FromInt32(int32(s.webhookPort)),
},
},
},
}
@@ -418,3 +438,94 @@ func (s *SharedAgent) roleBinding(ctx context.Context) error {
return s.ensureObject(ctx, roleBinding)
}
func (s *SharedAgent) webhookTLS(ctx context.Context) error {
webhookSecret := &v1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: WebhookSecretName(s.cluster.Name),
Namespace: s.cluster.Namespace,
},
}
key := ctrlruntimeclient.ObjectKeyFromObject(webhookSecret)
if err := s.client.Get(ctx, key, webhookSecret); err != nil {
if !apierrors.IsNotFound(err) {
return err
}
caPrivateKeyPEM, caCertPEM, err := newWebhookSelfSignedCACerts()
if err != nil {
return err
}
altNames := []string{s.Name(), s.cluster.Name}
webhookCert, webhookKey, err := newWebhookCerts(s.Name(), altNames, caPrivateKeyPEM, caCertPEM)
if err != nil {
return err
}
webhookSecret.Data = map[string][]byte{
"tls.crt": webhookCert,
"tls.key": webhookKey,
"ca.crt": caCertPEM,
"ca.key": caPrivateKeyPEM,
}
return s.ensureObject(ctx, webhookSecret)
}
// if the webhook secret is found we can skip re-creating it;
// ideally we should also check its validity
return nil
}
func newWebhookSelfSignedCACerts() ([]byte, []byte, error) {
// generate CA CERT/KEY
caPrivateKeyPEM, err := certutil.MakeEllipticPrivateKeyPEM()
if err != nil {
return nil, nil, err
}
caPrivateKey, err := certutil.ParsePrivateKeyPEM(caPrivateKeyPEM)
if err != nil {
return nil, nil, err
}
cfg := certutil.Config{
CommonName: fmt.Sprintf("k3k-webhook-ca@%d", time.Now().Unix()),
}
caCert, err := certutil.NewSelfSignedCACert(cfg, caPrivateKey.(crypto.Signer))
if err != nil {
return nil, nil, err
}
caCertPEM := certutil.EncodeCertPEM(caCert)
return caPrivateKeyPEM, caCertPEM, nil
}
func newWebhookCerts(commonName string, subAltNames []string, caPrivateKey, caCert []byte) ([]byte, []byte, error) {
// generate webhook cert bundle
altNames := certs.AddSANs(subAltNames)
oneYearExpiration := time.Until(time.Now().AddDate(1, 0, 0))
return certs.CreateClientCertKey(
commonName,
nil,
&altNames,
[]x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
oneYearExpiration,
string(caCert),
string(caPrivateKey),
)
}
func WebhookSecretName(clusterName string) string {
return controller.SafeConcatNameWithPrefix(clusterName, "webhook")
}
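
A quick way to sanity-check the bundle these helpers produce (and something a validity check in the skip path above could build on) is to verify the server certificate against the stored ca.crt with crypto/x509. A standalone sketch; the DNS name in main is illustrative:

```go
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
)

// verifyAgainstCA checks that certPEM chains to caPEM for dnsName with
// server-auth usage, mirroring how a consumer of the webhook secret
// could validate the bundle before serving.
func verifyAgainstCA(caPEM, certPEM []byte, dnsName string) error {
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(caPEM) {
		return fmt.Errorf("no CA certificates found in PEM input")
	}
	block, _ := pem.Decode(certPEM)
	if block == nil {
		return fmt.Errorf("no certificate found in PEM input")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return err
	}
	_, err = cert.Verify(x509.VerifyOptions{
		Roots:     pool,
		DNSName:   dnsName,
		KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
	})
	return err
}

func main() {
	// with real secret data this would take tls.crt, ca.crt and the agent service name
	fmt.Println(verifyAgainstCA(nil, nil, "example-agent"))
}
```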

View File

@@ -4,7 +4,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"gopkg.in/yaml.v3"
"gopkg.in/yaml.v2"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -17,6 +17,7 @@ func Test_sharedAgentData(t *testing.T) {
serviceName string
ip string
kubeletPort int
webhookPort int
token string
}
@@ -38,6 +39,7 @@ func Test_sharedAgentData(t *testing.T) {
},
},
kubeletPort: 10250,
webhookPort: 9443,
ip: "10.0.0.21",
serviceName: "service-name",
token: "dnjklsdjnksd892389238",
@@ -51,6 +53,7 @@ func Test_sharedAgentData(t *testing.T) {
"version": "v1.2.3",
"mirrorHostNodes": "false",
"kubeletPort": "10250",
"webhookPort": "9443",
},
},
{
@@ -70,6 +73,7 @@ func Test_sharedAgentData(t *testing.T) {
},
ip: "10.0.0.21",
kubeletPort: 10250,
webhookPort: 9443,
serviceName: "service-name",
token: "dnjklsdjnksd892389238",
},
@@ -82,6 +86,7 @@ func Test_sharedAgentData(t *testing.T) {
"version": "v1.2.3",
"mirrorHostNodes": "false",
"kubeletPort": "10250",
"webhookPort": "9443",
},
},
{
@@ -97,6 +102,7 @@ func Test_sharedAgentData(t *testing.T) {
},
},
kubeletPort: 10250,
webhookPort: 9443,
ip: "10.0.0.21",
serviceName: "service-name",
token: "dnjklsdjnksd892389238",
@@ -110,13 +116,14 @@ func Test_sharedAgentData(t *testing.T) {
"version": "v1.3.3",
"mirrorHostNodes": "false",
"kubeletPort": "10250",
"webhookPort": "9443",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
config := sharedAgentData(tt.args.cluster, tt.args.serviceName, tt.args.token, tt.args.ip, tt.args.kubeletPort)
config := sharedAgentData(tt.args.cluster, tt.args.serviceName, tt.args.token, tt.args.ip, tt.args.kubeletPort, tt.args.webhookPort)
data := make(map[string]string)
err := yaml.Unmarshal([]byte(config), data)
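
The test decodes the rendered config back into a flat map, which works because the template is plain key: value YAML and scalars such as the new webhookPort land as strings. A standalone round-trip of the template's visible tail (the leading cluster fields are abridged in the hunk above):

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	// tail of the sharedAgentData template, as visible in the hunk above
	config := fmt.Sprintf(`serviceName: %s
token: %v
mirrorHostNodes: %t
version: %s
webhookPort: %d
kubeletPort: %d`, "service-name", "dnjklsdjnksd892389238", false, "v1.2.3", 9443, 10250)

	data := make(map[string]string)
	if err := yaml.Unmarshal([]byte(config), data); err != nil {
		panic(err)
	}
	fmt.Println(data["webhookPort"], data["mirrorHostNodes"]) // 9443 false
}
```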

View File

@@ -10,7 +10,6 @@ import (
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/pkg/controller"
@@ -100,7 +99,7 @@ func (v *VirtualAgent) deployment(ctx context.Context) error {
"mode": "virtual",
},
}
podSpec := v.podSpec(ctx, image, name)
podSpec := v.podSpec(image, name)
if len(v.cluster.Spec.SecretMounts) > 0 {
vols, volMounts := mounts.BuildSecretsMountsVolumes(v.cluster.Spec.SecretMounts, "agent")
@@ -135,22 +134,13 @@ func (v *VirtualAgent) deployment(ctx context.Context) error {
return v.ensureObject(ctx, deployment)
}
func (v *VirtualAgent) podSpec(ctx context.Context, image, name string) v1.PodSpec {
log := ctrl.LoggerFrom(ctx)
func (v *VirtualAgent) podSpec(image, name string) v1.PodSpec {
var limit v1.ResourceList
args := v.cluster.Spec.AgentArgs
args = append([]string{"agent", "--config", "/opt/rancher/k3s/config.yaml"}, args...)
// Use the agent affinity from the policy status if it exists, otherwise fall back to the spec.
agentAffinity := v.cluster.Spec.AgentAffinity
if v.cluster.Status.Policy != nil && v.cluster.Status.Policy.AgentAffinity != nil {
log.V(1).Info("Using agent affinity from policy", "policyName", v.cluster.Status.PolicyName, "clusterName", v.cluster.Name)
agentAffinity = v.cluster.Status.Policy.AgentAffinity
}
podSpec := v1.PodSpec{
Affinity: agentAffinity,
NodeSelector: v.cluster.Spec.NodeSelector,
Volumes: []v1.Volume{
{
@@ -272,23 +262,5 @@ func (v *VirtualAgent) podSpec(ctx context.Context, image, name string) v1.PodSp
podSpec.ImagePullSecrets = append(podSpec.ImagePullSecrets, v1.LocalObjectReference{Name: imagePullSecret})
}
securityContext := v.cluster.Spec.SecurityContext
if v.cluster.Status.Policy != nil && v.cluster.Status.Policy.SecurityContext != nil {
log.V(1).Info("Using securityContext configuration from policy", "policyName", v.cluster.Status.PolicyName, "clusterName", v.cluster.Name)
securityContext = v.cluster.Status.Policy.SecurityContext
}
if securityContext != nil {
podSpec.Containers[0].SecurityContext = securityContext
}
runtimeClassName := v.cluster.Spec.RuntimeClassName
if v.cluster.Status.Policy != nil && v.cluster.Status.Policy.RuntimeClassName != nil {
log.V(1).Info("Using runtimeClassName from policy", "policyName", v.cluster.Status.PolicyName, "clusterName", v.cluster.Name)
runtimeClassName = v.cluster.Status.Policy.RuntimeClassName
}
podSpec.RuntimeClassName = runtimeClassName
return podSpec
}

View File

@@ -4,7 +4,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"gopkg.in/yaml.v3"
"gopkg.in/yaml.v2"
)
func Test_virtualAgentData(t *testing.T) {

View File

@@ -11,7 +11,6 @@ import (
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/discovery"
@@ -29,7 +28,6 @@ import (
v1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
rbacv1 "k8s.io/api/rbac/v1"
storagev1 "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
@@ -49,18 +47,11 @@ const (
clusterFinalizerName = "cluster.k3k.io/finalizer"
ClusterInvalidName = "system"
SyncEnabledLabelKey = "k3k.io/sync-enabled"
SyncSourceLabelKey = "k3k.io/sync-source"
SyncSourceHostLabel = "host"
defaultVirtualClusterCIDR = "10.52.0.0/16"
defaultVirtualServiceCIDR = "10.53.0.0/16"
defaultSharedClusterCIDR = "10.42.0.0/16"
defaultSharedServiceCIDR = "10.43.0.0/16"
memberRemovalTimeout = time.Minute * 1
storageClassEnabledIndexField = "spec.sync.storageClasses.enabled"
storageClassStatusEnabledIndexField = "status.policy.sync.storageClasses.enabled"
)
var (
@@ -124,82 +115,15 @@ func Add(ctx context.Context, mgr manager.Manager, config *Config, maxConcurrent
},
}
// index the 'spec.sync.storageClasses.enabled' field
err = mgr.GetCache().IndexField(ctx, &v1beta1.Cluster{}, storageClassEnabledIndexField, func(rawObj client.Object) []string {
vc := rawObj.(*v1beta1.Cluster)
if vc.Spec.Sync != nil && vc.Spec.Sync.StorageClasses.Enabled {
return []string{"true"}
}
return []string{"false"}
})
if err != nil {
return err
}
// index the 'status.policy.sync.storageClasses.enabled' field
err = mgr.GetCache().IndexField(ctx, &v1beta1.Cluster{}, storageClassStatusEnabledIndexField, func(rawObj client.Object) []string {
vc := rawObj.(*v1beta1.Cluster)
if vc.Status.Policy != nil && vc.Status.Policy.Sync != nil && vc.Status.Policy.Sync.StorageClasses.Enabled {
return []string{"true"}
}
return []string{"false"}
})
if err != nil {
return err
}
return ctrl.NewControllerManagedBy(mgr).
For(&v1beta1.Cluster{}).
Watches(&v1.Namespace{}, namespaceEventHandler(&reconciler)).
Watches(&storagev1.StorageClass{},
handler.EnqueueRequestsFromMapFunc(reconciler.mapStorageClassToCluster),
).
Owns(&apps.StatefulSet{}).
Owns(&v1.Service{}).
WithOptions(ctrlcontroller.Options{MaxConcurrentReconciles: maxConcurrentReconciles}).
Complete(&reconciler)
}
func (r *ClusterReconciler) mapStorageClassToCluster(ctx context.Context, obj client.Object) []reconcile.Request {
log := ctrl.LoggerFrom(ctx)
if _, ok := obj.(*storagev1.StorageClass); !ok {
return nil
}
// Merge and deduplicate clusters
allClusters := make(map[types.NamespacedName]struct{})
var specClusterList v1beta1.ClusterList
if err := r.Client.List(ctx, &specClusterList, client.MatchingFields{storageClassEnabledIndexField: "true"}); err != nil {
log.Error(err, "error listing clusters with spec sync enabled for storageclass sync")
} else {
for _, cluster := range specClusterList.Items {
allClusters[client.ObjectKeyFromObject(&cluster)] = struct{}{}
}
}
var statusClusterList v1beta1.ClusterList
if err := r.Client.List(ctx, &statusClusterList, client.MatchingFields{storageClassStatusEnabledIndexField: "true"}); err != nil {
log.Error(err, "error listing clusters with status sync enabled for storageclass sync")
} else {
for _, cluster := range statusClusterList.Items {
allClusters[client.ObjectKeyFromObject(&cluster)] = struct{}{}
}
}
requests := make([]reconcile.Request, 0, len(allClusters))
for key := range allClusters {
requests = append(requests, reconcile.Request{NamespacedName: key})
}
return requests
}
func namespaceEventHandler(r *ClusterReconciler) handler.Funcs {
return handler.Funcs{
// We don't need to update for create or delete events
@@ -426,22 +350,11 @@ func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1beta1.Clus
return err
}
if err := c.bindClusterRoles(ctx, cluster); err != nil {
return err
}
if err := c.ensureKubeconfigSecret(ctx, cluster, serviceIP, 443); err != nil {
return err
}
// Important: if you need to call the Server API of the Virtual Cluster
// this needs to be done AFTER the kubeconfig has been generated
if err := c.ensureStorageClasses(ctx, cluster); err != nil {
return err
}
return nil
return c.bindClusterRoles(ctx, cluster)
}
// ensureBootstrapSecret will create or update the Secret containing the bootstrap data from the k3s server
@@ -595,10 +508,6 @@ func (c *ClusterReconciler) ensureNetworkPolicy(ctx context.Context, cluster *v1
Except: []string{cluster.Status.ClusterCIDR},
},
},
},
},
{
To: []networkingv1.NetworkPolicyPeer{
{
NamespaceSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
@@ -606,10 +515,6 @@ func (c *ClusterReconciler) ensureNetworkPolicy(ctx context.Context, cluster *v1
},
},
},
},
},
{
To: []networkingv1.NetworkPolicyPeer{
{
NamespaceSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
@@ -715,120 +620,6 @@ func (c *ClusterReconciler) ensureIngress(ctx context.Context, cluster *v1beta1.
return nil
}
func (c *ClusterReconciler) ensureStorageClasses(ctx context.Context, cluster *v1beta1.Cluster) error {
log := ctrl.LoggerFrom(ctx)
log.V(1).Info("Ensuring cluster StorageClasses")
virtualClient, err := newVirtualClient(ctx, c.Client, cluster.Name, cluster.Namespace)
if err != nil {
return fmt.Errorf("failed creating virtual client: %w", err)
}
appliedSync := cluster.Spec.Sync.DeepCopy()
// If a policy is applied to the virtual cluster we need to use its SyncConfig, if available
if cluster.Status.Policy != nil && cluster.Status.Policy.Sync != nil {
appliedSync = cluster.Status.Policy.Sync
}
// If storageclass sync is disabled, clean up any managed storage classes.
if appliedSync == nil || !appliedSync.StorageClasses.Enabled {
err := virtualClient.DeleteAllOf(ctx, &storagev1.StorageClass{}, client.MatchingLabels{SyncSourceLabelKey: SyncSourceHostLabel})
return client.IgnoreNotFound(err)
}
var hostStorageClasses storagev1.StorageClassList
if err := c.Client.List(ctx, &hostStorageClasses); err != nil {
return fmt.Errorf("failed listing host storageclasses: %w", err)
}
// filter out the StorageClasses disabled for the sync, and the ones not matching the selector
filteredHostStorageClasses := make(map[string]storagev1.StorageClass)
for _, sc := range hostStorageClasses.Items {
syncEnabled, found := sc.Labels[SyncEnabledLabelKey]
// if sync is disabled -> continue
if found && syncEnabled != "true" {
log.V(1).Info("sync is disabled", "sc-name", sc.Name)
continue
}
// if selector doesn't match -> continue
// an empty selector matches everything
selector := labels.SelectorFromSet(appliedSync.StorageClasses.Selector)
if !selector.Matches(labels.Set(sc.Labels)) {
log.V(1).Info("selector not matching", "sc-name", sc.Name)
continue
}
log.V(1).Info("keeping storageclass", "sc-name", sc.Name)
filteredHostStorageClasses[sc.Name] = sc
}
var virtStorageClasses storagev1.StorageClassList
if err = virtualClient.List(ctx, &virtStorageClasses, client.MatchingLabels{SyncSourceLabelKey: SyncSourceHostLabel}); err != nil {
return fmt.Errorf("failed listing virtual storageclasses: %w", err)
}
// delete StorageClasses with the sync disabled
for _, sc := range virtStorageClasses.Items {
if _, found := filteredHostStorageClasses[sc.Name]; !found {
log.V(1).Info("deleting storageclass", "sc-name", sc.Name)
if errDelete := virtualClient.Delete(ctx, &sc); errDelete != nil {
log.Error(errDelete, "failed to delete virtual storageclass", "name", sc.Name)
err = errors.Join(err, errDelete)
}
}
}
for _, hostSc := range filteredHostStorageClasses {
log.V(1).Info("updating storageclass", "sc-name", hostSc.Name)
virtualSc := hostSc.DeepCopy()
virtualSc.ObjectMeta = metav1.ObjectMeta{
Name: hostSc.Name,
Labels: hostSc.Labels,
Annotations: hostSc.Annotations,
}
_, errCreateOrUpdate := controllerutil.CreateOrUpdate(ctx, virtualClient, virtualSc, func() error {
virtualSc.Annotations = hostSc.Annotations
virtualSc.Labels = hostSc.Labels
if len(virtualSc.Labels) == 0 {
virtualSc.Labels = make(map[string]string)
}
virtualSc.Labels[SyncSourceLabelKey] = SyncSourceHostLabel
virtualSc.Provisioner = hostSc.Provisioner
virtualSc.Parameters = hostSc.Parameters
virtualSc.ReclaimPolicy = hostSc.ReclaimPolicy
virtualSc.MountOptions = hostSc.MountOptions
virtualSc.AllowVolumeExpansion = hostSc.AllowVolumeExpansion
virtualSc.VolumeBindingMode = hostSc.VolumeBindingMode
virtualSc.AllowedTopologies = hostSc.AllowedTopologies
return nil
})
if errCreateOrUpdate != nil {
log.Error(errCreateOrUpdate, "failed to create or update virtual storageclass", "name", virtualSc.Name)
err = errors.Join(err, errCreateOrUpdate)
}
}
if err != nil {
return fmt.Errorf("failed to sync storageclasses: %w", err)
}
return nil
}
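
The filtering this backport removes boils down to a small predicate over labels: an explicit opt-out label wins, then the policy selector must match. A standalone sketch of that logic, using the same label key:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

const syncEnabledLabelKey = "k3k.io/sync-enabled"

// shouldSync reproduces the two gates of the removed loop: an explicit
// k3k.io/sync-enabled opt-out wins, then the policy selector must match
// (an empty selector matches everything).
func shouldSync(objLabels, selector map[string]string) bool {
	if v, ok := objLabels[syncEnabledLabelKey]; ok && v != "true" {
		return false
	}
	return labels.SelectorFromSet(labels.Set(selector)).Matches(labels.Set(objLabels))
}

func main() {
	fmt.Println(shouldSync(map[string]string{syncEnabledLabelKey: "false"}, nil))                 // false: explicit opt-out
	fmt.Println(shouldSync(map[string]string{"tier": "fast"}, map[string]string{"tier": "fast"})) // true: selector matches
	fmt.Println(shouldSync(nil, nil))                                                             // true: empty selector
}
```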
func (c *ClusterReconciler) server(ctx context.Context, cluster *v1beta1.Cluster, server *server.Server) error {
log := ctrl.LoggerFrom(ctx)
@@ -910,6 +701,7 @@ func (c *ClusterReconciler) ensureAgent(ctx context.Context, cluster *v1beta1.Cl
} else {
// Assign ports from the pool if the shared agent enabled mirroring of host nodes
kubeletPort := 10250
webhookPort := 9443
if cluster.Spec.MirrorHostNodes {
var err error
@@ -920,9 +712,16 @@ func (c *ClusterReconciler) ensureAgent(ctx context.Context, cluster *v1beta1.Cl
}
cluster.Status.KubeletPort = kubeletPort
webhookPort, err = c.PortAllocator.AllocateWebhookPort(ctx, cluster.Name, cluster.Namespace)
if err != nil {
return err
}
cluster.Status.WebhookPort = webhookPort
}
agentEnsurer = agent.NewSharedAgent(config, serviceIP, c.SharedAgentImage, c.SharedAgentImagePullPolicy, token, kubeletPort, c.AgentImagePullSecrets)
agentEnsurer = agent.NewSharedAgent(config, serviceIP, c.SharedAgentImage, c.SharedAgentImagePullPolicy, token, kubeletPort, webhookPort, c.AgentImagePullSecrets)
}
return agentEnsurer.EnsureResources(ctx)
@@ -943,6 +742,11 @@ func (c *ClusterReconciler) validate(cluster *v1beta1.Cluster, policy v1beta1.Vi
}
}
// validate sync policy
if !equality.Semantic.DeepEqual(cluster.Spec.Sync, policy.Spec.Sync) {
return fmt.Errorf("sync configuration %v is not allowed by the policy %q", cluster.Spec.Sync, policy.Name)
}
return nil
}

View File

@@ -40,13 +40,17 @@ func (c *ClusterReconciler) finalizeCluster(ctx context.Context, cluster *v1beta
return reconcile.Result{}, err
}
// Deallocate ports for kubelet if used
// Deallocate ports for kubelet and webhook if used
if cluster.Spec.Mode == v1beta1.SharedClusterMode && cluster.Spec.MirrorHostNodes {
log.V(1).Info("dellocating ports for kubelet")
log.V(1).Info("dellocating ports for kubelet and webhook")
if err := c.PortAllocator.DeallocateKubeletPort(ctx, cluster.Name, cluster.Namespace, cluster.Status.KubeletPort); err != nil {
return reconcile.Result{}, err
}
if err := c.PortAllocator.DeallocateWebhookPort(ctx, cluster.Name, cluster.Namespace, cluster.Status.WebhookPort); err != nil {
return reconcile.Result{}, err
}
}
// delete API server lease

View File

@@ -17,7 +17,6 @@ import (
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
@@ -54,18 +53,8 @@ func New(cluster *v1beta1.Cluster, client client.Client, token, image, imagePull
}
}
func (s *Server) podSpec(ctx context.Context, image, name string, persistent bool, startupCmd string) v1.PodSpec {
log := ctrl.LoggerFrom(ctx)
// Use the server affinity from the policy status if it exists, otherwise fall back to the spec.
serverAffinity := s.cluster.Spec.ServerAffinity
if s.cluster.Status.Policy != nil && s.cluster.Status.Policy.ServerAffinity != nil {
log.V(1).Info("Using server affinity from policy", "policyName", s.cluster.Status.PolicyName, "clusterName", s.cluster.Name)
serverAffinity = s.cluster.Status.Policy.ServerAffinity
}
func (s *Server) podSpec(image, name string, persistent bool, startupCmd string) v1.PodSpec {
podSpec := v1.PodSpec{
Affinity: serverAffinity,
NodeSelector: s.cluster.Spec.NodeSelector,
PriorityClassName: s.cluster.Spec.PriorityClass,
Volumes: []v1.Volume{
@@ -246,24 +235,6 @@ func (s *Server) podSpec(ctx context.Context, image, name string, persistent boo
}
}
securityContext := s.cluster.Spec.SecurityContext
if s.cluster.Status.Policy != nil && s.cluster.Status.Policy.SecurityContext != nil {
log.V(1).Info("Using securityContext configuration from policy", "policyName", s.cluster.Status.PolicyName, "clusterName", s.cluster.Name)
securityContext = s.cluster.Status.Policy.SecurityContext
}
if securityContext != nil {
podSpec.Containers[0].SecurityContext = securityContext
}
runtimeClassName := s.cluster.Spec.RuntimeClassName
if s.cluster.Status.Policy != nil && s.cluster.Status.Policy.RuntimeClassName != nil {
log.V(1).Info("Using runtimeClassName from policy", "policyName", s.cluster.Status.PolicyName, "clusterName", s.cluster.Name)
runtimeClassName = s.cluster.Status.Policy.RuntimeClassName
}
podSpec.RuntimeClassName = runtimeClassName
// specify resource limits if specified for the servers.
if s.cluster.Spec.ServerLimit != nil {
podSpec.Containers[0].Resources = v1.ResourceRequirements{
@@ -350,7 +321,7 @@ func (s *Server) StatefulServer(ctx context.Context) (*apps.StatefulSet, error)
return nil, err
}
podSpec := s.podSpec(ctx, image, name, persistent, startupCommand)
podSpec := s.podSpec(image, name, persistent, startupCommand)
podSpec.Volumes = append(podSpec.Volumes, volumes...)
podSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts, volumeMounts...)

View File

@@ -473,14 +473,9 @@ func (c *VirtualClusterPolicyReconciler) reconcileClusters(ctx context.Context,
origStatus := cluster.Status.DeepCopy()
cluster.Status.Policy = &v1beta1.AppliedPolicy{
Name: policy.Name,
PriorityClass: &policy.Spec.DefaultPriorityClass,
NodeSelector: policy.Spec.DefaultNodeSelector,
Sync: policy.Spec.Sync,
ServerAffinity: policy.Spec.DefaultServerAffinity,
AgentAffinity: policy.Spec.DefaultAgentAffinity,
SecurityContext: policy.Spec.SecurityContext,
RuntimeClassName: policy.Spec.RuntimeClassName,
Name: policy.Name,
PriorityClass: &policy.Spec.DefaultPriorityClass,
NodeSelector: policy.Spec.DefaultNodeSelector,
}
if !reflect.DeepEqual(origStatus, &cluster.Status) {

View File

@@ -100,7 +100,7 @@ var _ = When("using the k3kcli", Label("cli"), func() {
DeleteNamespaces(clusterNamespace)
})
_, stderr, err = K3kcli("cluster", "create", "--version", k3sVersion, clusterName)
_, stderr, err = K3kcli("cluster", "create", "--version", "v1.33.5-k3s1", clusterName)
Expect(err).To(Not(HaveOccurred()), string(stderr))
Expect(stderr).To(ContainSubstring("You can start using the cluster"))
})
@@ -255,7 +255,7 @@ var _ = When("using the k3kcli", Label("cli"), func() {
})
// Create the cluster with initial version
_, stderr, err = K3kcli("cluster", "create", "--version", k3sOldVersion, "--namespace", clusterNamespace, clusterName)
_, stderr, err = K3kcli("cluster", "create", "--version", "v1.33.0-k3s1", "--namespace", clusterNamespace, clusterName)
Expect(err).To(Not(HaveOccurred()), string(stderr))
Expect(stderr).To(ContainSubstring("You can start using the cluster"))
@@ -293,7 +293,7 @@ var _ = When("using the k3kcli", Label("cli"), func() {
Expect(stderr).To(ContainSubstring("You can start using the cluster"))
// Attempt to downgrade should fail
_, stderr, err = K3kcli("cluster", "update", "-y", "--version", k3sOldVersion, "--namespace", clusterNamespace, clusterName)
_, stderr, err = K3kcli("cluster", "update", "-y", "--version", "v1.33.0-k3s1", "--namespace", clusterNamespace, clusterName)
Expect(err).To(HaveOccurred())
Expect(stderr).To(ContainSubstring("downgrading cluster version is not supported"))

View File

@@ -23,7 +23,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
v1 "k8s.io/api/core/v1"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
@@ -217,29 +216,10 @@ var _ = AfterSuite(func() {
Expect(err).To(Not(HaveOccurred()))
writeLogs("k3s.log", k3sLogs)
// dump k3k controller logs
k3kLogs := getK3kLogs(ctx)
writeLogs("k3k.log", k3kLogs)
testcontainers.CleanupContainer(GinkgoTB(), k3sContainer)
}
})
func getK3kLogs(ctx context.Context) io.ReadCloser {
var podList v1.PodList
err := k8sClient.List(ctx, &podList, &client.ListOptions{Namespace: k3kNamespace})
Expect(err).To(Not(HaveOccurred()))
Expect(podList.Items).NotTo(BeEmpty())
k3kPod := podList.Items[0]
req := k8s.CoreV1().Pods(k3kPod.Namespace).GetLogs(k3kPod.Name, &v1.PodLogOptions{})
podLogs, err := req.Stream(ctx)
Expect(err).To(Not(HaveOccurred()))
return podLogs
}
func writeLogs(filename string, logs io.ReadCloser) {
logsStr, err := io.ReadAll(logs)
Expect(err).To(Not(HaveOccurred()))

View File

@@ -1,44 +0,0 @@
package k3k_test
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = When("creating a shared mode cluster", Label(e2eTestLabel), Label(slowTestsLabel), func() {
var virtualCluster *VirtualCluster
BeforeEach(func() {
namespace := NewNamespace()
DeferCleanup(func() {
DeleteNamespaces(namespace.Name)
})
cluster := NewCluster(namespace.Name)
CreateCluster(cluster)
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
virtualCluster = &VirtualCluster{
Cluster: cluster,
RestConfig: restConfig,
Client: client,
}
})
It("creates nodes with the worker role", func() {
Eventually(func(g Gomega) {
nodes, err := virtualCluster.Client.CoreV1().Nodes().List(GinkgoT().Context(), metav1.ListOptions{})
g.Expect(err).To(Not(HaveOccurred()))
g.Expect(nodes.Items).To(HaveLen(1))
g.Expect(nodes.Items[0].Labels).To(HaveKeyWithValue("node-role.kubernetes.io/worker", "true"))
}).
WithTimeout(time.Minute).
WithPolling(time.Second).
Should(Succeed())
})
})

View File

@@ -337,82 +337,6 @@ var _ = Context("In a shared cluster", Label(e2eTestLabel), Ordered, func() {
})
})
When("creating a Pod with downward API variables in environment variable", func() {
var virtualPod *v1.Pod
BeforeEach(func() {
ctx := context.Background()
var err error
p := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "nginx-",
Namespace: "default",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "nginx",
Image: "nginx",
Env: []v1.EnvVar{
{
Name: "POD_NAME",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "metadata.name",
},
},
},
{
Name: "STATUS_POD_IP",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "status.podIP",
},
},
},
},
},
},
},
}
virtualPod, err = virtualCluster.Client.CoreV1().Pods(p.Namespace).Create(ctx, p, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
})
It("should be scheduled and running in the virtual cluster", func() {
ctx := context.Background()
Eventually(func(g Gomega) {
pod, err := virtualCluster.Client.CoreV1().Pods(virtualPod.Namespace).Get(ctx, virtualPod.Name, metav1.GetOptions{})
g.Expect(err).NotTo(HaveOccurred())
g.Expect(pod.Status.Phase).To(Equal(v1.PodRunning))
g.Expect(pod.Status.PodIP).NotTo(BeEmpty())
}).
WithPolling(time.Second).
WithTimeout(time.Minute).
Should(Succeed())
})
It("should be scheduled and running in the host cluster", func() {
ctx := context.Background()
Eventually(func(g Gomega) {
translator := translate.NewHostTranslator(virtualCluster.Cluster)
hostPodName := translator.NamespacedName(virtualPod)
pod, err := k8s.CoreV1().Pods(hostPodName.Namespace).Get(ctx, hostPodName.Name, metav1.GetOptions{})
g.Expect(err).NotTo(HaveOccurred())
g.Expect(pod.Status.Phase).To(Equal(v1.PodRunning))
}).
WithPolling(time.Second).
WithTimeout(time.Minute).
Should(Succeed())
})
})
When("installing the nginx-ingress controller", func() {
BeforeAll(func() {
By("installing the nginx-ingress controller")

View File

@@ -1,200 +0,0 @@
package k3k_test
import (
"context"
"time"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
corev1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/controller/policy"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = When("a shared mode cluster is created in a namespace with a policy", Ordered, Label(e2eTestLabel), func() {
var (
ctx context.Context
virtualCluster *VirtualCluster
vcp *v1beta1.VirtualClusterPolicy
)
BeforeAll(func() {
ctx = context.Background()
// 1. Create StorageClasses in host
storageClassEnabled := &storagev1.StorageClass{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "sc-policy-enabled-",
Labels: map[string]string{
cluster.SyncEnabledLabelKey: "true",
},
},
Provisioner: "my-provisioner",
}
var err error
storageClassEnabled, err = k8s.StorageV1().StorageClasses().Create(ctx, storageClassEnabled, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
storageClassDisabled := &storagev1.StorageClass{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "sc-policy-disabled-",
Labels: map[string]string{
cluster.SyncEnabledLabelKey: "false",
},
},
Provisioner: "my-provisioner",
}
storageClassDisabled, err = k8s.StorageV1().StorageClasses().Create(ctx, storageClassDisabled, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
// 2. Create VirtualClusterPolicy with StorageClass sync enabled
vcp = &v1beta1.VirtualClusterPolicy{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "vcp-sync-sc-",
},
Spec: v1beta1.VirtualClusterPolicySpec{
Sync: &v1beta1.SyncConfig{
StorageClasses: v1beta1.StorageClassSyncConfig{
Enabled: true,
},
},
},
}
err = k8sClient.Create(ctx, vcp)
Expect(err).To(Not(HaveOccurred()))
// 3. Create Namespace with policy label
ns := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "ns-vcp-",
Labels: map[string]string{
policy.PolicyNameLabelKey: vcp.Name,
},
},
}
// We use the k8s clientset for namespace creation to stay consistent with other tests
ns, err = k8s.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
// 4. Create VirtualCluster in that namespace
// The cluster doesn't have storage class sync enabled in its spec
clusterObj := NewCluster(ns.Name)
clusterObj.Spec.Sync = &v1beta1.SyncConfig{
StorageClasses: v1beta1.StorageClassSyncConfig{
Enabled: false,
},
}
clusterObj.Spec.Expose.NodePort.ServerPort = ptr.To[int32](30000)
CreateCluster(clusterObj)
client, restConfig, kubeconfig := NewVirtualK8sClientAndKubeconfig(clusterObj)
virtualCluster = &VirtualCluster{
Cluster: clusterObj,
RestConfig: restConfig,
Client: client,
Kubeconfig: kubeconfig,
}
DeferCleanup(func() {
DeleteNamespaces(ns.Name)
err = k8s.StorageV1().StorageClasses().Delete(ctx, storageClassEnabled.Name, metav1.DeleteOptions{})
Expect(err).To(Not(HaveOccurred()))
err = k8s.StorageV1().StorageClasses().Delete(ctx, storageClassDisabled.Name, metav1.DeleteOptions{})
Expect(err).To(Not(HaveOccurred()))
err = k8sClient.Delete(ctx, vcp)
Expect(err).To(Not(HaveOccurred()))
})
})
It("has the storage classes sync enabled from the policy", func() {
Eventually(func(g Gomega) {
key := client.ObjectKeyFromObject(virtualCluster.Cluster)
g.Expect(k8sClient.Get(ctx, key, virtualCluster.Cluster)).To(Succeed())
g.Expect(virtualCluster.Cluster.Status.Policy).To(Not(BeNil()))
g.Expect(virtualCluster.Cluster.Status.Policy.Sync).To(Not(BeNil()))
g.Expect(virtualCluster.Cluster.Status.Policy.Sync.StorageClasses.Enabled).To(BeTrue())
}).
WithTimeout(time.Second * 30).
WithPolling(time.Second).
Should(Succeed())
})
It("will sync host storage classes with the sync enabled in the host", func() {
Eventually(func(g Gomega) {
hostStorageClasses, err := k8s.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
g.Expect(err).To(Not(HaveOccurred()))
for _, hostSC := range hostStorageClasses.Items {
// We only care about the storage classes we created for this test to avoid noise
if hostSC.Labels[cluster.SyncEnabledLabelKey] == "true" {
_, err := virtualCluster.Client.StorageV1().StorageClasses().Get(ctx, hostSC.Name, metav1.GetOptions{})
g.Expect(err).To(Not(HaveOccurred()))
}
}
}).
WithPolling(time.Second).
WithTimeout(time.Second * 60).
Should(Succeed())
})
It("will not sync host storage classes with the sync disabled in the host", func() {
Eventually(func(g Gomega) {
hostStorageClasses, err := k8s.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
g.Expect(err).To(Not(HaveOccurred()))
for _, hostSC := range hostStorageClasses.Items {
if hostSC.Labels[cluster.SyncEnabledLabelKey] == "false" {
_, err := virtualCluster.Client.StorageV1().StorageClasses().Get(ctx, hostSC.Name, metav1.GetOptions{})
g.Expect(err).To(HaveOccurred())
g.Expect(apierrors.IsNotFound(err)).To(BeTrue())
}
}
}).
WithPolling(time.Second).
WithTimeout(time.Second * 60).
Should(Succeed())
})
When("disabling the storage class sync in the policy", Ordered, func() {
BeforeAll(func() {
original := vcp.DeepCopy()
vcp.Spec.Sync.StorageClasses.Enabled = false
err := k8sClient.Patch(ctx, vcp, client.MergeFrom(original))
Expect(err).To(Not(HaveOccurred()))
})
It("will remove the synced storage classes from the virtual cluster", func() {
Eventually(func(g Gomega) {
hostStorageClasses, err := k8s.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
g.Expect(err).To(Not(HaveOccurred()))
for _, hostSC := range hostStorageClasses.Items {
if hostSC.Labels[cluster.SyncEnabledLabelKey] == "true" {
_, err := virtualCluster.Client.StorageV1().StorageClasses().Get(ctx, hostSC.Name, metav1.GetOptions{})
g.Expect(err).To(HaveOccurred())
g.Expect(apierrors.IsNotFound(err)).To(BeTrue())
}
}
}).
WithPolling(time.Second).
WithTimeout(time.Second * 60).
Should(Succeed())
})
})
})

View File

@@ -1,194 +0,0 @@
package k3k_test
import (
"context"
"time"
"sigs.k8s.io/controller-runtime/pkg/client"
storagev1 "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/controller/cluster"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = When("a shared mode cluster is created", Ordered, Label(e2eTestLabel), func() {
var (
ctx context.Context
virtualCluster *VirtualCluster
)
BeforeAll(func() {
ctx = context.Background()
virtualCluster = NewVirtualCluster()
storageClassEnabled := &storagev1.StorageClass{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "sc-",
Labels: map[string]string{
cluster.SyncEnabledLabelKey: "true",
},
},
Provisioner: "my-provisioner",
}
storageClassEnabled, err := k8s.StorageV1().StorageClasses().Create(ctx, storageClassEnabled, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
storageClassDisabled := &storagev1.StorageClass{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "sc-",
Labels: map[string]string{
cluster.SyncEnabledLabelKey: "false",
},
},
Provisioner: "my-provisioner",
}
storageClassDisabled, err = k8s.StorageV1().StorageClasses().Create(ctx, storageClassDisabled, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
DeferCleanup(func() {
DeleteNamespaces(virtualCluster.Cluster.Namespace)
err = k8s.StorageV1().StorageClasses().Delete(ctx, storageClassEnabled.Name, metav1.DeleteOptions{})
Expect(err).To(Not(HaveOccurred()))
err = k8s.StorageV1().StorageClasses().Delete(ctx, storageClassDisabled.Name, metav1.DeleteOptions{})
Expect(err).To(Not(HaveOccurred()))
})
})
It("has disabled the storage classes sync", func() {
Expect(virtualCluster.Cluster.Spec.Sync).To(Not(BeNil()))
Expect(virtualCluster.Cluster.Spec.Sync.StorageClasses.Enabled).To(BeFalse())
})
It("doesn't have storage classes", func() {
virtualStorageClasses, err := virtualCluster.Client.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
Expect(err).To(Not(HaveOccurred()))
Expect(virtualStorageClasses.Items).To(HaveLen(0))
})
It("has some storage classes in the host", func() {
hostStorageClasses, err := k8s.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
Expect(err).To(Not(HaveOccurred()))
Expect(hostStorageClasses.Items).To(Not(HaveLen(0)))
})
It("can create storage classes in the virtual cluster", func() {
storageClass := &storagev1.StorageClass{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "sc-",
},
Provisioner: "my-provisioner",
}
storageClass, err := virtualCluster.Client.StorageV1().StorageClasses().Create(ctx, storageClass, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
virtualStorageClasses, err := virtualCluster.Client.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
Expect(err).To(Not(HaveOccurred()))
Expect(virtualStorageClasses.Items).To(HaveLen(1))
Expect(virtualStorageClasses.Items[0].Name).To(Equal(storageClass.Name))
})
When("enabling the storage class sync", Ordered, func() {
BeforeAll(func() {
GinkgoWriter.Println("Enabling the storage class sync")
original := virtualCluster.Cluster.DeepCopy()
virtualCluster.Cluster.Spec.Sync.StorageClasses.Enabled = true
err := k8sClient.Patch(ctx, virtualCluster.Cluster, client.MergeFrom(original))
Expect(err).To(Not(HaveOccurred()))
Eventually(func(g Gomega) {
key := client.ObjectKeyFromObject(virtualCluster.Cluster)
g.Expect(k8sClient.Get(ctx, key, virtualCluster.Cluster)).To(Succeed())
g.Expect(virtualCluster.Cluster.Spec.Sync.StorageClasses.Enabled).To(BeTrue())
}).
WithTimeout(time.Second * 10).
WithPolling(time.Second).
Should(Succeed())
})
It("will sync host storage classes with the sync enabled", func() {
Eventually(func(g Gomega) {
hostStorageClasses, err := k8s.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
g.Expect(err).To(Not(HaveOccurred()))
for _, hostSC := range hostStorageClasses.Items {
_, err := virtualCluster.Client.StorageV1().StorageClasses().Get(ctx, hostSC.Name, metav1.GetOptions{})
if syncEnabled, found := hostSC.Labels[cluster.SyncEnabledLabelKey]; !found || syncEnabled == "true" {
g.Expect(err).To(Not(HaveOccurred()))
}
}
}).
MustPassRepeatedly(5).
WithPolling(time.Second).
WithTimeout(time.Second * 30).
Should(Succeed())
})
It("will not sync host storage classes with the sync disabled", func() {
Eventually(func(g Gomega) {
hostStorageClasses, err := k8s.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
g.Expect(err).To(Not(HaveOccurred()))
for _, hostSC := range hostStorageClasses.Items {
_, err := virtualCluster.Client.StorageV1().StorageClasses().Get(ctx, hostSC.Name, metav1.GetOptions{})
if hostSC.Labels[cluster.SyncEnabledLabelKey] == "false" {
g.Expect(err).To(HaveOccurred())
g.Expect(apierrors.IsNotFound(err)).To(BeTrue())
}
}
}).
MustPassRepeatedly(5).
WithPolling(time.Second).
WithTimeout(time.Second * 30).
Should(Succeed())
})
})
When("editing a synced storage class in the host cluster", Ordered, func() {
var syncedStorageClass *storagev1.StorageClass
BeforeAll(func() {
hostStorageClasses, err := k8s.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
Expect(err).To(Not(HaveOccurred()))
for _, hostSC := range hostStorageClasses.Items {
if syncEnabled, found := hostSC.Labels[cluster.SyncEnabledLabelKey]; !found || syncEnabled == "true" {
syncedStorageClass = &hostSC
break
}
}
Expect(syncedStorageClass).To(Not(BeNil()))
syncedStorageClass.Labels["foo"] = "bar"
_, err = k8s.StorageV1().StorageClasses().Update(ctx, syncedStorageClass, metav1.UpdateOptions{})
Expect(err).To(Not(HaveOccurred()))
})
It("will update the synced storage class in the virtual cluster", func() {
Eventually(func(g Gomega) {
virtualSC, err := virtualCluster.Client.StorageV1().StorageClasses().Get(ctx, syncedStorageClass.Name, metav1.GetOptions{})
g.Expect(err).To(Not(HaveOccurred()))
g.Expect(virtualSC.Labels).To(HaveKeyWithValue("foo", "bar"))
}).
MustPassRepeatedly(5).
WithPolling(time.Second).
WithTimeout(time.Second * 30).
Should(Succeed())
})
})
})
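Aside: nearly every spec in the file above leans on the same Gomega polling idiom. Inside Eventually(func(g Gomega) {...}), assertions must go through the injected g: a failure on g marks only that polling attempt as failed so Eventually retries, whereas the package-level Expect would abort the spec on the first attempt. A self-contained sketch of the pattern, with waitFor as a hypothetical helper name:

package example

import (
	"time"

	. "github.com/onsi/gomega"
)

// waitFor polls check until it succeeds or the timeout elapses. Failures
// inside the callback are recorded on g, so Eventually retries them
// instead of failing the spec outright.
func waitFor(check func() (bool, error)) {
	Eventually(func(g Gomega) {
		ok, err := check()
		g.Expect(err).NotTo(HaveOccurred())
		g.Expect(ok).To(BeTrue())
	}).
		WithPolling(time.Second).
		WithTimeout(30 * time.Second).
		Should(Succeed())
}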

View File

@@ -479,7 +479,7 @@ var _ = When("a shared mode cluster update its version", Label(e2eTestLabel), La
cluster := NewCluster(namespace.Name)
// Add initial old version
cluster.Spec.Version = k3sOldVersion
cluster.Spec.Version = "v1.33.0-k3s1"
// need to enable persistence for this
cluster.Spec.Persistence = v1beta1.PersistenceConfig{
@@ -568,7 +568,7 @@ var _ = When("a virtual mode cluster update its version", Label(e2eTestLabel), L
cluster := NewCluster(namespace.Name)
// Add initial old version
cluster.Spec.Version = k3sOldVersion
cluster.Spec.Version = "v1.33.0-k3s1"
cluster.Spec.Mode = v1beta1.VirtualClusterMode
cluster.Spec.Agents = ptr.To[int32](1)

View File

@@ -48,8 +48,7 @@ import (
const (
k3kNamespace = "k3k-system"
k3sVersion = "v1.35.2-k3s1"
k3sOldVersion = "v1.35.0-k3s1"
k3sVersion = "v1.33.7-k3s1"
e2eTestLabel = "e2e"
slowTestsLabel = "slow"
@@ -362,7 +361,6 @@ func dumpK3kCoverageData(ctx context.Context, folder string) {
err := k8sClient.List(ctx, &podList, &client.ListOptions{Namespace: k3kNamespace})
Expect(err).To(Not(HaveOccurred()))
Expect(podList.Items).NotTo(BeEmpty())
k3kPod := podList.Items[0]
k3kContainerName := k3kPod.Spec.Containers[0].Name
@@ -456,7 +454,6 @@ func getK3kLogs(ctx context.Context) io.ReadCloser {
err := k8sClient.List(ctx, &podList, &client.ListOptions{Namespace: k3kNamespace})
Expect(err).To(Not(HaveOccurred()))
Expect(podList.Items).NotTo(BeEmpty())
k3kPod := podList.Items[0]
req := k8s.CoreV1().Pods(k3kPod.Namespace).GetLogs(k3kPod.Name, &v1.PodLogOptions{Previous: true})

View File

@@ -16,7 +16,6 @@ import (
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller/cluster"
@@ -62,13 +61,13 @@ var _ = BeforeSuite(func() {
ctrl.SetLogger(zapr.NewLogger(zap.NewNop()))
mgr, err := ctrl.NewManager(cfg, ctrl.Options{Scheme: scheme, Metrics: metricsserver.Options{BindAddress: "0"}})
mgr, err := ctrl.NewManager(cfg, ctrl.Options{Scheme: scheme})
Expect(err).NotTo(HaveOccurred())
portAllocator, err := agent.NewPortAllocator(ctx, mgr.GetClient())
Expect(err).NotTo(HaveOccurred())
err = mgr.Add(portAllocator.InitPortAllocatorConfig(ctx, mgr.GetClient(), "50000-51000"))
err = mgr.Add(portAllocator.InitPortAllocatorConfig(ctx, mgr.GetClient(), "50000-51000", "51001-52000"))
Expect(err).NotTo(HaveOccurred())
ctx, cancel = context.WithCancel(context.Background())
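Aside: the hunk above widens the port allocator to take several "start-end" range strings ("50000-51000", "51001-52000"). The parsing behind InitPortAllocatorConfig is not shown in this diff; a hypothetical sketch of handling that format, with portRange and parsePortRange as invented names:

package example

import (
	"fmt"
	"strconv"
	"strings"
)

type portRange struct{ start, end int }

// parsePortRange turns a "start-end" string into a validated portRange.
func parsePortRange(s string) (portRange, error) {
	parts := strings.SplitN(s, "-", 2)
	if len(parts) != 2 {
		return portRange{}, fmt.Errorf("invalid range %q: want start-end", s)
	}
	start, err := strconv.Atoi(parts[0])
	if err != nil {
		return portRange{}, fmt.Errorf("invalid start in %q: %w", s, err)
	}
	end, err := strconv.Atoi(parts[1])
	if err != nil {
		return portRange{}, fmt.Errorf("invalid end in %q: %w", s, err)
	}
	if start < 1 || end > 65535 || start > end {
		return portRange{}, fmt.Errorf("range %q is out of order or out of bounds", s)
	}
	return portRange{start: start, end: end}, nil
}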

View File

@@ -47,9 +47,6 @@ var IngressTests = func() {
Ingresses: v1beta1.IngressSyncConfig{
Enabled: true,
},
Secrets: v1beta1.SecretSyncConfig{
Enabled: true,
},
},
},
}
@@ -58,8 +55,6 @@ var IngressTests = func() {
err = syncer.AddIngressSyncer(ctx, virtManager, hostManager, cluster.Name, cluster.Namespace)
Expect(err).NotTo(HaveOccurred())
err = syncer.AddSecretSyncer(ctx, virtManager, hostManager, cluster.Name, cluster.Namespace)
Expect(err).NotTo(HaveOccurred())
})
AfterEach(func() {
@@ -297,194 +292,6 @@ var IngressTests = func() {
Should(BeTrue())
})
It("will sync an Ingress with a TLS secret", func() {
ctx := context.Background()
ingressSecret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "ingress-secret-",
Namespace: "default",
},
Data: map[string][]byte{
"ca.crt": []byte("value"),
"tls.crt": []byte("value"),
"tls.key": []byte("value"),
},
}
err := virtTestEnv.k8sClient.Create(ctx, ingressSecret)
Expect(err).NotTo(HaveOccurred())
ingress := &networkingv1.Ingress{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "ingress-",
Namespace: "default",
Labels: map[string]string{
"foo": "bar",
},
},
Spec: networkingv1.IngressSpec{
Rules: []networkingv1.IngressRule{
{
Host: "test.com",
IngressRuleValue: networkingv1.IngressRuleValue{
HTTP: &networkingv1.HTTPIngressRuleValue{
Paths: []networkingv1.HTTPIngressPath{
{
Path: "/",
PathType: ptr.To(networkingv1.PathTypePrefix),
Backend: networkingv1.IngressBackend{
Service: &networkingv1.IngressServiceBackend{
Name: "test-service",
Port: networkingv1.ServiceBackendPort{
Name: "test-port",
},
},
},
},
},
},
},
},
},
TLS: []networkingv1.IngressTLS{
{
Hosts: []string{"test.com"},
SecretName: ingressSecret.Name,
},
},
},
}
err = virtTestEnv.k8sClient.Create(ctx, ingress)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created Ingress %s in virtual cluster", ingress.Name))
var hostIngress networkingv1.Ingress
var hostSecret v1.Secret
hostIngressName := translateName(cluster, ingress.Namespace, ingress.Name)
hostSecretName := translateName(cluster, ingress.Namespace, ingressSecret.Name)
Eventually(func() error {
key := client.ObjectKey{Name: hostIngressName, Namespace: namespace}
return hostTestEnv.k8sClient.Get(ctx, key, &hostIngress)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeNil())
Eventually(func() error {
key := client.ObjectKey{Name: hostSecretName, Namespace: namespace}
return hostTestEnv.k8sClient.Get(ctx, key, &hostSecret)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeNil())
// Verify Ingress in host cluster does reference the translated secret since TLS secret translation is enabled
Expect(hostIngress.Spec.TLS[0].SecretName).To(Equal(hostSecretName))
})
It("will not sync an Ingress with a TLS secret", func() {
ctx := context.Background()
cluster.Spec.Sync.Ingresses.DisableTLSSecretTranslation = true
err := hostTestEnv.k8sClient.Update(ctx, &cluster)
Expect(err).NotTo(HaveOccurred())
ingressSecret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "ingress-secret-",
Namespace: "default",
},
Data: map[string][]byte{
"ca.crt": []byte("value"),
"tls.crt": []byte("value"),
"tls.key": []byte("value"),
},
}
err = virtTestEnv.k8sClient.Create(ctx, ingressSecret)
Expect(err).NotTo(HaveOccurred())
ingress := &networkingv1.Ingress{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "ingress-",
Namespace: "default",
Labels: map[string]string{
"foo": "bar",
},
},
Spec: networkingv1.IngressSpec{
Rules: []networkingv1.IngressRule{
{
Host: "test.com",
IngressRuleValue: networkingv1.IngressRuleValue{
HTTP: &networkingv1.HTTPIngressRuleValue{
Paths: []networkingv1.HTTPIngressPath{
{
Path: "/",
PathType: ptr.To(networkingv1.PathTypePrefix),
Backend: networkingv1.IngressBackend{
Service: &networkingv1.IngressServiceBackend{
Name: "test-service",
Port: networkingv1.ServiceBackendPort{
Name: "test-port",
},
},
},
},
},
},
},
},
},
TLS: []networkingv1.IngressTLS{
{
Hosts: []string{"test.com"},
SecretName: ingressSecret.Name,
},
},
},
}
err = virtTestEnv.k8sClient.Create(ctx, ingress)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Created Ingress %s in virtual cluster", ingress.Name))
var hostIngress networkingv1.Ingress
var hostSecret v1.Secret
hostIngressName := translateName(cluster, ingress.Namespace, ingress.Name)
hostSecretName := translateName(cluster, ingress.Namespace, ingressSecret.Name)
Eventually(func() error {
key := client.ObjectKey{Name: hostIngressName, Namespace: namespace}
return hostTestEnv.k8sClient.Get(ctx, key, &hostIngress)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeNil())
// Secret will be translated to host cluster but the Ingress will not reference it since TLS secret translation is disabled
Eventually(func() error {
key := client.ObjectKey{Name: hostSecretName, Namespace: namespace}
return hostTestEnv.k8sClient.Get(ctx, key, &hostSecret)
}).
WithPolling(time.Millisecond * 300).
WithTimeout(time.Second * 10).
Should(BeNil())
// Verify Ingress in host cluster has guest cluster secret reference as translation has not been performed
Expect(hostIngress.Spec.TLS[0].SecretName).To(Equal(ingressSecret.Name))
})
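Aside: both specs above derive the expected host-side object names with translateName(cluster, namespace, name), which maps a virtual-cluster object to its name on the host. Its real implementation is outside this diff; one plausible scheme, sketched here with invented details, joins cluster name, virtual namespace, and object name, and falls back to a digest suffix to stay within the 253-character limit on Kubernetes object names:

package example

import (
	"crypto/sha256"
	"fmt"
)

// translateName is a sketch, not the project's actual helper: it joins the
// identifying parts and truncates with a short hash when the result would
// exceed the DNS subdomain length limit.
func translateName(clusterName, virtualNamespace, name string) string {
	joined := fmt.Sprintf("%s-%s-%s", clusterName, virtualNamespace, name)
	if len(joined) <= 253 {
		return joined
	}
	sum := sha256.Sum256([]byte(joined))
	return fmt.Sprintf("%s-%x", joined[:240], sum[:6])
}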
It("will not sync an Ingress if disabled", func() {
ctx := context.Background()

View File

@@ -15,7 +15,6 @@ import (
corev1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
ctrl "sigs.k8s.io/controller-runtime"
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller/policy"
@@ -50,7 +49,7 @@ var _ = BeforeSuite(func() {
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme})
Expect(err).NotTo(HaveOccurred())
mgr, err := ctrl.NewManager(cfg, ctrl.Options{Scheme: scheme, Metrics: metricsserver.Options{BindAddress: "0"}})
mgr, err := ctrl.NewManager(cfg, ctrl.Options{Scheme: scheme})
Expect(err).NotTo(HaveOccurred())
ctrl.SetLogger(zapr.NewLogger(zap.NewNop()))

View File

@@ -469,117 +469,6 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
Should(Succeed())
})
It("updates the cluster policy status with the DefaultServerAffinity and DefaultAgentAffinity", func() {
serverAffinity := &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{
{Weight: 10},
},
},
}
agentAffinity := serverAffinity.DeepCopy()
agentAffinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution[0].Weight = 20
policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
DefaultServerAffinity: serverAffinity,
DefaultAgentAffinity: agentAffinity,
})
bindPolicyToNamespace(namespace, policy)
err := k8sClient.Update(ctx, policy)
Expect(err).To(Not(HaveOccurred()))
cluster := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace.Name,
},
Spec: v1beta1.ClusterSpec{
Mode: v1beta1.SharedClusterMode,
Servers: ptr.To[int32](1),
Agents: ptr.To[int32](0),
},
}
err = k8sClient.Create(ctx, cluster)
Expect(err).To(Not(HaveOccurred()))
// wait for the policy controller to reconcile the defaults into the cluster status
Eventually(func(g Gomega) {
key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}
err = k8sClient.Get(ctx, key, cluster)
g.Expect(err).To(Not(HaveOccurred()))
g.Expect(cluster.Spec.AgentAffinity).To(BeNil())
g.Expect(cluster.Status.Policy).To(Not(BeNil()))
g.Expect(cluster.Status.Policy.AgentAffinity).To(Not(BeNil()))
g.Expect(cluster.Status.Policy.AgentAffinity).To(Equal(agentAffinity))
g.Expect(cluster.Spec.ServerAffinity).To(BeNil())
g.Expect(cluster.Status.Policy.ServerAffinity).To(Not(BeNil()))
g.Expect(cluster.Status.Policy.ServerAffinity).To(Equal(serverAffinity))
}).
WithTimeout(time.Second * 10).
WithPolling(time.Second).
Should(Succeed())
})
It("overrides the cluster ServerAffinity and AgentAffinity with the DefaultServerAffinity and DefaultAgentAffinity from the policy", func() {
serverAffinity := &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{
{Weight: 10},
},
},
}
agentAffinity := serverAffinity.DeepCopy()
agentAffinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution[0].Weight = 20
policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
DefaultServerAffinity: serverAffinity,
DefaultAgentAffinity: agentAffinity,
})
bindPolicyToNamespace(namespace, policy)
err := k8sClient.Update(ctx, policy)
Expect(err).To(Not(HaveOccurred()))
// Cluster values that will get overwritten by the policy in the cluster status
clusterAgentAffinity := agentAffinity.DeepCopy()
clusterAgentAffinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution[0].Weight = 30
clusterServerAffinity := serverAffinity.DeepCopy()
clusterServerAffinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution[0].Weight = 40
cluster := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace.Name,
},
Spec: v1beta1.ClusterSpec{
Mode: v1beta1.SharedClusterMode,
Servers: ptr.To[int32](1),
Agents: ptr.To[int32](0),
AgentAffinity: clusterAgentAffinity,
ServerAffinity: clusterServerAffinity,
},
}
err = k8sClient.Create(ctx, cluster)
Expect(err).To(Not(HaveOccurred()))
// wait for the policy controller to reconcile the defaults into the cluster status
Eventually(func(g Gomega) {
key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}
err = k8sClient.Get(ctx, key, cluster)
g.Expect(err).To(Not(HaveOccurred()))
g.Expect(cluster.Spec.AgentAffinity).To(Equal(clusterAgentAffinity))
g.Expect(cluster.Status.Policy).To(Not(BeNil()))
g.Expect(cluster.Status.Policy.AgentAffinity).To(Not(BeNil()))
g.Expect(cluster.Status.Policy.AgentAffinity).To(Equal(agentAffinity))
g.Expect(cluster.Spec.ServerAffinity).To(Equal(clusterServerAffinity))
g.Expect(cluster.Status.Policy.ServerAffinity).To(Not(BeNil()))
g.Expect(cluster.Status.Policy.ServerAffinity).To(Equal(serverAffinity))
}).
WithTimeout(time.Second * 10).
WithPolling(time.Second).
Should(Succeed())
})
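Aside: the two removed specs above pin down the contract for policy defaults: a bound policy's DefaultServerAffinity and DefaultAgentAffinity are surfaced in cluster.Status.Policy, while cluster.Spec keeps whatever the user set (including nil). A minimal sketch of that reconcile step, with local types standing in for the real API structs:

package example

import (
	v1 "k8s.io/api/core/v1"
)

type policyDefaults struct {
	DefaultServerAffinity *v1.Affinity
	DefaultAgentAffinity  *v1.Affinity
}

type clusterPolicyStatus struct {
	ServerAffinity *v1.Affinity
	AgentAffinity  *v1.Affinity
}

// applyPolicyDefaults records the policy's defaults in the cluster status;
// the cluster spec is deliberately left untouched, as asserted above.
func applyPolicyDefaults(p policyDefaults, status *clusterPolicyStatus) {
	status.ServerAffinity = p.DefaultServerAffinity.DeepCopy()
	status.AgentAffinity = p.DefaultAgentAffinity.DeepCopy()
}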
It("should create a ResourceQuota if Quota is enabled", func() {
policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
Quota: &v1.ResourceQuotaSpec{