Compare commits

...

27 Commits

Author SHA1 Message Date
Enrico Candino
0185998aa0 Bump Charts to 1.0.2-rc1 (#609) 2026-01-15 14:12:52 +01:00
Guilherme Macedo
af5d33cfb8 Add FOSSA scanning workflow (#606)
Signed-off-by: Guilherme Macedo <guilherme@gmacedo.com>
2026-01-14 19:14:07 +01:00
Enrico Candino
f0d9b08b24 Added AsciiDoc K3k CRDs docs automation (#600)
* added asciidoc conversion for crds doc

* check versions

* use crd-ref-docs for generated asciidoc documentation

* add custom templates

* cleanup templates

* update references, rename docs file

* revert to found available pandoc version
2026-01-09 15:50:36 +01:00
Hussein Galal
a871917aec Refactor startup command to wait for node IP changes (#598)
* Patch node ip when server pod restarts

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Refactor startup command and adding safe mode

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add date/time logging to the startup script

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2026-01-09 16:29:47 +02:00
Enrico Candino
c16eae99c7 Added AsciiDoc k3kcli automation (#597)
* adding scripts for asciidoc cli generation

* fix small typos to align to existing docs

* added pandoc

* pandoc check
2026-01-07 11:16:40 +01:00
Enrico Candino
fc6bcedc5f fix for missing label update in creation, added tests (#592) 2025-12-16 11:34:50 +01:00
Hussein Galal
0086d5aa4a Attach creation of Pseudo PV to the PVC instead of the pods (#577)
* Change creation of pseudo PV to PVC instead of pods

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Ignore not found pvs when deleting the pvc

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Avoid returning when the PV already exists

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-12-15 17:27:30 +02:00
Enrico Candino
c9bb1bcf46 Fixed CreatePod and UpdatePod in Virtual Kubelet for Downward API (#573)
* wip

* fix lint

* ephemerals container

* remove unused

* add retry

* volumes refactor

* added configmap and secret keyRef translation

* set debug logger to virtual kubelet logger

* added tests

* fix lint, removed unused func

* added test file locally
2025-12-10 13:47:34 +01:00
Enrico Candino
6d5dd8564f Bump Charts to 1.0.1 (#588) 2025-12-09 12:33:21 +01:00
Enrico Candino
93025d301b Bump Charts to 1.0.1-rc2 (#586) 2025-12-03 11:44:49 +01:00
Enrico Candino
e385ceb66f Fixed missing Kubernetes host version when specified (#585)
* fix for missing host version

* added test

* fix test

* fix test
2025-12-03 09:21:27 +01:00
Enrico Candino
5c49c3d6b7 Fix create events rbac (#575)
* cleanup logs in kubelet provider

* added events create rbac to kubelet

* fix lint, moved fetch pod logic in separate func
2025-11-25 13:48:04 +01:00
Enrico Candino
521ff17ef6 Added test for SubPathExpr (#569)
* small fixes

* added test for subpathexpr

* removed old comment
2025-11-21 16:28:59 +01:00
Enrico Candino
5b4f31ef73 bump version to 1.0.1-rc1 in Chart.yaml (#567) 2025-11-17 18:24:57 +01:00
Enrico Candino
8856419e70 added check for failing tests (#566) 2025-11-17 12:58:59 +01:00
Enrico Candino
8760afd5bc Added --namespace flag to k3kcli policy create (#564)
* added --namespace flag to policy create to actually bind the new policy to existing namespaces

* fix lint

* fix tests

* added overwrite flag

* updated cli docs

* fix tests 2

* moved double quotes to single quote

* fix test
2025-11-14 21:45:28 +01:00
Enrico Candino
27730305c2 Added labels and annotations flags to cluster and policy create (#565)
* added labels and annotations flags to cluster create

* added labels and annotations flag to create policy command
2025-11-14 16:54:01 +01:00
Enrico Candino
d0e50a580d Added cluster details in cli during creation (#562)
* added cluster details in cli during cluster creation, silenced usage, removed static persistence type from help

* fix docs
2025-11-14 12:53:15 +01:00
Enrico Candino
7dc4726bbd Fixed panic during kubeconfig generate (#554)
* fix panic during kubeconfig generate

* moved check
2025-11-11 17:18:26 +01:00
Enrico Candino
7144cf9e66 Moved CRDs to Helm templates folder (#552)
* moved CRDs of Cluster and VirtualClusterPolicy

Updated the generate script to output CRDs to the correct directory and include the keep resource policy annotation.

* fix crd directory in tests
2025-11-11 16:22:56 +01:00
Enrico Candino
de0d2a0019 Add Job Summary reports to Conformance tests (#553)
* simplify shared conformance tests

* summary

* added failed test to summary

* space

* fix failed tests file

* removed sigs test
2025-11-11 13:01:23 +01:00
Enrico Candino
a84c49f9b6 Update Go version and some deps (#551)
* bump to Go 1.24.10

* bump k8s libs to v0.31.13 and v1.31.13

* bump cli deps (cobra, viper, pflag)
2025-11-07 12:34:34 +01:00
Enrico Candino
e79e6dbfc4 add upload permissions (#550) 2025-11-06 16:53:51 +01:00
Enrico Candino
2b6441e54e Added trivy vulns check (#549)
* image check

* added k3kcli
2025-11-06 12:46:15 +01:00
Enrico Candino
49a8d2a0ba Bump Charts to 1.0.0 (#543) 2025-11-03 16:44:11 +01:00
Enrico Candino
2e6de51dab Improve tests resiliency (#539)
* fix missing namespaces cleanup

* fix conflict namespace

* fix PVC already created error, patch for existing volume, and check with hardcoded k3k name

* removed useless test

* fix for dump covdata from external pod

* keep namespaces flag

* fix for multi-node clusters

* fix for hanging pod in isolated namespace
2025-10-31 21:51:37 +01:00
Enrico Candino
90aecbbb42 Bump Charts to 1.0.0-rc3 (#542) 2025-10-31 17:01:03 +01:00
84 changed files with 3973 additions and 1118 deletions


@@ -13,6 +13,10 @@ jobs:
build:
runs-on: ubuntu-latest
permissions:
contents: read
security-events: write # for github/codeql-action/upload-sarif to upload SARIF results
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -34,4 +38,51 @@ jobs:
env:
REPO: ${{ github.repository }}
REGISTRY: ""
- name: Run Trivy vulnerability scanner (k3kcli)
uses: aquasecurity/trivy-action@0.28.0
with:
ignore-unfixed: true
severity: 'MEDIUM,HIGH,CRITICAL'
scan-type: 'fs'
scan-ref: 'dist/k3kcli_linux_amd64_v1/k3kcli'
format: 'sarif'
output: 'trivy-results-k3kcli.sarif'
- name: Upload Trivy scan results to GitHub Security tab (k3kcli)
uses: github/codeql-action/upload-sarif@v3
with:
sarif_file: trivy-results-k3kcli.sarif
category: k3kcli
- name: Run Trivy vulnerability scanner (k3k)
uses: aquasecurity/trivy-action@0.28.0
with:
ignore-unfixed: true
severity: 'MEDIUM,HIGH,CRITICAL'
scan-type: 'image'
scan-ref: '${{ github.repository }}:v0.0.0-amd64'
format: 'sarif'
output: 'trivy-results-k3k.sarif'
- name: Upload Trivy scan results to GitHub Security tab (k3k)
uses: github/codeql-action/upload-sarif@v3
with:
sarif_file: trivy-results-k3k.sarif
category: k3k
- name: Run Trivy vulnerability scanner (k3k-kubelet)
uses: aquasecurity/trivy-action@0.28.0
with:
ignore-unfixed: true
severity: 'MEDIUM,HIGH,CRITICAL'
scan-type: 'image'
scan-ref: '${{ github.repository }}-kubelet:v0.0.0-amd64'
format: 'sarif'
output: 'trivy-results-k3k-kubelet.sarif'
- name: Upload Trivy scan results to GitHub Security tab (k3k-kubelet)
uses: github/codeql-action/upload-sarif@v3
with:
sarif_file: trivy-results-k3k-kubelet.sarif
category: k3k-kubelet

.github/workflows/fossa.yml (new file, 34 lines)

@@ -0,0 +1,34 @@
name: FOSSA Scanning
on:
push:
branches: ["main", "master", "release/**"]
workflow_dispatch:
permissions:
contents: read
id-token: write
jobs:
fossa-scanning:
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6
# The FOSSA token is shared between all repos in Rancher's GH org. It can be
# used directly and there is no need to request specific access to EIO.
- name: Read FOSSA token
uses: rancher-eio/read-vault-secrets@main
with:
secrets: |
secret/data/github/org/rancher/fossa/push token | FOSSA_API_KEY_PUSH_ONLY
- name: FOSSA scan
uses: fossas/fossa-action@main
with:
api-key: ${{ env.FOSSA_API_KEY_PUSH_ONLY }}
# Only runs the scan and does not return any results back to the
# pipeline.
run-tests: false


@@ -0,0 +1,158 @@
name: Conformance Tests - Shared Mode
on:
schedule:
- cron: "0 1 * * *"
workflow_dispatch:
permissions:
contents: read
jobs:
conformance:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
type:
- parallel
- serial
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
fetch-tags: true
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
- name: Install helm
uses: azure/setup-helm@v4.3.0
- name: Install hydrophone
run: go install sigs.k8s.io/hydrophone@latest
- name: Install k3d and kubectl
run: |
wget -q -O - https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | bash
k3d version
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
- name: Setup Kubernetes (k3d)
env:
REPO_NAME: k3k-registry
REPO_PORT: 12345
run: |
echo "127.0.0.1 ${REPO_NAME}" | sudo tee -a /etc/hosts
k3d registry create ${REPO_NAME} --port ${REPO_PORT}
k3d cluster create k3k --servers 2 \
-p "30000-30010:30000-30010@server:0" \
--registry-use k3d-${REPO_NAME}:${REPO_PORT}
kubectl cluster-info
kubectl get nodes
- name: Setup K3k
env:
REPO: k3k-registry:12345
run: |
echo "127.0.0.1 k3k-registry" | sudo tee -a /etc/hosts
make build
make package
make push
# add k3kcli to $PATH
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
VERSION=$(make version)
k3d image import ${REPO}/k3k:${VERSION} -c k3k --verbose
k3d image import ${REPO}/k3k-kubelet:${VERSION} -c k3k --verbose
make install
echo "Wait for K3k controller to be available"
kubectl wait -n k3k-system pod --for condition=Ready -l "app.kubernetes.io/name=k3k" --timeout=5m
- name: Check k3kcli
run: k3kcli -v
- name: Create virtual cluster
run: |
kubectl create namespace k3k-mycluster
cat <<EOF | kubectl apply -f -
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: mycluster
namespace: k3k-mycluster
spec:
mirrorHostNodes: true
tlsSANs:
- "127.0.0.1"
expose:
nodePort:
serverPort: 30001
EOF
echo "Wait for bootstrap secret to be available"
kubectl wait -n k3k-mycluster --for=create secret k3k-mycluster-bootstrap --timeout=5m
k3kcli kubeconfig generate --name mycluster
export KUBECONFIG=${{ github.workspace }}/k3k-mycluster-mycluster-kubeconfig.yaml
kubectl cluster-info
kubectl get nodes
kubectl get pods -A
- name: Run conformance tests (parallel)
if: matrix.type == 'parallel'
run: |
# Run conformance tests in parallel mode (skipping serial)
hydrophone --conformance --parallel 4 --skip='\[Serial\]' \
--kubeconfig ${{ github.workspace }}/k3k-mycluster-mycluster-kubeconfig.yaml \
--output-dir /tmp
- name: Run conformance tests (serial)
if: matrix.type == 'serial'
run: |
# Run serial conformance tests
hydrophone --focus='\[Serial\].*\[Conformance\]' \
--kubeconfig ${{ github.workspace }}/k3k-mycluster-mycluster-kubeconfig.yaml \
--output-dir /tmp
- name: Archive conformance logs
uses: actions/upload-artifact@v4
if: always()
with:
name: conformance-${{ matrix.type }}-logs
path: /tmp/e2e.log
- name: Job Summary
if: always()
run: |
echo '## 📊 Conformance Tests Results (${{ matrix.type }})' >> $GITHUB_STEP_SUMMARY
echo '| Passed | Failed | Pending | Skipped |' >> $GITHUB_STEP_SUMMARY
echo '|---|---|---|---|' >> $GITHUB_STEP_SUMMARY
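# pull the hydrophone summary line from the end of the log, keep only the four counts, and join them with ' | ' for the table row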
RESULTS=$(tail -10 /tmp/e2e.log | grep -E "Passed .* Failed .* Pending .* Skipped" | cut -d '-' -f 3)
RESULTS=$(echo $RESULTS | grep -oE '[0-9]+' | xargs | sed 's/ / | /g')
echo "| $RESULTS |" >> $GITHUB_STEP_SUMMARY
# only include failed tests section if there are any
if grep -q '\[FAIL\]' /tmp/e2e.log; then
echo '' >> $GITHUB_STEP_SUMMARY
echo '### Failed Tests' >> $GITHUB_STEP_SUMMARY
echo '```' >> $GITHUB_STEP_SUMMARY
grep '\[FAIL\]' /tmp/e2e.log >> $GITHUB_STEP_SUMMARY
echo '```' >> $GITHUB_STEP_SUMMARY
fi


@@ -123,3 +123,23 @@ jobs:
with:
name: conformance-${{ matrix.type }}-logs
path: /tmp/e2e.log
- name: Job Summary
if: always()
run: |
echo '## 📊 Conformance Tests Results (${{ matrix.type }})' >> $GITHUB_STEP_SUMMARY
echo '| Passed | Failed | Pending | Skipped |' >> $GITHUB_STEP_SUMMARY
echo '|---|---|---|---|' >> $GITHUB_STEP_SUMMARY
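# pull the hydrophone summary line from the end of the log, keep only the four counts, and join them with ' | ' for the table row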
RESULTS=$(tail -10 /tmp/e2e.log | grep -E "Passed .* Failed .* Pending .* Skipped" | cut -d '-' -f 3)
RESULTS=$(echo $RESULTS | grep -oE '[0-9]+' | xargs | sed 's/ / | /g')
echo "| $RESULTS |" >> $GITHUB_STEP_SUMMARY
# only include failed tests section if there are any
if grep -q '\[FAIL\]' /tmp/e2e.log; then
echo '' >> $GITHUB_STEP_SUMMARY
echo '### Failed Tests' >> $GITHUB_STEP_SUMMARY
echo '```' >> $GITHUB_STEP_SUMMARY
grep '\[FAIL\]' /tmp/e2e.log >> $GITHUB_STEP_SUMMARY
echo '```' >> $GITHUB_STEP_SUMMARY
fi


@@ -1,302 +0,0 @@
name: Conformance Tests
on:
schedule:
- cron: "0 1 * * *"
workflow_dispatch:
inputs:
test:
description: "Run specific test"
type: choice
options:
- conformance
- sig-api-machinery
- sig-apps
- sig-architecture
- sig-auth
- sig-cli
- sig-instrumentation
- sig-network
- sig-node
- sig-scheduling
- sig-storage
permissions:
contents: read
jobs:
conformance:
runs-on: ubuntu-latest
if: inputs.test == '' || inputs.test == 'conformance'
strategy:
fail-fast: false
matrix:
type:
- parallel
- serial
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
fetch-tags: true
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
- name: Install helm
uses: azure/setup-helm@v4.3.0
- name: Install hydrophone
run: go install sigs.k8s.io/hydrophone@latest
- name: Install k3d and kubectl
run: |
wget -q -O - https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | bash
k3d version
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
- name: Setup Kubernetes (k3d)
env:
REPO_NAME: k3k-registry
REPO_PORT: 12345
run: |
echo "127.0.0.1 ${REPO_NAME}" | sudo tee -a /etc/hosts
k3d registry create ${REPO_NAME} --port ${REPO_PORT}
k3d cluster create k3k --servers 3 \
-p "30000-30010:30000-30010@server:0" \
--registry-use k3d-${REPO_NAME}:${REPO_PORT}
kubectl cluster-info
kubectl get nodes
- name: Setup K3k
env:
REPO: k3k-registry:12345
run: |
echo "127.0.0.1 k3k-registry" | sudo tee -a /etc/hosts
make build
make package
make push
# add k3kcli to $PATH
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
VERSION=$(make version)
k3d image import ${REPO}/k3k:${VERSION} -c k3k --verbose
k3d image import ${REPO}/k3k-kubelet:${VERSION} -c k3k --verbose
make install
echo "Wait for K3k controller to be available"
kubectl wait -n k3k-system pod --for condition=Ready -l "app.kubernetes.io/name=k3k" --timeout=5m
- name: Check k3kcli
run: k3kcli -v
- name: Create virtual cluster
run: |
kubectl create namespace k3k-mycluster
cat <<EOF | kubectl apply -f -
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: mycluster
namespace: k3k-mycluster
spec:
servers: 2
mirrorHostNodes: true
tlsSANs:
- "127.0.0.1"
expose:
nodePort:
serverPort: 30001
EOF
echo "Wait for bootstrap secret to be available"
kubectl wait -n k3k-mycluster --for=create secret k3k-mycluster-bootstrap --timeout=5m
k3kcli kubeconfig generate --name mycluster
export KUBECONFIG=${{ github.workspace }}/k3k-mycluster-mycluster-kubeconfig.yaml
kubectl cluster-info
kubectl get nodes
kubectl get pods -A
- name: Run conformance tests (parallel)
if: matrix.type == 'parallel'
run: |
# Run conformance tests in parallel mode (skipping serial)
hydrophone --conformance --parallel 4 --skip='\[Serial\]' \
--kubeconfig ${{ github.workspace }}/k3k-mycluster-mycluster-kubeconfig.yaml \
--output-dir /tmp
- name: Run conformance tests (serial)
if: matrix.type == 'serial'
run: |
# Run serial conformance tests
hydrophone --focus='\[Serial\].*\[Conformance\]' \
--kubeconfig ${{ github.workspace }}/k3k-mycluster-mycluster-kubeconfig.yaml \
--output-dir /tmp
- name: Archive conformance logs
uses: actions/upload-artifact@v4
if: always()
with:
name: conformance-${{ matrix.type }}-logs
path: /tmp/e2e.log
sigs:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
tests:
- name: sig-api-machinery
focus: '\[sig-api-machinery\].*\[Conformance\]'
- name: sig-apps
focus: '\[sig-apps\].*\[Conformance\]'
- name: sig-architecture
focus: '\[sig-architecture\].*\[Conformance\]'
- name: sig-auth
focus: '\[sig-auth\].*\[Conformance\]'
- name: sig-cli
focus: '\[sig-cli\].*\[Conformance\]'
- name: sig-instrumentation
focus: '\[sig-instrumentation\].*\[Conformance\]'
- name: sig-network
focus: '\[sig-network\].*\[Conformance\]'
- name: sig-node
focus: '\[sig-node\].*\[Conformance\]'
- name: sig-scheduling
focus: '\[sig-scheduling\].*\[Conformance\]'
- name: sig-storage
focus: '\[sig-storage\].*\[Conformance\]'
steps:
- name: Validate input and fail fast
if: inputs.test != '' && inputs.test != matrix.tests.name
run: |
echo "Failing this job as it's not the intended target."
exit 1
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
fetch-tags: true
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
- name: Install helm
uses: azure/setup-helm@v4.3.0
- name: Install hydrophone
run: go install sigs.k8s.io/hydrophone@latest
- name: Install k3d and kubectl
run: |
wget -q -O - https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | bash
k3d version
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
- name: Setup Kubernetes (k3d)
env:
REPO_NAME: k3k-registry
REPO_PORT: 12345
run: |
echo "127.0.0.1 ${REPO_NAME}" | sudo tee -a /etc/hosts
k3d registry create ${REPO_NAME} --port ${REPO_PORT}
k3d cluster create k3k --servers 3 \
-p "30000-30010:30000-30010@server:0" \
--registry-use k3d-${REPO_NAME}:${REPO_PORT}
kubectl cluster-info
kubectl get nodes
- name: Setup K3k
env:
REPO: k3k-registry:12345
run: |
echo "127.0.0.1 k3k-registry" | sudo tee -a /etc/hosts
make build
make package
make push
# add k3kcli to $PATH
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
VERSION=$(make version)
k3d image import ${REPO}/k3k:${VERSION} -c k3k --verbose
k3d image import ${REPO}/k3k-kubelet:${VERSION} -c k3k --verbose
make install
echo "Wait for K3k controller to be available"
kubectl wait -n k3k-system pod --for condition=Ready -l "app.kubernetes.io/name=k3k" --timeout=5m
- name: Check k3kcli
run: k3kcli -v
- name: Create virtual cluster
run: |
kubectl create namespace k3k-mycluster
cat <<EOF | kubectl apply -f -
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: mycluster
namespace: k3k-mycluster
spec:
servers: 2
mirrorHostNodes: true
tlsSANs:
- "127.0.0.1"
expose:
nodePort:
serverPort: 30001
EOF
echo "Wait for bootstrap secret to be available"
kubectl wait -n k3k-mycluster --for=create secret k3k-mycluster-bootstrap --timeout=5m
k3kcli kubeconfig generate --name mycluster
export KUBECONFIG=${{ github.workspace }}/k3k-mycluster-mycluster-kubeconfig.yaml
kubectl cluster-info
kubectl get nodes
kubectl get pods -A
- name: Run sigs tests
run: |
FOCUS="${{ matrix.tests.focus }}"
echo "Running with --focus=${FOCUS}"
hydrophone --focus "${FOCUS}" \
--kubeconfig ${{ github.workspace }}/k3k-mycluster-mycluster-kubeconfig.yaml \
--output-dir /tmp
- name: Archive conformance logs
uses: actions/upload-artifact@v4
if: always()
with:
name: ${{ matrix.tests.name }}-logs
path: /tmp/e2e.log


@@ -20,8 +20,12 @@ jobs:
with:
go-version-file: go.mod
- name: Install Pandoc
run: sudo apt-get install pandoc
- name: Validate
run: make validate
tests-e2e:
runs-on: ubuntu-latest
needs: validate


@@ -37,6 +37,9 @@ jobs:
with:
go-version-file: go.mod
- name: Install Pandoc
run: sudo apt-get install pandoc
- name: Validate
run: make validate


@@ -10,16 +10,17 @@ GINKGO_VERSION ?= v2.21.0
GINKGO_FLAGS ?= -v -r --coverprofile=cover.out --coverpkg=./...
ENVTEST_VERSION ?= v0.0.0-20250505003155-b6c5897febe5
ENVTEST_K8S_VERSION := 1.31.0
CRD_REF_DOCS_VER ?= v0.1.0
CRD_REF_DOCS_VER ?= v0.2.0
GOLANGCI_LINT ?= go run github.com/golangci/golangci-lint/v2/cmd/golangci-lint@$(GOLANGCI_LINT_VERSION)
GINKGO ?= go run github.com/onsi/ginkgo/v2/ginkgo@$(GINKGO_VERSION)
CRD_REF_DOCS := go run github.com/elastic/crd-ref-docs@$(CRD_REF_DOCS_VER)
PANDOC := $(shell which pandoc 2> /dev/null)
ENVTEST ?= go run sigs.k8s.io/controller-runtime/tools/setup-envtest@$(ENVTEST_VERSION)
ENVTEST_DIR ?= $(shell pwd)/.envtest
E2E_LABEL_FILTER ?= "e2e"
E2E_LABEL_FILTER ?= e2e
export KUBEBUILDER_ASSETS ?= $(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(ENVTEST_DIR) -p path)
@@ -83,12 +84,27 @@ generate: ## Generate the CRDs specs
go generate ./...
.PHONY: docs
docs: ## Build the CRDs and CLI docs
docs: docs-crds docs-cli ## Build the CRDs and CLI docs
.PHONY: docs-crds
docs-crds: ## Build the CRDs docs
$(CRD_REF_DOCS) --config=./docs/crds/config.yaml \
--renderer=markdown \
--source-path=./pkg/apis/k3k.io/v1beta1 \
--output-path=./docs/crds/crd-docs.md
@go run ./docs/cli/genclidoc.go
--output-path=./docs/crds/crds.md
$(CRD_REF_DOCS) --config=./docs/crds/config.yaml \
--renderer=asciidoctor \
--templates-dir=./docs/crds/templates/asciidoctor \
--source-path=./pkg/apis/k3k.io/v1beta1 \
--output-path=./docs/crds/crds.adoc
.PHONY: docs-cli
docs-cli: ## Build the CLI docs
ifeq (, $(PANDOC))
$(error "pandoc not found in PATH.")
endif
@./scripts/generate-cli-docs
.PHONY: lint
lint: ## Find any linting issues in the project


@@ -1,6 +1,5 @@
# K3k: Kubernetes in Kubernetes
[![Experimental](https://img.shields.io/badge/status-experimental-orange.svg)](https://shields.io/)
[![Go Report Card](https://goreportcard.com/badge/github.com/rancher/k3k)](https://goreportcard.com/report/github.com/rancher/k3k)
![Tests](https://github.com/rancher/k3k/actions/workflows/test.yaml/badge.svg)
![Build](https://github.com/rancher/k3k/actions/workflows/build.yml/badge.svg)
@@ -12,10 +11,6 @@ K3k, Kubernetes in Kubernetes, is a tool that empowers you to create and manage
K3k integrates seamlessly with Rancher for simplified management of your embedded clusters.
**Experimental Tool**
This project is still under development and is considered experimental. It may have limitations, bugs, or changes. Please use with caution and report any issues you encounter. We appreciate your feedback as we continue to refine and improve this tool.
## Features and Benefits
@@ -60,7 +55,7 @@ This section provides instructions on how to install K3k and the `k3kcli`.
helm install --namespace k3k-system --create-namespace k3k k3k/k3k
```
**NOTE:** K3k is currently under development. We recommend using the latest released version when possible.
We recommend using the latest released version when possible.
### Install the `k3kcli`
@@ -72,7 +67,7 @@ To install it, simply download the latest available version for your architectur
For example, you can download the Linux amd64 version with:
```
wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v0.3.5/k3kcli-linux-amd64 && \
wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v1.0.1/k3kcli-linux-amd64 && \
chmod +x k3kcli && \
sudo mv k3kcli /usr/local/bin
```
@@ -80,7 +75,7 @@ wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v0.3.5/k3kcli-l
You should now be able to run:
```bash
-> % k3kcli --version
k3kcli version v0.3.5
k3kcli version v1.0.1
```


@@ -2,5 +2,5 @@ apiVersion: v2
name: k3k
description: A Helm chart for K3K
type: application
version: 1.0.0-rc2
appVersion: v1.0.0-rc2
version: 1.0.2-rc1
appVersion: v1.0.2-rc1


@@ -3,6 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
helm.sh/resource-policy: keep
controller-gen.kubebuilder.io/version: v0.16.0
name: clusters.k3k.io
spec:


@@ -3,6 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
helm.sh/resource-policy: keep
controller-gen.kubebuilder.io/version: v0.16.0
name: virtualclusterpolicies.k3k.io
spec:


@@ -7,7 +7,7 @@ import (
func NewClusterCmd(appCtx *AppContext) *cobra.Command {
cmd := &cobra.Command{
Use: "cluster",
Short: "cluster command",
Short: "K3k cluster command.",
}
cmd.AddCommand(


@@ -1,12 +1,14 @@
package cmds
import (
"bytes"
"context"
"errors"
"fmt"
"net/url"
"os"
"strings"
"text/template"
"time"
"github.com/sirupsen/logrus"
@@ -37,6 +39,8 @@ type CreateConfig struct {
agentArgs []string
serverEnvs []string
agentEnvs []string
labels []string
annotations []string
persistenceType string
storageClassName string
storageRequestSize string
@@ -54,7 +58,7 @@ func NewClusterCreateCmd(appCtx *AppContext) *cobra.Command {
cmd := &cobra.Command{
Use: "create",
Short: "Create new cluster",
Short: "Create a new cluster.",
Example: "k3kcli cluster create [command options] NAME",
PreRunE: func(cmd *cobra.Command, args []string) error {
return validateCreateConfig(createConfig)
@@ -111,7 +115,7 @@ func createAction(appCtx *AppContext, config *CreateConfig) func(cmd *cobra.Comm
}
}
logrus.Infof("Creating cluster [%s] in namespace [%s]", name, namespace)
logrus.Infof("Creating cluster '%s' in namespace '%s'", name, namespace)
cluster := newCluster(name, namespace, config)
@@ -134,19 +138,30 @@ func createAction(appCtx *AppContext, config *CreateConfig) func(cmd *cobra.Comm
if err := client.Create(ctx, cluster); err != nil {
if apierrors.IsAlreadyExists(err) {
logrus.Infof("Cluster [%s] already exists", name)
logrus.Infof("Cluster '%s' already exists", name)
} else {
return err
}
}
if err := waitForClusterReconciled(ctx, client, cluster, config.timeout); err != nil {
return fmt.Errorf("failed to wait for cluster to be reconciled: %w", err)
}
clusterDetails, err := printClusterDetails(cluster)
if err != nil {
return fmt.Errorf("failed to print cluster details: %w", err)
}
logrus.Info(clusterDetails)
logrus.Infof("Waiting for cluster to be available..")
if err := waitForCluster(ctx, client, cluster, config.timeout); err != nil {
if err := waitForClusterReady(ctx, client, cluster, config.timeout); err != nil {
return fmt.Errorf("failed to wait for cluster to become ready (status: %s): %w", cluster.Status.Phase, err)
}
logrus.Infof("Extracting Kubeconfig for [%s] cluster", name)
logrus.Infof("Extracting Kubeconfig for '%s' cluster", name)
// retry every 5s for at most 2m, or 25 times
availableBackoff := wait.Backoff{
@@ -173,8 +188,10 @@ func createAction(appCtx *AppContext, config *CreateConfig) func(cmd *cobra.Comm
func newCluster(name, namespace string, config *CreateConfig) *v1beta1.Cluster {
cluster := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Name: name,
Namespace: namespace,
Labels: parseKeyValuePairs(config.labels, "label"),
Annotations: parseKeyValuePairs(config.annotations, "annotation"),
},
TypeMeta: metav1.TypeMeta{
Kind: "Cluster",
@@ -257,7 +274,18 @@ func env(envSlice []string) []v1.EnvVar {
return envVars
}
func waitForCluster(ctx context.Context, k8sClient client.Client, cluster *v1beta1.Cluster, timeout time.Duration) error {
func waitForClusterReconciled(ctx context.Context, k8sClient client.Client, cluster *v1beta1.Cluster, timeout time.Duration) error {
return wait.PollUntilContextTimeout(ctx, time.Second, timeout, false, func(ctx context.Context) (bool, error) {
key := client.ObjectKeyFromObject(cluster)
if err := k8sClient.Get(ctx, key, cluster); err != nil {
return false, fmt.Errorf("failed to get resource: %w", err)
}
return cluster.Status.HostVersion != "", nil
})
}
func waitForClusterReady(ctx context.Context, k8sClient client.Client, cluster *v1beta1.Cluster, timeout time.Duration) error {
interval := 5 * time.Second
return wait.PollUntilContextTimeout(ctx, interval, timeout, true, func(ctx context.Context) (bool, error) {
@@ -341,3 +369,73 @@ func caCertSecret(certName, clusterName, clusterNamespace string, cert, key []by
},
}
}
func parseKeyValuePairs(pairs []string, pairType string) map[string]string {
resultMap := make(map[string]string)
for _, p := range pairs {
var k, v string
keyValue := strings.SplitN(p, "=", 2)
k = keyValue[0]
if len(keyValue) == 2 {
v = keyValue[1]
}
resultMap[k] = v
logrus.Debugf("Adding '%s=%s' %s to Cluster", k, v, pairType)
}
return resultMap
}
const clusterDetailsTemplate = `Cluster details:
Mode: {{ .Mode }}
Servers: {{ .Servers }}{{ if .Agents }}
Agents: {{ .Agents }}{{ end }}
Version: {{ if .Version }}{{ .Version }}{{ else }}{{ .HostVersion }}{{ end }} (Host: {{ .HostVersion }})
Persistence:
Type: {{.Persistence.Type}}{{ if .Persistence.StorageClassName }}
StorageClass: {{ .Persistence.StorageClassName }}{{ end }}{{ if .Persistence.StorageRequestSize }}
Size: {{ .Persistence.StorageRequestSize }}{{ end }}`
func printClusterDetails(cluster *v1beta1.Cluster) (string, error) {
type templateData struct {
Mode v1beta1.ClusterMode
Servers int32
Agents int32
Version string
HostVersion string
Persistence struct {
Type v1beta1.PersistenceMode
StorageClassName string
StorageRequestSize string
}
}
data := templateData{
Mode: cluster.Spec.Mode,
Servers: ptr.Deref(cluster.Spec.Servers, 0),
Agents: ptr.Deref(cluster.Spec.Agents, 0),
Version: cluster.Spec.Version,
HostVersion: cluster.Status.HostVersion,
}
data.Persistence.Type = cluster.Spec.Persistence.Type
data.Persistence.StorageClassName = ptr.Deref(cluster.Spec.Persistence.StorageClassName, "")
data.Persistence.StorageRequestSize = cluster.Spec.Persistence.StorageRequestSize
tmpl, err := template.New("clusterDetails").Parse(clusterDetailsTemplate)
if err != nil {
return "", err
}
var buf bytes.Buffer
if err = tmpl.Execute(&buf, data); err != nil {
return "", err
}
return buf.String(), nil
}
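The `--labels` and `--annotations` flags added in this change accept repeated `key=value` entries. As a minimal standalone sketch (not the actual k3kcli entry point), the following mirrors the `parseKeyValuePairs` helper above to show how an entry without an `=` is stored with an empty value and how a duplicate key overwrites an earlier one:

```go
package main

import (
	"fmt"
	"strings"
)

// parseKeyValuePairs mirrors the helper added in cluster create: each entry is
// split on the first '=', entries without '=' keep an empty value, and a later
// duplicate key overwrites an earlier one.
func parseKeyValuePairs(pairs []string) map[string]string {
	result := make(map[string]string)

	for _, p := range pairs {
		kv := strings.SplitN(p, "=", 2)

		value := ""
		if len(kv) == 2 {
			value = kv[1]
		}

		result[kv[0]] = value
	}

	return result
}

func main() {
	// e.g. k3kcli cluster create --labels env=dev --labels team=infra --labels experimental
	fmt.Println(parseKeyValuePairs([]string{"env=dev", "team=infra", "experimental"}))
	// Output: map[env:dev experimental: team:infra]
}
```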


@@ -17,13 +17,15 @@ func createFlags(cmd *cobra.Command, cfg *CreateConfig) {
cmd.Flags().StringVar(&cfg.clusterCIDR, "cluster-cidr", "", "cluster CIDR")
cmd.Flags().StringVar(&cfg.serviceCIDR, "service-cidr", "", "service CIDR")
cmd.Flags().BoolVar(&cfg.mirrorHostNodes, "mirror-host-nodes", false, "Mirror Host Cluster Nodes")
cmd.Flags().StringVar(&cfg.persistenceType, "persistence-type", string(v1beta1.DynamicPersistenceMode), "persistence mode for the nodes (dynamic, ephemeral, static)")
cmd.Flags().StringVar(&cfg.persistenceType, "persistence-type", string(v1beta1.DynamicPersistenceMode), "persistence mode for the nodes (dynamic, ephemeral)")
cmd.Flags().StringVar(&cfg.storageClassName, "storage-class-name", "", "storage class name for dynamic persistence type")
cmd.Flags().StringVar(&cfg.storageRequestSize, "storage-request-size", "", "storage size for dynamic persistence type")
cmd.Flags().StringSliceVar(&cfg.serverArgs, "server-args", []string{}, "servers extra arguments")
cmd.Flags().StringSliceVar(&cfg.agentArgs, "agent-args", []string{}, "agents extra arguments")
cmd.Flags().StringSliceVar(&cfg.serverEnvs, "server-envs", []string{}, "servers extra Envs")
cmd.Flags().StringSliceVar(&cfg.agentEnvs, "agent-envs", []string{}, "agents extra Envs")
cmd.Flags().StringArrayVar(&cfg.labels, "labels", []string{}, "Labels to add to the cluster object (e.g. key=value)")
cmd.Flags().StringArrayVar(&cfg.annotations, "annotations", []string{}, "Annotations to add to the cluster object (e.g. key=value)")
cmd.Flags().StringVar(&cfg.version, "version", "", "k3s version")
cmd.Flags().StringVar(&cfg.mode, "mode", "shared", "k3k mode type (shared, virtual)")
cmd.Flags().StringVar(&cfg.kubeconfigServerHost, "kubeconfig-server", "", "override the kubeconfig server host")
@@ -42,7 +44,7 @@ func validateCreateConfig(cfg *CreateConfig) error {
case v1beta1.EphemeralPersistenceMode, v1beta1.DynamicPersistenceMode:
return nil
default:
return errors.New(`persistence-type should be one of "dynamic", "ephemeral" or "static"`)
return errors.New(`persistence-type should be one of "dynamic" or "ephemeral"`)
}
}


@@ -0,0 +1,95 @@
package cmds
import (
"testing"
"github.com/stretchr/testify/assert"
"k8s.io/utils/ptr"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
func Test_printClusterDetails(t *testing.T) {
tests := []struct {
name string
cluster *v1beta1.Cluster
want string
wantErr bool
}{
{
name: "simple cluster",
cluster: &v1beta1.Cluster{
Spec: v1beta1.ClusterSpec{
Mode: v1beta1.SharedClusterMode,
Version: "123",
Persistence: v1beta1.PersistenceConfig{
Type: v1beta1.DynamicPersistenceMode,
},
},
Status: v1beta1.ClusterStatus{
HostVersion: "456",
},
},
want: `Cluster details:
Mode: shared
Servers: 0
Version: 123 (Host: 456)
Persistence:
Type: dynamic`,
},
{
name: "simple cluster with no version",
cluster: &v1beta1.Cluster{
Spec: v1beta1.ClusterSpec{
Mode: v1beta1.SharedClusterMode,
Persistence: v1beta1.PersistenceConfig{
Type: v1beta1.DynamicPersistenceMode,
},
},
Status: v1beta1.ClusterStatus{
HostVersion: "456",
},
},
want: `Cluster details:
Mode: shared
Servers: 0
Version: 456 (Host: 456)
Persistence:
Type: dynamic`,
},
{
name: "cluster with agents",
cluster: &v1beta1.Cluster{
Spec: v1beta1.ClusterSpec{
Mode: v1beta1.SharedClusterMode,
Agents: ptr.To[int32](3),
Persistence: v1beta1.PersistenceConfig{
Type: v1beta1.DynamicPersistenceMode,
StorageClassName: ptr.To("local-path"),
StorageRequestSize: "3gb",
},
},
Status: v1beta1.ClusterStatus{
HostVersion: "456",
},
},
want: `Cluster details:
Mode: shared
Servers: 0
Agents: 3
Version: 456 (Host: 456)
Persistence:
Type: dynamic
StorageClass: local-path
Size: 3gb`,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
clusterDetails, err := printClusterDetails(tt.cluster)
assert.NoError(t, err)
assert.Equal(t, tt.want, clusterDetails)
})
}
}


@@ -24,7 +24,7 @@ var keepData bool
func NewClusterDeleteCmd(appCtx *AppContext) *cobra.Command {
cmd := &cobra.Command{
Use: "delete",
Short: "Delete an existing cluster",
Short: "Delete an existing cluster.",
Example: "k3kcli cluster delete [command options] NAME",
RunE: delete(appCtx),
Args: cobra.ExactArgs(1),
@@ -48,7 +48,7 @@ func delete(appCtx *AppContext) func(cmd *cobra.Command, args []string) error {
namespace := appCtx.Namespace(name)
logrus.Infof("Deleting [%s] cluster in namespace [%s]", name, namespace)
logrus.Infof("Deleting '%s' cluster in namespace '%s'", name, namespace)
cluster := v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{


@@ -16,7 +16,7 @@ import (
func NewClusterListCmd(appCtx *AppContext) *cobra.Command {
cmd := &cobra.Command{
Use: "list",
Short: "List all the existing cluster",
Short: "List all existing clusters.",
Example: "k3kcli cluster list [command options]",
RunE: list(appCtx),
Args: cobra.NoArgs,


@@ -37,7 +37,7 @@ type GenerateKubeconfigConfig struct {
func NewKubeconfigCmd(appCtx *AppContext) *cobra.Command {
cmd := &cobra.Command{
Use: "kubeconfig",
Short: "Manage kubeconfig for clusters",
Short: "Manage kubeconfig for clusters.",
}
cmd.AddCommand(
@@ -52,7 +52,7 @@ func NewKubeconfigGenerateCmd(appCtx *AppContext) *cobra.Command {
cmd := &cobra.Command{
Use: "generate",
Short: "Generate kubeconfig for clusters",
Short: "Generate kubeconfig for clusters.",
RunE: generate(appCtx, cfg),
Args: cobra.NoArgs,
}


@@ -7,7 +7,7 @@ import (
func NewPolicyCmd(appCtx *AppContext) *cobra.Command {
cmd := &cobra.Command{
Use: "policy",
Short: "policy command",
Short: "K3k policy command.",
}
cmd.AddCommand(


@@ -18,7 +18,11 @@ import (
)
type VirtualClusterPolicyCreateConfig struct {
mode string
mode string
labels []string
annotations []string
namespaces []string
overwrite bool
}
func NewPolicyCreateCmd(appCtx *AppContext) *cobra.Command {
@@ -26,7 +30,7 @@ func NewPolicyCreateCmd(appCtx *AppContext) *cobra.Command {
cmd := &cobra.Command{
Use: "create",
Short: "Create new policy",
Short: "Create a new policy.",
Example: "k3kcli policy create [command options] NAME",
PreRunE: func(cmd *cobra.Command, args []string) error {
switch config.mode {
@@ -41,6 +45,10 @@ func NewPolicyCreateCmd(appCtx *AppContext) *cobra.Command {
}
cmd.Flags().StringVar(&config.mode, "mode", "shared", "The allowed mode type of the policy")
cmd.Flags().StringArrayVar(&config.labels, "labels", []string{}, "Labels to add to the policy object (e.g. key=value)")
cmd.Flags().StringArrayVar(&config.annotations, "annotations", []string{}, "Annotations to add to the policy object (e.g. key=value)")
cmd.Flags().StringSliceVar(&config.namespaces, "namespace", []string{}, "The namespaces where to bind the policy")
cmd.Flags().BoolVar(&config.overwrite, "overwrite", false, "Overwrite namespace binding of existing policy")
return cmd
}
@@ -51,9 +59,12 @@ func policyCreateAction(appCtx *AppContext, config *VirtualClusterPolicyCreateCo
client := appCtx.Client
policyName := args[0]
_, err := createPolicy(ctx, client, v1beta1.ClusterMode(config.mode), policyName)
_, err := createPolicy(ctx, client, config, policyName)
if err != nil {
return err
}
return err
return bindPolicyToNamespaces(ctx, client, config, policyName)
}
}
@@ -71,7 +82,7 @@ func createNamespace(ctx context.Context, client client.Client, name, policyName
return err
}
logrus.Infof(`Creating namespace [%s]`, name)
logrus.Infof(`Creating namespace '%s'`, name)
if err := client.Create(ctx, ns); err != nil {
return err
@@ -81,19 +92,21 @@ func createNamespace(ctx context.Context, client client.Client, name, policyName
return nil
}
func createPolicy(ctx context.Context, client client.Client, mode v1beta1.ClusterMode, policyName string) (*v1beta1.VirtualClusterPolicy, error) {
logrus.Infof("Creating policy [%s]", policyName)
func createPolicy(ctx context.Context, client client.Client, config *VirtualClusterPolicyCreateConfig, policyName string) (*v1beta1.VirtualClusterPolicy, error) {
logrus.Infof("Creating policy '%s'", policyName)
policy := &v1beta1.VirtualClusterPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: policyName,
Name: policyName,
Labels: parseKeyValuePairs(config.labels, "label"),
Annotations: parseKeyValuePairs(config.annotations, "annotation"),
},
TypeMeta: metav1.TypeMeta{
Kind: "VirtualClusterPolicy",
APIVersion: "k3k.io/v1beta1",
},
Spec: v1beta1.VirtualClusterPolicySpec{
AllowedMode: mode,
AllowedMode: v1beta1.ClusterMode(config.mode),
},
}
@@ -102,8 +115,69 @@ func createPolicy(ctx context.Context, client client.Client, mode v1beta1.Cluste
return nil, err
}
logrus.Infof("Policy [%s] already exists", policyName)
logrus.Infof("Policy '%s' already exists", policyName)
}
return policy, nil
}
func bindPolicyToNamespaces(ctx context.Context, client client.Client, config *VirtualClusterPolicyCreateConfig, policyName string) error {
var errs []error
for _, namespace := range config.namespaces {
var ns v1.Namespace
if err := client.Get(ctx, types.NamespacedName{Name: namespace}, &ns); err != nil {
if apierrors.IsNotFound(err) {
logrus.Warnf(`Namespace '%s' not found, skipping`, namespace)
} else {
errs = append(errs, err)
}
continue
}
if ns.Labels == nil {
ns.Labels = map[string]string{}
}
oldPolicy := ns.Labels[policy.PolicyNameLabelKey]
// same policy found, no need to update
if oldPolicy == policyName {
logrus.Debugf(`Policy '%s' already bound to namespace '%s'`, policyName, namespace)
continue
}
// no old policy, safe to update
if oldPolicy == "" {
ns.Labels[policy.PolicyNameLabelKey] = policyName
if err := client.Update(ctx, &ns); err != nil {
errs = append(errs, err)
} else {
logrus.Infof(`Added policy '%s' to namespace '%s'`, policyName, namespace)
}
continue
}
// different policy, warn or check for overwrite flag
if oldPolicy != policyName {
if config.overwrite {
logrus.Infof(`Found policy '%s' bound to namespace '%s'. Overwriting it with '%s'`, oldPolicy, namespace, policyName)
ns.Labels[policy.PolicyNameLabelKey] = policyName
if err := client.Update(ctx, &ns); err != nil {
errs = append(errs, err)
} else {
logrus.Infof(`Added policy '%s' to namespace '%s'`, policyName, namespace)
}
} else {
logrus.Warnf(`Found policy '%s' bound to namespace '%s'. Skipping. To overwrite it use the --overwrite flag`, oldPolicy, namespace)
}
}
}
return errors.Join(errs...)
}


@@ -14,7 +14,7 @@ import (
func NewPolicyDeleteCmd(appCtx *AppContext) *cobra.Command {
return &cobra.Command{
Use: "delete",
Short: "Delete an existing policy",
Short: "Delete an existing policy.",
Example: "k3kcli policy delete [command options] NAME",
RunE: policyDeleteAction(appCtx),
Args: cobra.ExactArgs(1),
@@ -31,13 +31,17 @@ func policyDeleteAction(appCtx *AppContext) func(cmd *cobra.Command, args []stri
policy.Name = name
if err := client.Delete(ctx, policy); err != nil {
if apierrors.IsNotFound(err) {
logrus.Warnf("Policy not found")
} else {
if !apierrors.IsNotFound(err) {
return err
}
logrus.Warnf("Policy '%s' not found", name)
return nil
}
logrus.Infof("Policy '%s' deleted", name)
return nil
}
}


@@ -15,7 +15,7 @@ import (
func NewPolicyListCmd(appCtx *AppContext) *cobra.Command {
return &cobra.Command{
Use: "list",
Short: "List all the existing policies",
Short: "List all existing policies.",
Example: "k3kcli policy list [command options]",
RunE: policyList(appCtx),
Args: cobra.NoArgs,


@@ -34,9 +34,10 @@ func NewRootCmd() *cobra.Command {
appCtx := &AppContext{}
rootCmd := &cobra.Command{
Use: "k3kcli",
Short: "CLI for K3K",
Version: buildinfo.Version,
SilenceUsage: true,
Use: "k3kcli",
Short: "CLI for K3K.",
Version: buildinfo.Version,
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
InitializeConfig(cmd)


@@ -4,7 +4,7 @@ This document provides advanced usage information for k3k, including detailed us
## Customizing the Cluster Resource
The `Cluster` resource provides a variety of fields for customizing the behavior of your virtual clusters. You can check the [CRD documentation](./crds/crd-docs.md) for the full specs.
The `Cluster` resource provides a variety of fields for customizing the behavior of your virtual clusters. You can check the [CRD documentation](./crds/crds.md) for the full specs.
**Note:** Most of these customization options can also be configured using the `k3kcli` tool. Refer to the [k3kcli](./cli/k3kcli.md) documentation for more details.
@@ -115,7 +115,7 @@ The `serverArgs` field allows you to specify additional arguments to be passed t
## Using the cli
You can check the [k3kcli documentation](./cli/cli-docs.md) for the full specs.
You can check the [k3kcli documentation](./cli/k3kcli.md) for the full specs.
### No storage provider:


@@ -104,7 +104,7 @@ Common use cases for administrators leveraging VirtualClusterPolicy include:
The K3k controller actively monitors VirtualClusterPolicy resources and the corresponding Namespace bindings. When a VCP is applied or updated, the controller ensures that the defined configurations are enforced on the relevant virtual clusters and their associated resources within the targeted Namespaces.
For a deep dive into what VirtualClusterPolicy can do, along with more examples, check out the [VirtualClusterPolicy Concepts](./virtualclusterpolicy.md) page. For a full list of all the spec fields, see the [API Reference for VirtualClusterPolicy](./crds/crd-docs.md#virtualclusterpolicy).
For a deep dive into what VirtualClusterPolicy can do, along with more examples, check out the [VirtualClusterPolicy Concepts](./virtualclusterpolicy.md) page. For a full list of all the spec fields, see the [API Reference for VirtualClusterPolicy](./crds/crds.md#virtualclusterpolicy).
## Comparison and Trade-offs

docs/cli/convert.lua (new file, 25 lines)

@@ -0,0 +1,25 @@
local deleting_see_also = false
function Header(el)
-- If we hit "SEE ALSO", start deleting and remove the header itself
if pandoc.utils.stringify(el):upper() == "SEE ALSO" then
deleting_see_also = true
return {}
end
-- If we hit any other header, stop deleting
deleting_see_also = false
return el
end
function BulletList(el)
if deleting_see_also then
return {} -- Deletes the list of links
end
return el
end
function CodeBlock(el)
-- Forces the ---- separator
local content = "----\n" .. el.text .. "\n----\n\n"
return pandoc.RawBlock('asciidoc', content)
end

docs/cli/k3kcli.adoc (new file, 283 lines)

@@ -0,0 +1,283 @@
== k3kcli
CLI for K3K.
=== Options
----
--debug Turn on debug logs
-h, --help help for k3kcli
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
----
== k3kcli cluster
K3k cluster command.
=== Options
----
-h, --help help for cluster
----
=== Options inherited from parent commands
----
--debug Turn on debug logs
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
----
== k3kcli cluster create
Create a new cluster.
----
k3kcli cluster create [flags]
----
=== Examples
----
k3kcli cluster create [command options] NAME
----
=== Options
----
--agent-args strings agents extra arguments
--agent-envs strings agents extra Envs
--agents int number of agents
--annotations stringArray Annotations to add to the cluster object (e.g. key=value)
--cluster-cidr string cluster CIDR
--custom-certs string The path for custom certificate directory
-h, --help help for create
--kubeconfig-server string override the kubeconfig server host
--labels stringArray Labels to add to the cluster object (e.g. key=value)
--mirror-host-nodes Mirror Host Cluster Nodes
--mode string k3k mode type (shared, virtual) (default "shared")
-n, --namespace string namespace of the k3k cluster
--persistence-type string persistence mode for the nodes (dynamic, ephemeral) (default "dynamic")
--policy string The policy to create the cluster in
--server-args strings servers extra arguments
--server-envs strings servers extra Envs
--servers int number of servers (default 1)
--service-cidr string service CIDR
--storage-class-name string storage class name for dynamic persistence type
--storage-request-size string storage size for dynamic persistence type
--timeout duration The timeout for waiting for the cluster to become ready (e.g., 10s, 5m, 1h). (default 3m0s)
--token string token of the cluster
--version string k3s version
----
=== Options inherited from parent commands
----
--debug Turn on debug logs
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
----
== k3kcli cluster delete
Delete an existing cluster.
----
k3kcli cluster delete [flags]
----
=== Examples
----
k3kcli cluster delete [command options] NAME
----
=== Options
----
-h, --help help for delete
--keep-data keeps persistence volumes created for the cluster after deletion
-n, --namespace string namespace of the k3k cluster
----
=== Options inherited from parent commands
----
--debug Turn on debug logs
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
----
== k3kcli cluster list
List all existing clusters.
----
k3kcli cluster list [flags]
----
=== Examples
----
k3kcli cluster list [command options]
----
=== Options
----
-h, --help help for list
-n, --namespace string namespace of the k3k cluster
----
=== Options inherited from parent commands
----
--debug Turn on debug logs
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
----
== k3kcli kubeconfig
Manage kubeconfig for clusters.
=== Options
----
-h, --help help for kubeconfig
----
=== Options inherited from parent commands
----
--debug Turn on debug logs
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
----
== k3kcli kubeconfig generate
Generate kubeconfig for clusters.
----
k3kcli kubeconfig generate [flags]
----
=== Options
----
--altNames strings altNames of the generated certificates for the kubeconfig
--cn string Common name (CN) of the generated certificates for the kubeconfig (default "system:admin")
--config-name string the name of the generated kubeconfig file
--expiration-days int Expiration date of the certificates used for the kubeconfig (default 365)
-h, --help help for generate
--kubeconfig-server string override the kubeconfig server host
--name string cluster name
-n, --namespace string namespace of the k3k cluster
--org strings Organization name (ORG) of the generated certificates for the kubeconfig
----
=== Options inherited from parent commands
----
--debug Turn on debug logs
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
----
== k3kcli policy
K3k policy command.
=== Options
----
-h, --help help for policy
----
=== Options inherited from parent commands
----
--debug Turn on debug logs
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
----
== k3kcli policy create
Create a new policy.
----
k3kcli policy create [flags]
----
=== Examples
----
k3kcli policy create [command options] NAME
----
=== Options
----
--annotations stringArray Annotations to add to the policy object (e.g. key=value)
-h, --help help for create
--labels stringArray Labels to add to the policy object (e.g. key=value)
--mode string The allowed mode type of the policy (default "shared")
--namespace strings The namespaces where to bind the policy
--overwrite Overwrite namespace binding of existing policy
----
=== Options inherited from parent commands
----
--debug Turn on debug logs
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
----
== k3kcli policy delete
Delete an existing policy.
----
k3kcli policy delete [flags]
----
=== Examples
----
k3kcli policy delete [command options] NAME
----
=== Options
----
-h, --help help for delete
----
=== Options inherited from parent commands
----
--debug Turn on debug logs
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
----
== k3kcli policy list
List all existing policies.
----
k3kcli policy list [flags]
----
=== Examples
----
k3kcli policy list [command options]
----
=== Options
----
-h, --help help for list
----
=== Options inherited from parent commands
----
--debug Turn on debug logs
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
----


@@ -1,6 +1,6 @@
## k3kcli
CLI for K3K
CLI for K3K.
### Options
@@ -12,7 +12,7 @@ CLI for K3K
### SEE ALSO
* [k3kcli cluster](k3kcli_cluster.md) - cluster command
* [k3kcli kubeconfig](k3kcli_kubeconfig.md) - Manage kubeconfig for clusters
* [k3kcli policy](k3kcli_policy.md) - policy command
* [k3kcli cluster](k3kcli_cluster.md) - K3k cluster command.
* [k3kcli kubeconfig](k3kcli_kubeconfig.md) - Manage kubeconfig for clusters.
* [k3kcli policy](k3kcli_policy.md) - K3k policy command.


@@ -1,6 +1,6 @@
## k3kcli cluster
cluster command
K3k cluster command.
### Options
@@ -17,8 +17,8 @@ cluster command
### SEE ALSO
* [k3kcli](k3kcli.md) - CLI for K3K
* [k3kcli cluster create](k3kcli_cluster_create.md) - Create new cluster
* [k3kcli cluster delete](k3kcli_cluster_delete.md) - Delete an existing cluster
* [k3kcli cluster list](k3kcli_cluster_list.md) - List all the existing cluster
* [k3kcli](k3kcli.md) - CLI for K3K.
* [k3kcli cluster create](k3kcli_cluster_create.md) - Create a new cluster.
* [k3kcli cluster delete](k3kcli_cluster_delete.md) - Delete an existing cluster.
* [k3kcli cluster list](k3kcli_cluster_list.md) - List all existing clusters.


@@ -1,6 +1,6 @@
## k3kcli cluster create
Create new cluster
Create a new cluster.
```
k3kcli cluster create [flags]
@@ -18,14 +18,16 @@ k3kcli cluster create [command options] NAME
--agent-args strings agents extra arguments
--agent-envs strings agents extra Envs
--agents int number of agents
--annotations stringArray Annotations to add to the cluster object (e.g. key=value)
--cluster-cidr string cluster CIDR
--custom-certs string The path for custom certificate directory
-h, --help help for create
--kubeconfig-server string override the kubeconfig server host
--labels stringArray Labels to add to the cluster object (e.g. key=value)
--mirror-host-nodes Mirror Host Cluster Nodes
--mode string k3k mode type (shared, virtual) (default "shared")
-n, --namespace string namespace of the k3k cluster
--persistence-type string persistence mode for the nodes (dynamic, ephemeral, static) (default "dynamic")
--persistence-type string persistence mode for the nodes (dynamic, ephemeral) (default "dynamic")
--policy string The policy to create the cluster in
--server-args strings servers extra arguments
--server-envs strings servers extra Envs
@@ -47,5 +49,5 @@ k3kcli cluster create [command options] NAME
### SEE ALSO
* [k3kcli cluster](k3kcli_cluster.md) - cluster command
* [k3kcli cluster](k3kcli_cluster.md) - K3k cluster command.


@@ -1,6 +1,6 @@
## k3kcli cluster delete
Delete an existing cluster
Delete an existing cluster.
```
k3kcli cluster delete [flags]
@@ -29,5 +29,5 @@ k3kcli cluster delete [command options] NAME
### SEE ALSO
* [k3kcli cluster](k3kcli_cluster.md) - cluster command
* [k3kcli cluster](k3kcli_cluster.md) - K3k cluster command.


@@ -1,6 +1,6 @@
## k3kcli cluster list
List all the existing cluster
List all existing clusters.
```
k3kcli cluster list [flags]
@@ -28,5 +28,5 @@ k3kcli cluster list [command options]
### SEE ALSO
* [k3kcli cluster](k3kcli_cluster.md) - cluster command
* [k3kcli cluster](k3kcli_cluster.md) - K3k cluster command.


@@ -1,6 +1,6 @@
## k3kcli kubeconfig
Manage kubeconfig for clusters
Manage kubeconfig for clusters.
### Options
@@ -17,6 +17,6 @@ Manage kubeconfig for clusters
### SEE ALSO
* [k3kcli](k3kcli.md) - CLI for K3K
* [k3kcli kubeconfig generate](k3kcli_kubeconfig_generate.md) - Generate kubeconfig for clusters
* [k3kcli](k3kcli.md) - CLI for K3K.
* [k3kcli kubeconfig generate](k3kcli_kubeconfig_generate.md) - Generate kubeconfig for clusters.


@@ -1,6 +1,6 @@
## k3kcli kubeconfig generate
Generate kubeconfig for clusters
Generate kubeconfig for clusters.
```
k3kcli kubeconfig generate [flags]
@@ -29,5 +29,5 @@ k3kcli kubeconfig generate [flags]
### SEE ALSO
* [k3kcli kubeconfig](k3kcli_kubeconfig.md) - Manage kubeconfig for clusters
* [k3kcli kubeconfig](k3kcli_kubeconfig.md) - Manage kubeconfig for clusters.


@@ -1,6 +1,6 @@
## k3kcli policy
policy command
K3k policy command.
### Options
@@ -17,8 +17,8 @@ policy command
### SEE ALSO
* [k3kcli](k3kcli.md) - CLI for K3K
* [k3kcli policy create](k3kcli_policy_create.md) - Create new policy
* [k3kcli policy delete](k3kcli_policy_delete.md) - Delete an existing policy
* [k3kcli policy list](k3kcli_policy_list.md) - List all the existing policies
* [k3kcli](k3kcli.md) - CLI for K3K.
* [k3kcli policy create](k3kcli_policy_create.md) - Create a new policy.
* [k3kcli policy delete](k3kcli_policy_delete.md) - Delete an existing policy.
* [k3kcli policy list](k3kcli_policy_list.md) - List all existing policies.


@@ -1,6 +1,6 @@
## k3kcli policy create
Create new policy
Create a new policy.
```
k3kcli policy create [flags]
@@ -15,8 +15,12 @@ k3kcli policy create [command options] NAME
### Options
```
-h, --help help for create
--mode string The allowed mode type of the policy (default "shared")
--annotations stringArray Annotations to add to the policy object (e.g. key=value)
-h, --help help for create
--labels stringArray Labels to add to the policy object (e.g. key=value)
--mode string The allowed mode type of the policy (default "shared")
--namespace strings The namespaces where to bind the policy
--overwrite Overwrite namespace binding of existing policy
```
### Options inherited from parent commands
@@ -28,5 +32,5 @@ k3kcli policy create [command options] NAME
### SEE ALSO
* [k3kcli policy](k3kcli_policy.md) - policy command
* [k3kcli policy](k3kcli_policy.md) - K3k policy command.


@@ -1,6 +1,6 @@
## k3kcli policy delete
Delete an existing policy
Delete an existing policy.
```
k3kcli policy delete [flags]
@@ -27,5 +27,5 @@ k3kcli policy delete [command options] NAME
### SEE ALSO
* [k3kcli policy](k3kcli_policy.md) - policy command
* [k3kcli policy](k3kcli_policy.md) - K3k policy command.


@@ -1,6 +1,6 @@
## k3kcli policy list
List all the existing policies
List all existing policies.
```
k3kcli policy list [flags]
@@ -27,5 +27,5 @@ k3kcli policy list [command options]
### SEE ALSO
* [k3kcli policy](k3kcli_policy.md) - policy command
* [k3kcli policy](k3kcli_policy.md) - K3k policy command.


@@ -1,8 +1,8 @@
processor:
# RE2 regular expressions describing type fields that should be excluded from the generated documentation.
ignoreFields:
- "status$"
- "TypeMeta$"
- "status$"
- "TypeMeta$"
render:
# Version of Kubernetes to use when generating links to Kubernetes API documentation.

docs/crds/crds.adoc (new file, 645 lines)

@@ -0,0 +1,645 @@
[id="k3k-api-reference"]
= API Reference
:revdate: "2006-01-02"
:page-revdate: {revdate}
:anchor_prefix: k8s-api
== Packages
- xref:{anchor_prefix}-k3k-io-v1beta1[$$k3k.io/v1beta1$$]
[id="{anchor_prefix}-k3k-io-v1beta1"]
== k3k.io/v1beta1
=== Resource Types
- xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-cluster[$$Cluster$$]
- xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterlist[$$ClusterList$$]
- xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-virtualclusterpolicy[$$VirtualClusterPolicy$$]
- xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-virtualclusterpolicylist[$$VirtualClusterPolicyList$$]
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-addon"]
=== Addon
Addon specifies a Secret containing YAML to be deployed on cluster startup.
_Appears In:_
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterspec[$$ClusterSpec$$]
[cols="25a,55a,10a,10a", options="header"]
|===
| Field | Description | Default | Validation
| *`secretNamespace`* __string__ | SecretNamespace is the namespace of the Secret. + | |
| *`secretRef`* __string__ | SecretRef is the name of the Secret. + | |
|===
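As a quick illustration of these two fields, an `addons` entry in a Cluster spec references the Secret by namespace and name; the namespace and Secret names below are placeholders:

```
spec:
  addons:
    - secretNamespace: k3k-system      # namespace holding the Secret (placeholder)
      secretRef: bootstrap-manifests   # name of the Secret containing raw YAML (placeholder)
```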
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-cluster"]
=== Cluster
Cluster defines a virtual Kubernetes cluster managed by k3k.
It specifies the desired state of a virtual cluster, including version, node configuration, and networking.
k3k uses this to provision and manage these virtual clusters.
_Appears In:_
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterlist[$$ClusterList$$]
[cols="25a,55a,10a,10a", options="header"]
|===
| Field | Description | Default | Validation
| *`apiVersion`* __string__ | `k3k.io/v1beta1` | |
| *`kind`* __string__ | `Cluster` | |
| *`metadata`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#objectmeta-v1-meta[$$ObjectMeta$$]__ | Refer to Kubernetes API documentation for fields of `metadata`.
| |
| *`spec`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterspec[$$ClusterSpec$$]__ | Spec defines the desired state of the Cluster. + | { } |
|===
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterlist"]
=== ClusterList
ClusterList is a list of Cluster resources.
[cols="25a,55a,10a,10a", options="header"]
|===
| Field | Description | Default | Validation
| *`apiVersion`* __string__ | `k3k.io/v1beta1` | |
| *`kind`* __string__ | `ClusterList` | |
| *`metadata`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#listmeta-v1-meta[$$ListMeta$$]__ | Refer to Kubernetes API documentation for fields of `metadata`.
| |
| *`items`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-cluster[$$Cluster$$] array__ | | |
|===
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clustermode"]
=== ClusterMode
_Underlying type:_ _string_
ClusterMode is the possible provisioning mode of a Cluster.
_Validation:_
- Enum: [shared virtual]
_Appears In:_
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterspec[$$ClusterSpec$$]
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-virtualclusterpolicyspec[$$VirtualClusterPolicySpec$$]
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterphase"]
=== ClusterPhase
_Underlying type:_ _string_
ClusterPhase is a high-level summary of the cluster's current lifecycle state.
_Appears In:_
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterstatus[$$ClusterStatus$$]
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterspec"]
=== ClusterSpec
ClusterSpec defines the desired state of a virtual Kubernetes cluster.
_Appears In:_
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-cluster[$$Cluster$$]
[cols="25a,55a,10a,10a", options="header"]
|===
| Field | Description | Default | Validation
| *`version`* __string__ | Version is the K3s version to use for the virtual nodes. +
It should follow the K3s versioning convention (e.g., v1.28.2-k3s1). +
If not specified, the Kubernetes version of the host node will be used. + | |
| *`mode`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clustermode[$$ClusterMode$$]__ | Mode specifies the cluster provisioning mode: "shared" or "virtual". +
Defaults to "shared". This field is immutable. + | shared | Enum: [shared virtual] +
| *`servers`* __integer__ | Servers specifies the number of K3s pods to run in server (control plane) mode. +
Must be at least 1. Defaults to 1. + | 1 |
| *`agents`* __integer__ | Agents specifies the number of K3s pods to run in agent (worker) mode. +
Must be 0 or greater. Defaults to 0. +
This field is ignored in "shared" mode. + | 0 |
| *`clusterCIDR`* __string__ | ClusterCIDR is the CIDR range for pod IPs. +
Defaults to 10.42.0.0/16 in shared mode and 10.52.0.0/16 in virtual mode. +
This field is immutable. + | |
| *`serviceCIDR`* __string__ | ServiceCIDR is the CIDR range for service IPs. +
Defaults to 10.43.0.0/16 in shared mode and 10.53.0.0/16 in virtual mode. +
This field is immutable. + | |
| *`clusterDNS`* __string__ | ClusterDNS is the IP address for the CoreDNS service. +
Must be within the ServiceCIDR range. Defaults to 10.43.0.10. +
This field is immutable. + | |
| *`persistence`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-persistenceconfig[$$PersistenceConfig$$]__ | Persistence specifies options for persisting etcd data. +
Defaults to dynamic persistence, which uses a PersistentVolumeClaim to provide data persistence. +
A default StorageClass is required for dynamic persistence. + | |
| *`expose`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-exposeconfig[$$ExposeConfig$$]__ | Expose specifies options for exposing the API server. +
By default, it's only exposed as a ClusterIP. + | |
| *`nodeSelector`* __object (keys:string, values:string)__ | NodeSelector specifies node labels to constrain where server/agent pods are scheduled. +
In "shared" mode, this also applies to workloads. + | |
| *`priorityClass`* __string__ | PriorityClass specifies the priorityClassName for server/agent pods. +
In "shared" mode, this also applies to workloads. + | |
| *`tokenSecretRef`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#secretreference-v1-core[$$SecretReference$$]__ | TokenSecretRef is a Secret reference containing the token used by worker nodes to join the cluster. +
The Secret must have a "token" field in its data. + | |
| *`tlsSANs`* __string array__ | TLSSANs specifies subject alternative names for the K3s server certificate. + | |
| *`serverArgs`* __string array__ | ServerArgs specifies ordered key-value pairs for K3s server pods. +
Example: ["--tls-san=example.com"] + | |
| *`agentArgs`* __string array__ | AgentArgs specifies ordered key-value pairs for K3s agent pods. +
Example: ["--node-name=my-agent-node"] + | |
| *`serverEnvs`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#envvar-v1-core[$$EnvVar$$] array__ | ServerEnvs specifies a list of environment variables to set in the server pod. + | |
| *`agentEnvs`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#envvar-v1-core[$$EnvVar$$] array__ | AgentEnvs specifies a list of environment variables to set in the agent pod. + | |
| *`addons`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-addon[$$Addon$$] array__ | Addons specifies secrets containing raw YAML to deploy on cluster startup. + | |
| *`serverLimit`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core[$$ResourceList$$]__ | ServerLimit specifies resource limits for server nodes. + | |
| *`workerLimit`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core[$$ResourceList$$]__ | WorkerLimit specifies resource limits for agent nodes. + | |
| *`mirrorHostNodes`* __boolean__ | MirrorHostNodes controls whether node objects from the host cluster +
are mirrored into the virtual cluster. + | |
| *`customCAs`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-customcas[$$CustomCAs$$]__ | CustomCAs specifies the cert/key pairs for custom CA certificates. + | |
| *`sync`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-syncconfig[$$SyncConfig$$]__ | Sync specifies the resource types that will be synced from the virtual cluster to the host cluster. + | { } |
|===
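For orientation, a minimal Cluster manifest touching a few of the fields above might look like the sketch below; the metadata values are placeholders and the K3s version is only the example used in the table:

```
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
  name: example-cluster       # placeholder name
  namespace: example-ns       # placeholder namespace
spec:
  mode: shared                # "shared" (default) or "virtual"; immutable
  servers: 1                  # number of K3s server pods
  agents: 0                   # ignored in "shared" mode
  version: v1.28.2-k3s1       # example from the table; defaults to the host version when omitted
  tlsSANs:
    - example.com             # extra SAN for the K3s server certificate
```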
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-configmapsyncconfig"]
=== ConfigMapSyncConfig
ConfigMapSyncConfig specifies the sync options for ConfigMaps.
_Appears In:_
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-syncconfig[$$SyncConfig$$]
[cols="25a,55a,10a,10a", options="header"]
|===
| Field | Description | Default | Validation
| *`enabled`* __boolean__ | Enabled is an on/off switch for syncing resources. + | true |
| *`selector`* __object (keys:string, values:string)__ | Selector specifies the set of labels of the resources that will be synced; if empty, +
all resources of the given type will be synced. + | |
|===
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-credentialsource"]
=== CredentialSource
CredentialSource defines where to get a credential from.
It can represent either a TLS key pair or a single private key.
_Appears In:_
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-credentialsources[$$CredentialSources$$]
[cols="25a,55a,10a,10a", options="header"]
|===
| Field | Description | Default | Validation
| *`secretName`* __string__ | SecretName specifies the name of an existing secret to use. +
The controller expects specific keys inside based on the credential type: +
- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'. +
- For ServiceAccountTokenKey: 'tls.key'. + | |
|===
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-credentialsources"]
=== CredentialSources
CredentialSources lists all the required credentials, including both
TLS key pairs and single signing keys.
_Appears In:_
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-customcas[$$CustomCAs$$]
[cols="25a,55a,10a,10a", options="header"]
|===
| Field | Description | Default | Validation
| *`serverCA`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-credentialsource[$$CredentialSource$$]__ | ServerCA specifies the server-ca cert/key pair. + | |
| *`clientCA`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-credentialsource[$$CredentialSource$$]__ | ClientCA specifies the client-ca cert/key pair. + | |
| *`requestHeaderCA`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-credentialsource[$$CredentialSource$$]__ | RequestHeaderCA specifies the request-header-ca cert/key pair. + | |
| *`etcdServerCA`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-credentialsource[$$CredentialSource$$]__ | ETCDServerCA specifies the etcd-server-ca cert/key pair. + | |
| *`etcdPeerCA`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-credentialsource[$$CredentialSource$$]__ | ETCDPeerCA specifies the etcd-peer-ca cert/key pair. + | |
| *`serviceAccountToken`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-credentialsource[$$CredentialSource$$]__ | ServiceAccountToken specifies the service-account-token key. + | |
|===
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-customcas"]
=== CustomCAs
CustomCAs specifies the cert/key pairs for custom CA certificates.
_Appears In:_
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterspec[$$ClusterSpec$$]
[cols="25a,55a,10a,10a", options="header"]
|===
| Field | Description | Default | Validation
| *`enabled`* __boolean__ | Enabled toggles this feature on or off. + | true |
| *`sources`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-credentialsources[$$CredentialSources$$]__ | Sources defines the sources for all required custom CA certificates. + | |
|===
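Sketching how these three types nest, a Cluster could reference existing Secrets for its custom CAs as shown below; the Secret names are placeholders:

```
spec:
  customCAs:
    enabled: true
    sources:
      serverCA:
        secretName: my-server-ca        # expects tls.crt and tls.key
      clientCA:
        secretName: my-client-ca        # expects tls.crt and tls.key
      serviceAccountToken:
        secretName: my-sa-signing-key   # expects tls.key only
```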
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-exposeconfig"]
=== ExposeConfig
ExposeConfig specifies options for exposing the API server.
_Appears In:_
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterspec[$$ClusterSpec$$]
[cols="25a,55a,10a,10a", options="header"]
|===
| Field | Description | Default | Validation
| *`ingress`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-ingressconfig[$$IngressConfig$$]__ | Ingress specifies options for exposing the API server through an Ingress. + | |
| *`loadBalancer`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-loadbalancerconfig[$$LoadBalancerConfig$$]__ | LoadBalancer specifies options for exposing the API server through a LoadBalancer service. + | |
| *`nodePort`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-nodeportconfig[$$NodePortConfig$$]__ | NodePort specifies options for exposing the API server through NodePort. + | |
|===
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-ingressconfig"]
=== IngressConfig
IngressConfig specifies options for exposing the API server through an Ingress.
_Appears In:_
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-exposeconfig[$$ExposeConfig$$]
[cols="25a,55a,10a,10a", options="header"]
|===
| Field | Description | Default | Validation
| *`annotations`* __object (keys:string, values:string)__ | Annotations specifies annotations to add to the Ingress. + | |
| *`ingressClassName`* __string__ | IngressClassName specifies the IngressClass to use for the Ingress. + | |
|===
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-ingresssyncconfig"]
=== IngressSyncConfig
IngressSyncConfig specifies the sync options for Ingresses.
_Appears In:_
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-syncconfig[$$SyncConfig$$]
[cols="25a,55a,10a,10a", options="header"]
|===
| Field | Description | Default | Validation
| *`enabled`* __boolean__ | Enabled is an on/off switch for syncing resources. + | false |
| *`selector`* __object (keys:string, values:string)__ | Selector specifies the set of labels of the resources that will be synced; if empty, +
all resources of the given type will be synced. + | |
|===
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-loadbalancerconfig"]
=== LoadBalancerConfig
LoadBalancerConfig specifies options for exposing the API server through a LoadBalancer service.
_Appears In:_
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-exposeconfig[$$ExposeConfig$$]
[cols="25a,55a,10a,10a", options="header"]
|===
| Field | Description | Default | Validation
| *`serverPort`* __integer__ | ServerPort is the port on which the K3s server is exposed when type is LoadBalancer. +
If not specified, the default https 443 port will be allocated. +
If 0 or negative, the port will not be exposed. + | |
| *`etcdPort`* __integer__ | ETCDPort is the port on which the ETCD service is exposed when type is LoadBalancer. +
If not specified, the default etcd 2379 port will be allocated. +
If 0 or negative, the port will not be exposed. + | |
|===
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-nodeportconfig"]
=== NodePortConfig
NodePortConfig specifies options for exposing the API server through NodePort.
_Appears In:_
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-exposeconfig[$$ExposeConfig$$]
[cols="25a,55a,10a,10a", options="header"]
|===
| Field | Description | Default | Validation
| *`serverPort`* __integer__ | ServerPort is the port on each node on which the K3s server is exposed when type is NodePort. +
If not specified, a random port between 30000-32767 will be allocated. +
If out of range, the port will not be exposed. + | |
| *`etcdPort`* __integer__ | ETCDPort is the port on each node on which the ETCD service is exposed when type is NodePort. +
If not specified, a random port between 30000-32767 will be allocated. +
If out of range, the port will not be exposed. + | |
|===
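To see how the three exposure options fit under `expose`, here is a hedged sketch; the port values are examples that only illustrate the defaults described above:

```
spec:
  expose:
    loadBalancer:
      serverPort: 443      # default HTTPS port; 0 or negative disables exposure
      etcdPort: 2379       # default etcd port
    # Alternatively, an Ingress or NodePort could be configured instead:
    # ingress:
    #   ingressClassName: nginx    # example class name
    # nodePort:
    #   serverPort: 30443          # must fall within 30000-32767
```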
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-persistenceconfig"]
=== PersistenceConfig
PersistenceConfig specifies options for persisting etcd data.
_Appears In:_
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterspec[$$ClusterSpec$$]
[cols="25a,55a,10a,10a", options="header"]
|===
| Field | Description | Default | Validation
| *`type`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-persistencemode[$$PersistenceMode$$]__ | Type specifies the persistence mode. + | dynamic |
| *`storageClassName`* __string__ | StorageClassName is the name of the StorageClass to use for the PVC. +
This field is only relevant in "dynamic" mode. + | |
| *`storageRequestSize`* __string__ | StorageRequestSize is the requested size for the PVC. +
This field is only relevant in "dynamic" mode. + | 2G |
|===
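For example, dynamic persistence with an explicit StorageClass could be requested as follows; the class name is a placeholder and the size matches the documented default:

```
spec:
  persistence:
    type: dynamic               # default persistence mode
    storageClassName: standard  # placeholder; only relevant in "dynamic" mode
    storageRequestSize: 2G      # documented default request size
```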
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-persistencemode"]
=== PersistenceMode
_Underlying type:_ _string_
PersistenceMode is the storage mode of a Cluster.
_Appears In:_
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-persistenceconfig[$$PersistenceConfig$$]
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-persistentvolumeclaimsyncconfig"]
=== PersistentVolumeClaimSyncConfig
PersistentVolumeClaimSyncConfig specifies the sync options for PersistentVolumeClaims.
_Appears In:_
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-syncconfig[$$SyncConfig$$]
[cols="25a,55a,10a,10a", options="header"]
|===
| Field | Description | Default | Validation
| *`enabled`* __boolean__ | Enabled is an on/off switch for syncing resources. + | true |
| *`selector`* __object (keys:string, values:string)__ | Selector specifies the set of labels of the resources that will be synced; if empty, +
all resources of the given type will be synced. + | |
|===
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-podsecurityadmissionlevel"]
=== PodSecurityAdmissionLevel
_Underlying type:_ _string_
PodSecurityAdmissionLevel is the policy level applied to the pods in the namespace.
_Validation:_
- Enum: [privileged baseline restricted]
_Appears In:_
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-virtualclusterpolicyspec[$$VirtualClusterPolicySpec$$]
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-priorityclasssyncconfig"]
=== PriorityClassSyncConfig
PriorityClassSyncConfig specifies the sync options for PriorityClasses.
_Appears In:_
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-syncconfig[$$SyncConfig$$]
[cols="25a,55a,10a,10a", options="header"]
|===
| Field | Description | Default | Validation
| *`enabled`* __boolean__ | Enabled is an on/off switch for syncing resources. + | false |
| *`selector`* __object (keys:string, values:string)__ | Selector specifies the set of labels of the resources that will be synced; if empty, +
all resources of the given type will be synced. + | |
|===
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-secretsyncconfig"]
=== SecretSyncConfig
SecretSyncConfig specifies the sync options for Secrets.
_Appears In:_
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-syncconfig[$$SyncConfig$$]
[cols="25a,55a,10a,10a", options="header"]
|===
| Field | Description | Default | Validation
| *`enabled`* __boolean__ | Enabled is an on/off switch for syncing resources. + | true |
| *`selector`* __object (keys:string, values:string)__ | Selector specifies the set of labels of the resources that will be synced; if empty, +
all resources of the given type will be synced. + | |
|===
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-servicesyncconfig"]
=== ServiceSyncConfig
ServiceSyncConfig specifies the sync options for services.
_Appears In:_
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-syncconfig[$$SyncConfig$$]
[cols="25a,55a,10a,10a", options="header"]
|===
| Field | Description | Default | Validation
| *`enabled`* __boolean__ | Enabled is an on/off switch for syncing resources. + | true |
| *`selector`* __object (keys:string, values:string)__ | Selector specifies the set of labels of the resources that will be synced; if empty, +
all resources of the given type will be synced. + | |
|===
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-syncconfig"]
=== SyncConfig
SyncConfig contains the resources that should be synced from the virtual cluster to the host cluster.
_Appears In:_
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterspec[$$ClusterSpec$$]
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-virtualclusterpolicyspec[$$VirtualClusterPolicySpec$$]
[cols="25a,55a,10a,10a", options="header"]
|===
| Field | Description | Default | Validation
| *`services`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-servicesyncconfig[$$ServiceSyncConfig$$]__ | Services resources sync configuration. + | { enabled:true } |
| *`configMaps`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-configmapsyncconfig[$$ConfigMapSyncConfig$$]__ | ConfigMaps resources sync configuration. + | { enabled:true } |
| *`secrets`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-secretsyncconfig[$$SecretSyncConfig$$]__ | Secrets resources sync configuration. + | { enabled:true } |
| *`ingresses`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-ingresssyncconfig[$$IngressSyncConfig$$]__ | Ingresses resources sync configuration. + | { enabled:false } |
| *`persistentVolumeClaims`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-persistentvolumeclaimsyncconfig[$$PersistentVolumeClaimSyncConfig$$]__ | PersistentVolumeClaims resources sync configuration. + | { enabled:true } |
| *`priorityClasses`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-priorityclasssyncconfig[$$PriorityClassSyncConfig$$]__ | PriorityClasses resources sync configuration. + | { enabled:false } |
|===
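Translating the defaults above into a manifest, a `sync` block that additionally turns on Ingress syncing for labelled resources might look like this; the label key and value are placeholders:

```
spec:
  sync:
    services:
      enabled: true          # default
    ingresses:
      enabled: true          # default is false
      selector:
        sync: "true"         # placeholder label; an empty selector syncs all resources of the type
```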
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-virtualclusterpolicy"]
=== VirtualClusterPolicy
VirtualClusterPolicy allows defining common configurations and constraints
for the clusters it governs.
_Appears In:_
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-virtualclusterpolicylist[$$VirtualClusterPolicyList$$]
[cols="25a,55a,10a,10a", options="header"]
|===
| Field | Description | Default | Validation
| *`apiVersion`* __string__ | `k3k.io/v1beta1` | |
| *`kind`* __string__ | `VirtualClusterPolicy` | |
| *`metadata`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#objectmeta-v1-meta[$$ObjectMeta$$]__ | Refer to Kubernetes API documentation for fields of `metadata`.
| |
| *`spec`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-virtualclusterpolicyspec[$$VirtualClusterPolicySpec$$]__ | Spec defines the desired state of the VirtualClusterPolicy. + | { } |
|===
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-virtualclusterpolicylist"]
=== VirtualClusterPolicyList
VirtualClusterPolicyList is a list of VirtualClusterPolicy resources.
[cols="25a,55a,10a,10a", options="header"]
|===
| Field | Description | Default | Validation
| *`apiVersion`* __string__ | `k3k.io/v1beta1` | |
| *`kind`* __string__ | `VirtualClusterPolicyList` | |
| *`metadata`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#listmeta-v1-meta[$$ListMeta$$]__ | Refer to Kubernetes API documentation for fields of `metadata`.
| |
| *`items`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-virtualclusterpolicy[$$VirtualClusterPolicy$$] array__ | | |
|===
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-virtualclusterpolicyspec"]
=== VirtualClusterPolicySpec
VirtualClusterPolicySpec defines the desired state of a VirtualClusterPolicy.
_Appears In:_
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-virtualclusterpolicy[$$VirtualClusterPolicy$$]
[cols="25a,55a,10a,10a", options="header"]
|===
| Field | Description | Default | Validation
| *`quota`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcequotaspec-v1-core[$$ResourceQuotaSpec$$]__ | Quota specifies the resource limits for clusters governed by the policy. + | |
| *`limit`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#limitrangespec-v1-core[$$LimitRangeSpec$$]__ | Limit specifies the LimitRange that will be applied to all pods within the VirtualClusterPolicy +
to set defaults and constraints (min/max). + | |
| *`defaultNodeSelector`* __object (keys:string, values:string)__ | DefaultNodeSelector specifies the node selector that applies to all clusters (server + agent) in the target Namespace. + | |
| *`defaultPriorityClass`* __string__ | DefaultPriorityClass specifies the priorityClassName applied to all pods of all clusters in the target Namespace. + | |
| *`allowedMode`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clustermode[$$ClusterMode$$]__ | AllowedMode specifies the allowed cluster provisioning mode. Defaults to "shared". + | shared | Enum: [shared virtual] +
| *`disableNetworkPolicy`* __boolean__ | DisableNetworkPolicy indicates whether to disable the creation of a default network policy for cluster isolation. + | |
| *`podSecurityAdmissionLevel`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-podsecurityadmissionlevel[$$PodSecurityAdmissionLevel$$]__ | PodSecurityAdmissionLevel specifies the pod security admission level applied to the pods in the namespace. + | | Enum: [privileged baseline restricted] +
| *`sync`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-syncconfig[$$SyncConfig$$]__ | Sync specifies the resource types that will be synced from the virtual cluster to the host cluster. + | { } |
|===
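A minimal VirtualClusterPolicy using only fields from this table could look like the sketch below; the name and quota values are placeholders:

```
apiVersion: k3k.io/v1beta1
kind: VirtualClusterPolicy
metadata:
  name: example-policy              # placeholder name
spec:
  allowedMode: shared               # default allowed provisioning mode
  podSecurityAdmissionLevel: baseline
  quota:
    hard:
      pods: "10"                    # placeholder ResourceQuota values
      requests.cpu: "2"
```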

View File

@@ -0,0 +1,19 @@
{{- define "gvDetails" -}}
{{- $gv := . -}}
[id="{{ asciidocGroupVersionID $gv | asciidocRenderAnchorID }}"]
== {{ $gv.GroupVersionString }}
{{ $gv.Doc }}
{{- if $gv.Kinds }}
=== Resource Types
{{- range $gv.SortedKinds }}
- {{ $gv.TypeForKind . | asciidocRenderTypeLink }}
{{- end }}
{{ end }}
{{ range $gv.SortedTypes }}
{{ template "type" . }}
{{ end }}
{{- end -}}

View File

@@ -0,0 +1,19 @@
{{- define "gvList" -}}
{{- $groupVersions := . -}}
[id="k3k-api-reference"]
= API Reference
:revdate: "2006-01-02"
:page-revdate: {revdate}
:anchor_prefix: k8s-api
== Packages
{{- range $groupVersions }}
- {{ asciidocRenderGVLink . }}
{{- end }}
{{ range $groupVersions }}
{{ template "gvDetails" . }}
{{ end }}
{{- end -}}

View File

@@ -0,0 +1,43 @@
{{- define "type" -}}
{{- $type := . -}}
{{- if asciidocShouldRenderType $type -}}
[id="{{ asciidocTypeID $type | asciidocRenderAnchorID }}"]
=== {{ $type.Name }}
{{ if $type.IsAlias }}_Underlying type:_ _{{ asciidocRenderTypeLink $type.UnderlyingType }}_{{ end }}
{{ $type.Doc }}
{{ if $type.Validation -}}
_Validation:_
{{- range $type.Validation }}
- {{ . }}
{{- end }}
{{- end }}
{{ if $type.References -}}
_Appears In:_
{{ range $type.SortedReferences }}
* {{ asciidocRenderTypeLink . }}
{{- end }}
{{- end }}
{{ if $type.Members -}}
[cols="25a,55a,10a,10a", options="header"]
|===
| Field | Description | Default | Validation
{{ if $type.GVK -}}
| *`apiVersion`* __string__ | `{{ $type.GVK.Group }}/{{ $type.GVK.Version }}` | |
| *`kind`* __string__ | `{{ $type.GVK.Kind }}` | |
{{ end -}}
{{ range $type.Members -}}
| *`{{ .Name }}`* __{{ asciidocRenderType .Type }}__ | {{ template "type_members" . }} | {{ .Default }} | {{ range .Validation -}} {{ asciidocRenderValidation . }} +
{{ end }}
{{ end -}}
|===
{{ end -}}
{{- end -}}
{{- end -}}

View File

@@ -0,0 +1,8 @@
{{- define "type_members" -}}
{{- $field := . -}}
{{- if eq $field.Name "metadata" -}}
Refer to Kubernetes API documentation for fields of `metadata`.
{{ else -}}
{{ asciidocRenderFieldDoc $field.Doc }}
{{- end -}}
{{- end -}}

View File

@@ -3,8 +3,8 @@
This guide walks through the various ways to create and manage virtual clusters in K3K. We'll cover common use cases using both the **Custom Resource Definitions (CRDs)** and the **K3K CLI**, so you can choose the method that fits your workflow.
> 📘 For full reference:
> - [CRD Reference Documentation](../crds/crd-docs.md)
> - [CLI Reference Documentation](../cli/cli-docs.md)
> - [CRD Reference Documentation](../crds/crds.md)
> - [CLI Reference Documentation](../cli/k3kcli.md)
> - [Full example](../advanced-usage.md)
> [!NOTE]

View File

@@ -143,5 +143,5 @@ spec:
## Further Reading
* For a complete reference of all `VirtualClusterPolicy` spec fields, see the [API Reference for VirtualClusterPolicy](./crds/crd-docs.md#virtualclusterpolicy).
* For a complete reference of all `VirtualClusterPolicy` spec fields, see the [API Reference for VirtualClusterPolicy](./crds/crds.md#virtualclusterpolicy).
* To understand how VCPs fit into the overall K3k system, see the [Architecture](./architecture.md) document.

63
go.mod
View File

@@ -1,6 +1,6 @@
module github.com/rancher/k3k
go 1.24.2
go 1.24.10
replace (
github.com/google/cel-go => github.com/google/cel-go v0.20.1
@@ -18,8 +18,10 @@ require (
github.com/onsi/gomega v1.36.0
github.com/rancher/dynamiclistener v1.27.5
github.com/sirupsen/logrus v1.9.3
github.com/spf13/viper v1.20.1
github.com/stretchr/testify v1.10.0
github.com/spf13/cobra v1.10.1
github.com/spf13/pflag v1.0.10
github.com/spf13/viper v1.21.0
github.com/stretchr/testify v1.11.1
github.com/testcontainers/testcontainers-go v0.35.0
github.com/testcontainers/testcontainers-go/modules/k3s v0.35.0
github.com/virtual-kubelet/virtual-kubelet v1.11.1-0.20250530103808-c9f64e872803
@@ -28,17 +30,17 @@ require (
go.uber.org/zap v1.27.0
gopkg.in/yaml.v2 v2.4.0
helm.sh/helm/v3 v3.14.4
k8s.io/api v0.31.4
k8s.io/apiextensions-apiserver v0.31.4
k8s.io/apimachinery v0.31.4
k8s.io/apiserver v0.31.4
k8s.io/cli-runtime v0.31.4
k8s.io/client-go v0.31.4
k8s.io/component-base v0.31.4
k8s.io/component-helpers v0.31.4
k8s.io/kubectl v0.31.4
k8s.io/kubelet v0.31.4
k8s.io/kubernetes v1.31.4
k8s.io/api v0.31.13
k8s.io/apiextensions-apiserver v0.31.13
k8s.io/apimachinery v0.31.13
k8s.io/apiserver v0.31.13
k8s.io/cli-runtime v0.31.13
k8s.io/client-go v0.31.13
k8s.io/component-base v0.31.13
k8s.io/component-helpers v0.31.13
k8s.io/kubectl v0.31.13
k8s.io/kubelet v0.31.13
k8s.io/kubernetes v1.31.13
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
sigs.k8s.io/controller-runtime v0.19.4
)
@@ -86,7 +88,7 @@ require (
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
github.com/fatih/color v1.13.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.8.0 // indirect
github.com/fsnotify/fsnotify v1.9.0 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/go-errors/errors v1.4.2 // indirect
github.com/go-gorp/gorp/v3 v3.1.0 // indirect
@@ -96,7 +98,7 @@ require (
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
@@ -153,7 +155,7 @@ require (
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0 // indirect
github.com/pelletier/go-toml/v2 v2.2.3 // indirect
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
@@ -164,15 +166,13 @@ require (
github.com/prometheus/procfs v0.15.1 // indirect
github.com/rubenv/sql-migrate v1.7.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/sagikazarmark/locafero v0.7.0 // indirect
github.com/sagikazarmark/locafero v0.11.0 // indirect
github.com/shirou/gopsutil/v3 v3.23.12 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
github.com/shopspring/decimal v1.4.0 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/afero v1.12.0 // indirect
github.com/spf13/cast v1.7.1 // indirect
github.com/spf13/cobra v1.9.1
github.com/spf13/pflag v1.0.6
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect
github.com/spf13/afero v1.15.0 // indirect
github.com/spf13/cast v1.10.0 // indirect
github.com/stoewer/go-strcase v1.3.0 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/tklauser/go-sysconf v0.3.12 // indirect
@@ -196,16 +196,17 @@ require (
go.opentelemetry.io/otel/trace v1.33.0 // indirect
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.38.0 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/crypto v0.40.0 // indirect
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
golang.org/x/net v0.40.0 // indirect
golang.org/x/net v0.42.0 // indirect
golang.org/x/oauth2 v0.30.0 // indirect
golang.org/x/sync v0.14.0 // indirect
golang.org/x/sys v0.33.0 // indirect
golang.org/x/term v0.32.0 // indirect
golang.org/x/text v0.25.0 // indirect
golang.org/x/sync v0.16.0 // indirect
golang.org/x/sys v0.34.0 // indirect
golang.org/x/term v0.33.0 // indirect
golang.org/x/text v0.28.0 // indirect
golang.org/x/time v0.9.0 // indirect
golang.org/x/tools v0.26.0 // indirect
golang.org/x/tools v0.35.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 // indirect
@@ -216,7 +217,7 @@ require (
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/klog/v2 v2.130.1
k8s.io/kms v0.31.4 // indirect
k8s.io/kms v0.31.13 // indirect
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
oras.land/oras-go v1.2.5 // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect

117
go.sum
View File

@@ -137,8 +137,8 @@ github.com/foxcpp/go-mockdns v1.0.0 h1:7jBqxd3WDWwi/6WhDvacvH1XsN3rOLXyHM1uhvIx6
github.com/foxcpp/go-mockdns v1.0.0/go.mod h1:lgRN6+KxQBawyIghpnl5CezHFGS9VLzvtVlwxvzXTQ4=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
@@ -166,8 +166,8 @@ github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpv
github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
@@ -353,8 +353,8 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M=
github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc=
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI=
@@ -388,8 +388,8 @@ github.com/rubenv/sql-migrate v1.7.1 h1:f/o0WgfO/GqNuVg+6801K/KW3WdDSupzSjDYODmi
github.com/rubenv/sql-migrate v1.7.1/go.mod h1:Ob2Psprc0/3ggbM6wCzyYVFFuc6FyZrb2AS+ezLDFb4=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo=
github.com/sagikazarmark/locafero v0.7.0/go.mod h1:2za3Cg5rMaTMoG/2Ulr9AwtFaIppKXTRYnozin4aB5k=
github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc=
github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik=
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4=
@@ -404,18 +404,19 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs=
github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4=
github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y=
github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4=
github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4=
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw=
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U=
github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I=
github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg=
github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=
github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU=
github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY=
github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs=
github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -433,8 +434,9 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
github.com/testcontainers/testcontainers-go v0.35.0 h1:uADsZpTKFAtp8SLK+hMwSaa+X+JiERHtd4sQAFmXeMo=
@@ -520,6 +522,8 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@@ -527,8 +531,9 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8=
golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw=
golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
@@ -560,8 +565,9 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I=
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
@@ -578,8 +584,9 @@ golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ=
golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -603,8 +610,9 @@ golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
@@ -617,8 +625,9 @@ golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4=
golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA=
golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -633,8 +642,8 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0=
golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -647,8 +656,8 @@ google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 h1:ToEetK57OidYuqD4Q5w+vfEnPvPpuTwedCNVohYJfNk=
google.golang.org/genproto v0.0.0-20241118233622-e639e219e697/go.mod h1:JJrvXBWRZaFMxBufik1a4RpFw4HhgVtBBWQeQgUj2cc=
google.golang.org/genproto v0.0.0-20231211222908-989df2bf70f3 h1:1hfbdAfFbkmpg41000wDVqr7jUpK/Yo+LPnIxxGzmkg=
google.golang.org/genproto v0.0.0-20231211222908-989df2bf70f3/go.mod h1:5RBcpGRxr25RbDzY5w+dmaqpSEvl8Gwl1x2CICf60ic=
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q=
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 h1:TqExAhdPaB60Ux47Cn0oLV07rGnxZzIsaRhQaqS666A=
@@ -700,34 +709,34 @@ helm.sh/helm/v3 v3.14.4 h1:6FSpEfqyDalHq3kUr4gOMThhgY55kXUEjdQoyODYnrM=
helm.sh/helm/v3 v3.14.4/go.mod h1:Tje7LL4gprZpuBNTbG34d1Xn5NmRT3OWfBRwpOSer9I=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
k8s.io/api v0.31.4 h1:I2QNzitPVsPeLQvexMEsj945QumYraqv9m74isPDKhM=
k8s.io/api v0.31.4/go.mod h1:d+7vgXLvmcdT1BCo79VEgJxHHryww3V5np2OYTr6jdw=
k8s.io/apiextensions-apiserver v0.31.4 h1:FxbqzSvy92Ca9DIs5jqot883G0Ln/PGXfm/07t39LS0=
k8s.io/apiextensions-apiserver v0.31.4/go.mod h1:hIW9YU8UsqZqIWGG99/gsdIU0Ar45Qd3A12QOe/rvpg=
k8s.io/apimachinery v0.31.4 h1:8xjE2C4CzhYVm9DGf60yohpNUh5AEBnPxCryPBECmlM=
k8s.io/apimachinery v0.31.4/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
k8s.io/apiserver v0.31.4 h1:JbtnTaXVYEAYIHJil6Wd74Wif9sd8jVcBw84kwEmp7o=
k8s.io/apiserver v0.31.4/go.mod h1:JJjoTjZ9PTMLdIFq7mmcJy2B9xLN3HeAUebW6xZyIP0=
k8s.io/cli-runtime v0.31.4 h1:iczCWiyXaotW+hyF5cWP8RnEYBCzZfJUF6otJ2m9mw0=
k8s.io/cli-runtime v0.31.4/go.mod h1:0/pRzAH7qc0hWx40ut1R4jLqiy2w/KnbqdaAI2eFG8U=
k8s.io/client-go v0.31.4 h1:t4QEXt4jgHIkKKlx06+W3+1JOwAFU/2OPiOo7H92eRQ=
k8s.io/client-go v0.31.4/go.mod h1:kvuMro4sFYIa8sulL5Gi5GFqUPvfH2O/dXuKstbaaeg=
k8s.io/component-base v0.31.4 h1:wCquJh4ul9O8nNBSB8N/o8+gbfu3BVQkVw9jAUY/Qtw=
k8s.io/component-base v0.31.4/go.mod h1:G4dgtf5BccwiDT9DdejK0qM6zTK0jwDGEKnCmb9+u/s=
k8s.io/component-helpers v0.31.4 h1:pqokuXozyWVrVBMmx0AMcKqNWqXhR00OZvpAE5hG5NM=
k8s.io/component-helpers v0.31.4/go.mod h1:Ddq5GYRK/1uNoPNgJh9N5osPutvBweQEcIG6b8kcvgQ=
k8s.io/api v0.31.13 h1:sco9Cq2pY4Ysv9qZiWzcR97MmA/35nwYQ/VCTzOcWmc=
k8s.io/api v0.31.13/go.mod h1:4D8Ry8RqqLDemNLwGYC6v5wOy51N7hitr4WQ6oSWfLY=
k8s.io/apiextensions-apiserver v0.31.13 h1:8xtWKVpV/YbYX0UX2k6w+cgxfxKhX0UNGuo/VXAdg8g=
k8s.io/apiextensions-apiserver v0.31.13/go.mod h1:zxpMLWXBxnJqKUIruJ+ulP+Xlfe5lPZPxq1z0cLwA2U=
k8s.io/apimachinery v0.31.13 h1:rkG0EiBkBkEzURo/8dKGx/oBF202Z2LqHuSD8Cm3bG4=
k8s.io/apimachinery v0.31.13/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
k8s.io/apiserver v0.31.13 h1:Ke9/X2m3vHSgsminpAbUxULDNMbvAfjrRX73Gqx6CZc=
k8s.io/apiserver v0.31.13/go.mod h1:5nBPhL2g7am/CS+/OI5A6+olEbo0C7tQ8QNDODLd+WY=
k8s.io/cli-runtime v0.31.13 h1:oz37PuIe4JyUDfTf8JKcZye1obyYAwF146gRpcj+AR8=
k8s.io/cli-runtime v0.31.13/go.mod h1:x6QI7U97fvrplKgd3JEvCpoZKR9AorjvDjBEr1GZG+g=
k8s.io/client-go v0.31.13 h1:Q0LG51uFbzNd9fzIj5ilA0Sm1wUholHvDaNwVKzqdCA=
k8s.io/client-go v0.31.13/go.mod h1:UB4yTzQeRAv+vULOKp2jdqA5LSwV55bvc3RQ5tM48LM=
k8s.io/component-base v0.31.13 h1:/uVLq7yHk9azReqeCFAZSr/8NXydzpz7yDZ6p/yiwBQ=
k8s.io/component-base v0.31.13/go.mod h1:uMXtKNyDqeNdZYL6SRCr9wB6FutL9pOlQmkK2dRVAKQ=
k8s.io/component-helpers v0.31.13 h1:Yy7j+Va7u6v0DXaKqMEOfIcq5pFnvzFcSGM58/lskeA=
k8s.io/component-helpers v0.31.13/go.mod h1:nXTLwkwCjXcrPG62D0IYiKuKi6JkFM2mBe2myrOUeug=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kms v0.31.4 h1:DVk9T1PHxG7IUMfWs1sDhBTbzGnM7lhMJO8lOzOzTIs=
k8s.io/kms v0.31.4/go.mod h1:OZKwl1fan3n3N5FFxnW5C4V3ygrah/3YXeJWS3O6+94=
k8s.io/kms v0.31.13 h1:pJCG79BqdCmGetUsETwKfq+OE/D3M1DdqH14EKQl0lI=
k8s.io/kms v0.31.13/go.mod h1:OZKwl1fan3n3N5FFxnW5C4V3ygrah/3YXeJWS3O6+94=
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y=
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4=
k8s.io/kubectl v0.31.4 h1:c8Af8xd1VjyoKyWMW0xHv2+tYxEjne8s6OOziMmaD10=
k8s.io/kubectl v0.31.4/go.mod h1:0E0rpXg40Q57wRE6LB9su+4tmwx1IzZrmIEvhQPk0i4=
k8s.io/kubelet v0.31.4 h1:6TokbMv+HnFG7Oe9tVS/J0VPGdC4GnsQZXuZoo7Ixi8=
k8s.io/kubelet v0.31.4/go.mod h1:8ZM5LZyANoVxUtmayUxD/nsl+6GjREo7kSanv8AoL4U=
k8s.io/kubernetes v1.31.4 h1:VQDX52gTQnq8C/jCo48AQuDsWbWMh9XXxhQRDYjgakw=
k8s.io/kubernetes v1.31.4/go.mod h1:9xmT2buyTYj8TRKwRae7FcuY8k5+xlxv7VivvO0KKfs=
k8s.io/kubectl v0.31.13 h1:VcSyzFsZ7Fi991FzK80hy+9clUIhChbnQg2L6eZRQzA=
k8s.io/kubectl v0.31.13/go.mod h1:IxUKvsKrvqEL7NcaBCQCVDLzcYghu8b9yMiYKx8nYho=
k8s.io/kubelet v0.31.13 h1:wN9NXmj9DRFTMph1EhAtdQ6+UfEHKV3B7XMKcJr122c=
k8s.io/kubelet v0.31.13/go.mod h1:DxEqJViO7GE5dZXvEJGsP5HORNTSj9MhMQi1JDirCQs=
k8s.io/kubernetes v1.31.13 h1:c/YugS3TqG6YQMNRclrcWVabgIuqyap++lM5AuCtD5M=
k8s.io/kubernetes v1.31.13/go.mod h1:9xmT2buyTYj8TRKwRae7FcuY8k5+xlxv7VivvO0KKfs=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
oras.land/oras-go v1.2.5 h1:XpYuAwAb0DfQsunIyMfeET92emK8km3W4yEzZvUbsTo=

View File

@@ -5,6 +5,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/component-helpers/storage/volume"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
@@ -12,6 +13,7 @@ import (
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
@@ -22,6 +24,7 @@ import (
const (
pvcControllerName = "pvc-syncer-controller"
pvcFinalizerName = "pvc.k3k.io/finalizer"
pseudoPVLabel = "pod.k3k.io/pseudoPV"
)
type PVCReconciler struct {
@@ -105,6 +108,12 @@ func (r *PVCReconciler) Reconcile(ctx context.Context, req reconcile.Request) (r
if err := r.HostClient.Delete(ctx, syncedPVC); err != nil && !apierrors.IsNotFound(err) {
return reconcile.Result{}, err
}
// delete the synced virtual PV
if err := r.VirtualClient.Delete(ctx, newPersistentVolume(&virtPVC)); err != nil && !apierrors.IsNotFound(err) {
return reconcile.Result{}, err
}
// remove the finalizer after cleaning up the synced pvc
if controllerutil.RemoveFinalizer(&virtPVC, pvcFinalizerName) {
if err := r.VirtualClient.Update(ctx, &virtPVC); err != nil {
@@ -127,7 +136,13 @@ func (r *PVCReconciler) Reconcile(ctx context.Context, req reconcile.Request) (r
// note that we don't need to update the PVC on the host cluster; we only sync the PVC so that it
// can be handled by the host cluster.
return reconcile.Result{}, ctrlruntimeclient.IgnoreAlreadyExists(r.HostClient.Create(ctx, syncedPVC))
if err := r.HostClient.Create(ctx, syncedPVC); err != nil && !apierrors.IsAlreadyExists(err) {
return reconcile.Result{}, err
}
// Creating a virtual PV to bind the existing PVC in the virtual cluster - needed for scheduling
// the consumer pods
return reconcile.Result{}, r.createVirtualPersistentVolume(ctx, virtPVC)
}
func (r *PVCReconciler) pvc(obj *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim {
@@ -136,3 +151,82 @@ func (r *PVCReconciler) pvc(obj *v1.PersistentVolumeClaim) *v1.PersistentVolumeC
return hostPVC
}
func (r *PVCReconciler) createVirtualPersistentVolume(ctx context.Context, pvc v1.PersistentVolumeClaim) error {
log := ctrl.LoggerFrom(ctx)
log.V(1).Info("Creating virtual PersistentVolume")
pv := newPersistentVolume(&pvc)
if err := r.VirtualClient.Create(ctx, pv); err != nil && !apierrors.IsAlreadyExists(err) {
return err
}
orig := pv.DeepCopy()
pv.Status = v1.PersistentVolumeStatus{
Phase: v1.VolumeBound,
}
if err := r.VirtualClient.Status().Patch(ctx, pv, ctrlruntimeclient.MergeFrom(orig)); err != nil {
return err
}
log.V(1).Info("Patch the status of PersistentVolumeClaim to Bound")
pvcPatch := pvc.DeepCopy()
if pvcPatch.Annotations == nil {
pvcPatch.Annotations = make(map[string]string)
}
pvcPatch.Annotations[volume.AnnBoundByController] = "yes"
pvcPatch.Annotations[volume.AnnBindCompleted] = "yes"
pvcPatch.Status.Phase = v1.ClaimBound
pvcPatch.Status.AccessModes = pvcPatch.Spec.AccessModes
return r.VirtualClient.Status().Update(ctx, pvcPatch)
}
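For context, this is controller-runtime's MergeFrom status-patch idiom used above, in isolation (a minimal sketch; cl stands in for any ctrlruntimeclient.Client):
orig := pv.DeepCopy() // snapshot before mutating the status
pv.Status = v1.PersistentVolumeStatus{Phase: v1.VolumeBound}
// only the delta between orig and pv is sent; MergeFrom carries no
// resourceVersion, so the patch avoids optimistic-locking conflicts
if err := cl.Status().Patch(ctx, pv, ctrlruntimeclient.MergeFrom(orig)); err != nil {
	return err
}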
func newPersistentVolume(obj *v1.PersistentVolumeClaim) *v1.PersistentVolume {
var storageClass string
if obj.Spec.StorageClassName != nil {
storageClass = *obj.Spec.StorageClassName
}
return &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: obj.Name,
Labels: map[string]string{
pseudoPVLabel: "true",
},
Annotations: map[string]string{
volume.AnnBoundByController: "true",
volume.AnnDynamicallyProvisioned: "k3k-kubelet",
},
},
TypeMeta: metav1.TypeMeta{
Kind: "PersistentVolume",
APIVersion: "v1",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
FlexVolume: &v1.FlexPersistentVolumeSource{
Driver: "pseudopv",
},
},
StorageClassName: storageClass,
VolumeMode: obj.Spec.VolumeMode,
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
AccessModes: obj.Spec.AccessModes,
Capacity: obj.Spec.Resources.Requests,
ClaimRef: &v1.ObjectReference{
APIVersion: obj.APIVersion,
UID: obj.UID,
ResourceVersion: obj.ResourceVersion,
Kind: obj.Kind,
Namespace: obj.Namespace,
Name: obj.Name,
},
},
}
}
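As an illustration, a minimal test-style sketch of the pre-binding this produces (an assumption: the test lives in the same package, so the unexported newPersistentVolume and pseudoPVLabel are reachable, with the testing, v1, and metav1 imports as above):
func Test_newPersistentVolume_prebinding(t *testing.T) {
	pvc := v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "data", Namespace: "default", UID: "1234"},
	}
	pv := newPersistentVolume(&pvc)
	// the pseudo PV is pre-bound to the claim via ClaimRef...
	if pv.Spec.ClaimRef.Name != "data" || pv.Spec.ClaimRef.Namespace != "default" {
		t.Fatalf("unexpected ClaimRef: %+v", pv.Spec.ClaimRef)
	}
	// ...and labelled so the syncer can recognize it later
	if pv.Labels[pseudoPVLabel] != "true" {
		t.Fatalf("missing %s label", pseudoPVLabel)
	}
}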

View File

@@ -55,7 +55,7 @@ var PVCTests = func() {
Expect(err).NotTo(HaveOccurred())
})
It("creates a pvc on the host cluster", func() {
It("creates a pvc on the host cluster and virtual pv in virtual cluster", func() {
ctx := context.Background()
pvc := &v1.PersistentVolumeClaim{
@@ -100,5 +100,11 @@ var PVCTests = func() {
Expect(*hostPVC.Spec.StorageClassName).To(Equal("test-sc"))
GinkgoWriter.Printf("labels: %v\n", hostPVC.Labels)
var virtualPV v1.PersistentVolume
key := client.ObjectKey{Name: pvc.Name}
err = virtTestEnv.k8sClient.Get(ctx, key, &virtualPV)
Expect(err).NotTo(HaveOccurred())
})
}

View File

@@ -1,215 +0,0 @@
package syncer
import (
"context"
"k8s.io/apimachinery/pkg/types"
"k8s.io/component-helpers/storage/volume"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
const (
podControllerName = "pod-pvc-controller"
pseudoPVLabel = "pod.k3k.io/pseudoPV"
)
type PodReconciler struct {
*SyncerContext
}
// AddPodPVCController adds pod controller to k3k-kubelet
func AddPodPVCController(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
// initialize a new Reconciler
reconciler := PodReconciler{
SyncerContext: &SyncerContext{
ClusterName: clusterName,
ClusterNamespace: clusterNamespace,
VirtualClient: virtMgr.GetClient(),
HostClient: hostMgr.GetClient(),
Translator: translate.ToHostTranslator{},
},
}
name := reconciler.Translator.TranslateName(clusterNamespace, podControllerName)
return ctrl.NewControllerManagedBy(virtMgr).
Named(name).
For(&v1.Pod{}).
WithEventFilter(predicate.NewPredicateFuncs(reconciler.filterResources)).
Complete(&reconciler)
}
func (r *PodReconciler) filterResources(object ctrlruntimeclient.Object) bool {
var cluster v1beta1.Cluster
ctx := context.Background()
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
return false
}
// check for pvc config
syncConfig := cluster.Spec.Sync.PersistentVolumeClaims
// If PVC syncing is disabled, only process deletions to allow for cleanup.
return syncConfig.Enabled || object.GetDeletionTimestamp() != nil
}
func (r *PodReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.ClusterName, "clusterNamespace", r.ClusterNamespace)
ctx = ctrl.LoggerInto(ctx, log)
var (
virtPod v1.Pod
cluster v1beta1.Cluster
)
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
return reconcile.Result{}, err
}
if err := r.VirtualClient.Get(ctx, req.NamespacedName, &virtPod); err != nil {
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
// reconcile pods with pvcs
for _, vol := range virtPod.Spec.Volumes {
if vol.PersistentVolumeClaim != nil {
log.Info("Handling pod with pvc")
if err := r.reconcilePodWithPVC(ctx, &virtPod, vol.PersistentVolumeClaim); err != nil {
return reconcile.Result{}, err
}
}
}
return reconcile.Result{}, nil
}
// reconcilePodWithPVC creates a fake PV for each PVC referenced by a pod, so that the pod can be scheduled on the virtual-kubelet
// and then created on the host; the PV is not synced to the host cluster.
func (r *PodReconciler) reconcilePodWithPVC(ctx context.Context, pod *v1.Pod, pvcSource *v1.PersistentVolumeClaimVolumeSource) error {
log := ctrl.LoggerFrom(ctx).WithValues("PersistentVolumeClaim", pvcSource.ClaimName)
ctx = ctrl.LoggerInto(ctx, log)
var pvc v1.PersistentVolumeClaim
key := types.NamespacedName{
Name: pvcSource.ClaimName,
Namespace: pod.Namespace,
}
if err := r.VirtualClient.Get(ctx, key, &pvc); err != nil {
return ctrlruntimeclient.IgnoreNotFound(err)
}
pv := r.pseudoPV(&pvc)
if pod.DeletionTimestamp != nil {
return r.handlePodDeletion(ctx, pv)
}
log.Info("Creating pseudo Persistent Volume")
if err := r.VirtualClient.Create(ctx, pv); err != nil {
return ctrlruntimeclient.IgnoreAlreadyExists(err)
}
orig := pv.DeepCopy()
pv.Status = v1.PersistentVolumeStatus{
Phase: v1.VolumeBound,
}
if err := r.VirtualClient.Status().Patch(ctx, pv, ctrlruntimeclient.MergeFrom(orig)); err != nil {
return err
}
log.Info("Patch the status of PersistentVolumeClaim to Bound")
pvcPatch := pvc.DeepCopy()
if pvcPatch.Annotations == nil {
pvcPatch.Annotations = make(map[string]string)
}
pvcPatch.Annotations[volume.AnnBoundByController] = "yes"
pvcPatch.Annotations[volume.AnnBindCompleted] = "yes"
pvcPatch.Status.Phase = v1.ClaimBound
pvcPatch.Status.AccessModes = pvcPatch.Spec.AccessModes
return r.VirtualClient.Status().Update(ctx, pvcPatch)
}
func (r *PodReconciler) pseudoPV(obj *v1.PersistentVolumeClaim) *v1.PersistentVolume {
var storageClass string
if obj.Spec.StorageClassName != nil {
storageClass = *obj.Spec.StorageClassName
}
return &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: obj.Name,
Labels: map[string]string{
pseudoPVLabel: "true",
},
Annotations: map[string]string{
volume.AnnBoundByController: "true",
volume.AnnDynamicallyProvisioned: "k3k-kubelet",
},
},
TypeMeta: metav1.TypeMeta{
Kind: "PersistentVolume",
APIVersion: "v1",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
FlexVolume: &v1.FlexPersistentVolumeSource{
Driver: "pseudopv",
},
},
StorageClassName: storageClass,
VolumeMode: obj.Spec.VolumeMode,
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
AccessModes: obj.Spec.AccessModes,
Capacity: obj.Spec.Resources.Requests,
ClaimRef: &v1.ObjectReference{
APIVersion: obj.APIVersion,
UID: obj.UID,
ResourceVersion: obj.ResourceVersion,
Kind: obj.Kind,
Namespace: obj.Namespace,
Name: obj.Name,
},
},
}
}
func (r *PodReconciler) handlePodDeletion(ctx context.Context, pv *v1.PersistentVolume) error {
var currentPV v1.PersistentVolume
if err := r.VirtualClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(pv), &currentPV); err != nil {
return ctrlruntimeclient.IgnoreNotFound(err)
}
pvPatch := currentPV.DeepCopy()
pvPatch.Spec.ClaimRef = nil
pvPatch.Status.Phase = v1.VolumeReleased
controllerutil.RemoveFinalizer(pvPatch, "kubernetes.io/pv-protection")
if err := r.VirtualClient.Status().Update(ctx, pvPatch); err != nil {
return err
}
return ctrlruntimeclient.IgnoreNotFound(r.VirtualClient.Delete(ctx, &currentPV))
}

View File

@@ -92,7 +92,7 @@ func NewTestEnv() *TestEnv {
By("bootstrapping test environment")
testEnv := &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "charts", "k3k", "crds")},
CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "charts", "k3k", "templates", "crds")},
ErrorIfCRDPathMissing: true,
BinaryAssetsDirectory: tempDir,
Scheme: buildScheme(),

View File

@@ -242,7 +242,7 @@ func (k *kubelet) start(ctx context.Context) {
// run the node async so that we can wait for it to be ready in another call
go func() {
klog.SetLogger(k.logger)
klog.SetLogger(k.logger.V(1))
ctx = log.WithLogger(ctx, klogv2.New(nil))
if err := k.node.Run(ctx); err != nil {
@@ -458,12 +458,6 @@ func addControllers(ctx context.Context, hostMgr, virtualMgr manager.Manager, c
return errors.New("failed to add pvc syncer controller: " + err.Error())
}
logger.Info("adding pod pvc controller")
if err := syncer.AddPodPVCController(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
return errors.New("failed to add pod pvc controller: " + err.Error())
}
logger.Info("adding priorityclass controller")
if err := syncer.AddPriorityClassSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {

View File

@@ -6,7 +6,6 @@ import (
"errors"
"fmt"
"io"
"maps"
"net/http"
"strconv"
"strings"
@@ -86,7 +85,7 @@ func New(hostConfig rest.Config, hostMgr, virtualMgr manager.Manager, logger log
CoreClient: coreClient,
ClusterNamespace: namespace,
ClusterName: name,
logger: logger,
logger: logger.WithValues("cluster", name),
serverIP: serverIP,
dnsIP: dnsIP,
}
@@ -95,8 +94,12 @@ func New(hostConfig rest.Config, hostMgr, virtualMgr manager.Manager, logger log
}
// GetContainerLogs retrieves the logs of a container by name from the provider.
func (p *Provider) GetContainerLogs(ctx context.Context, namespace, podName, containerName string, opts api.ContainerLogOpts) (io.ReadCloser, error) {
hostPodName := p.Translator.TranslateName(namespace, podName)
func (p *Provider) GetContainerLogs(ctx context.Context, namespace, name, containerName string, opts api.ContainerLogOpts) (io.ReadCloser, error) {
hostPodName := p.Translator.TranslateName(namespace, name)
logger := p.logger.WithValues("namespace", namespace, "name", name, "pod", hostPodName, "container", containerName)
logger.V(1).Info("GetContainerLogs")
options := corev1.PodLogOptions{
Container: containerName,
Timestamps: opts.Timestamps,
@@ -125,20 +128,27 @@ func (p *Provider) GetContainerLogs(ctx context.Context, namespace, podName, con
}
closer, err := p.CoreClient.Pods(p.ClusterNamespace).GetLogs(hostPodName, &options).Stream(ctx)
p.logger.Error(err, fmt.Sprintf("got error when getting logs for %s in %s", hostPodName, p.ClusterNamespace))
if err != nil {
logger.Error(err, "Error getting logs from container")
}
return closer, err
}
// RunInContainer executes a command in a container in the pod, copying data
// between in/out/err and the container's stdin/stdout/stderr.
func (p *Provider) RunInContainer(ctx context.Context, namespace, podName, containerName string, cmd []string, attach api.AttachIO) error {
hostPodName := p.Translator.TranslateName(namespace, podName)
func (p *Provider) RunInContainer(ctx context.Context, namespace, name, containerName string, cmd []string, attach api.AttachIO) error {
hostPodName := p.Translator.TranslateName(namespace, name)
logger := p.logger.WithValues("namespace", namespace, "name", name, "pod", hostPodName, "container", containerName)
logger.V(1).Info("RunInContainer")
req := p.CoreClient.RESTClient().Post().
Resource("pods").
Name(hostPodName).
Namespace(p.ClusterNamespace).
SubResource("exec")
req.VersionedParams(&corev1.PodExecOptions{
Container: containerName,
Command: cmd,
@@ -150,10 +160,11 @@ func (p *Provider) RunInContainer(ctx context.Context, namespace, podName, conta
exec, err := remotecommand.NewSPDYExecutor(&p.ClientConfig, http.MethodPost, req.URL())
if err != nil {
logger.Error(err, "Error creating SPDY executor")
return err
}
return exec.StreamWithContext(ctx, remotecommand.StreamOptions{
if err = exec.StreamWithContext(ctx, remotecommand.StreamOptions{
Stdin: attach.Stdin(),
Stdout: attach.Stdout(),
Stderr: attach.Stderr(),
@@ -161,18 +172,28 @@ func (p *Provider) RunInContainer(ctx context.Context, namespace, podName, conta
TerminalSizeQueue: &translatorSizeQueue{
resizeChan: attach.Resize(),
},
})
}); err != nil {
logger.Error(err, "Error while executing command in container")
return err
}
return nil
}
// AttachToContainer attaches to the executing process of a container in the pod, copying data
// between in/out/err and the container's stdin/stdout/stderr.
func (p *Provider) AttachToContainer(ctx context.Context, namespace, podName, containerName string, attach api.AttachIO) error {
hostPodName := p.Translator.TranslateName(namespace, podName)
func (p *Provider) AttachToContainer(ctx context.Context, namespace, name, containerName string, attach api.AttachIO) error {
hostPodName := p.Translator.TranslateName(namespace, name)
logger := p.logger.WithValues("namespace", namespace, "name", name, "pod", hostPodName, "container", containerName)
logger.V(1).Info("AttachToContainer")
req := p.CoreClient.RESTClient().Post().
Resource("pods").
Name(hostPodName).
Namespace(p.ClusterNamespace).
SubResource("attach")
req.VersionedParams(&corev1.PodAttachOptions{
Container: containerName,
TTY: attach.TTY(),
@@ -183,10 +204,11 @@ func (p *Provider) AttachToContainer(ctx context.Context, namespace, podName, co
exec, err := remotecommand.NewSPDYExecutor(&p.ClientConfig, http.MethodPost, req.URL())
if err != nil {
logger.Error(err, "Error creating SPDY executor")
return err
}
return exec.StreamWithContext(ctx, remotecommand.StreamOptions{
if err = exec.StreamWithContext(ctx, remotecommand.StreamOptions{
Stdin: attach.Stdin(),
Stdout: attach.Stdout(),
Stderr: attach.Stderr(),
@@ -194,7 +216,12 @@ func (p *Provider) AttachToContainer(ctx context.Context, namespace, podName, co
TerminalSizeQueue: &translatorSizeQueue{
resizeChan: attach.Resize(),
},
})
}); err != nil {
logger.Error(err, "Error while attaching to container")
return err
}
return nil
}
// GetStatsSummary gets the stats for the node, including running pods
@@ -203,7 +230,8 @@ func (p *Provider) GetStatsSummary(ctx context.Context) (*stats.Summary, error)
nodeList := &corev1.NodeList{}
if err := p.CoreClient.RESTClient().Get().Resource("nodes").Do(ctx).Into(nodeList); err != nil {
return nil, fmt.Errorf("unable to get nodes of cluster %s in namespace %s: %w", p.ClusterName, p.ClusterNamespace, err)
p.logger.Error(err, "Unable to get nodes of cluster")
return nil, err
}
// fetch the stats from all the nodes
@@ -221,14 +249,13 @@ func (p *Provider) GetStatsSummary(ctx context.Context) (*stats.Summary, error)
Suffix("stats/summary").
DoRaw(ctx)
if err != nil {
return nil, fmt.Errorf(
"unable to get stats of node '%s', from cluster %s in namespace %s: %w",
n.Name, p.ClusterName, p.ClusterNamespace, err,
)
p.logger.Error(err, "Unable to get stats/summary from cluster node", "node", n.Name)
return nil, err
}
stats := &stats.Summary{}
if err := json.Unmarshal(res, stats); err != nil {
p.logger.Error(err, "Error unmarshaling stats/summary from cluster node", "node", n.Name)
return nil, err
}
@@ -241,6 +268,7 @@ func (p *Provider) GetStatsSummary(ctx context.Context) (*stats.Summary, error)
pods, err := p.GetPods(ctx)
if err != nil {
p.logger.Error(err, "Error getting pods from cluster for stats")
return nil, err
}
@@ -278,9 +306,12 @@ func (p *Provider) GetStatsSummary(ctx context.Context) (*stats.Summary, error)
// GetMetricsResource gets the metrics for the node, including running pods
func (p *Provider) GetMetricsResource(ctx context.Context) ([]*dto.MetricFamily, error) {
p.logger.V(1).Info("GetMetricsResource")
statsSummary, err := p.GetStatsSummary(ctx)
if err != nil {
return nil, errors.Join(err, errors.New("error fetching MetricsResource"))
p.logger.Error(err, "Error getting stats summary from cluster for metrics")
return nil, err
}
registry := compbasemetrics.NewKubeRegistry()
@@ -288,15 +319,20 @@ func (p *Provider) GetMetricsResource(ctx context.Context) ([]*dto.MetricFamily,
metricFamily, err := registry.Gather()
if err != nil {
return nil, errors.Join(err, errors.New("error gathering metrics from collector"))
p.logger.Error(err, "Error gathering metrics from collector")
return nil, err
}
return metricFamily, nil
}
// PortForward forwards a local port to a port on the pod
func (p *Provider) PortForward(ctx context.Context, namespace, pod string, port int32, stream io.ReadWriteCloser) error {
hostPodName := p.Translator.TranslateName(namespace, pod)
func (p *Provider) PortForward(ctx context.Context, namespace, name string, port int32, stream io.ReadWriteCloser) error {
hostPodName := p.Translator.TranslateName(namespace, name)
logger := p.logger.WithValues("namespace", namespace, "name", name, "pod", hostPodName, "port", port)
logger.V(1).Info("PortForward")
req := p.CoreClient.RESTClient().Post().
Resource("pods").
Name(hostPodName).
@@ -305,6 +341,7 @@ func (p *Provider) PortForward(ctx context.Context, namespace, pod string, port
transport, upgrader, err := spdy.RoundTripperFor(&p.ClientConfig)
if err != nil {
logger.Error(err, "Error creating RoundTripper for PortForward")
return err
}
@@ -318,10 +355,16 @@ func (p *Provider) PortForward(ctx context.Context, namespace, pod string, port
// so more work is needed to detect a close and handle that appropriately.
fw, err := portforward.New(dialer, []string{portAsString}, stopChannel, readyChannel, stream, stream)
if err != nil {
logger.Error(err, "Error creating new PortForward")
return err
}
return fw.ForwardPorts()
if err := fw.ForwardPorts(); err != nil {
logger.Error(err, "Error forwarding ports")
return err
}
return nil
}
// CreatePod executes createPod with retry
@@ -329,17 +372,9 @@ func (p *Provider) CreatePod(ctx context.Context, pod *corev1.Pod) error {
return p.withRetry(ctx, p.createPod, pod)
}
// createPod takes a Kubernetes Pod and deploys it within the provider.
func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
// fieldPath envs are not being translated correctly using the virtual kubelet pod controller
// as a workaround we will try to fetch the pod from the virtual cluster and copy over the envSource
var sourcePod corev1.Pod
if err := p.VirtualClient.Get(ctx, types.NamespacedName{Name: pod.Name, Namespace: pod.Namespace}, &sourcePod); err != nil {
return err
}
tPod := sourcePod.DeepCopy()
p.Translator.TranslateTo(tPod)
logger := p.logger.WithValues("namespace", pod.Namespace, "name", pod.Name)
logger.V(1).Info("CreatePod")
// get Cluster definition
clusterKey := types.NamespacedName{
@@ -348,78 +383,101 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
}
var cluster v1beta1.Cluster
if err := p.HostClient.Get(ctx, clusterKey, &cluster); err != nil {
return fmt.Errorf("unable to get cluster %s in namespace %s: %w", p.ClusterName, p.ClusterNamespace, err)
logger.Error(err, "Error getting Virtual Cluster definition")
return err
}
// these values shouldn't be set on create
tPod.UID = ""
tPod.ResourceVersion = ""
// get Pod from Virtual Cluster
key := types.NamespacedName{
Name: pod.Name,
Namespace: pod.Namespace,
}
var virtualPod corev1.Pod
if err := p.VirtualClient.Get(ctx, key, &virtualPod); err != nil {
logger.Error(err, "Error getting Pod from Virtual Cluster")
return err
}
// Copy the virtual Pod and use it as a baseline for the hostPod
// do some basic translation and clear some values (UID, ResourceVersion, ...)
hostPod := virtualPod.DeepCopy()
p.Translator.TranslateTo(hostPod)
logger = logger.WithValues("pod", hostPod.Name)
// the pod was scheduled onto the virtual kubelet node; leaving NodeName set would keep it pending on the host indefinitely
tPod.Spec.NodeName = ""
hostPod.Spec.NodeName = ""
tPod.Spec.NodeSelector = cluster.Spec.NodeSelector
hostPod.Spec.NodeSelector = cluster.Spec.NodeSelector
// setting the hostname for the pod if it's not set
if pod.Spec.Hostname == "" {
tPod.Spec.Hostname = k3kcontroller.SafeConcatName(pod.Name)
if virtualPod.Spec.Hostname == "" {
hostPod.Spec.Hostname = k3kcontroller.SafeConcatName(virtualPod.Name)
}
// if the priorityClass for the virtual cluster is set then override the provided value
// Note: the core-dns and local-path-provisioner pods are scheduled by k3s with the
// 'system-cluster-critical' and 'system-node-critical' default priority classes.
if !strings.HasPrefix(tPod.Spec.PriorityClassName, "system-") {
if tPod.Spec.PriorityClassName != "" {
tPriorityClassName := p.Translator.TranslateName("", tPod.Spec.PriorityClassName)
tPod.Spec.PriorityClassName = tPriorityClassName
if !strings.HasPrefix(hostPod.Spec.PriorityClassName, "system-") {
if hostPod.Spec.PriorityClassName != "" {
tPriorityClassName := p.Translator.TranslateName("", hostPod.Spec.PriorityClassName)
hostPod.Spec.PriorityClassName = tPriorityClassName
}
if cluster.Spec.PriorityClass != "" {
tPod.Spec.PriorityClassName = cluster.Spec.PriorityClass
tPod.Spec.Priority = nil
hostPod.Spec.PriorityClassName = cluster.Spec.PriorityClass
hostPod.Spec.Priority = nil
}
}
p.configurePodEnvs(hostPod, &virtualPod)
// fieldpath annotations
if err := p.configureFieldPathEnv(&sourcePod, tPod); err != nil {
return fmt.Errorf("unable to fetch fieldpath annotations for pod %s/%s: %w", pod.Namespace, pod.Name, err)
}
// volumes will often refer to resources in the virtual cluster, but instead need to refer to the sync'd
// host cluster version
if err := p.transformVolumes(pod.Namespace, tPod.Spec.Volumes); err != nil {
return fmt.Errorf("unable to sync volumes for pod %s/%s: %w", pod.Namespace, pod.Name, err)
}
// sync the serviceaccount token to the host cluster
if err := p.transformTokens(ctx, pod, tPod); err != nil {
return fmt.Errorf("unable to transform tokens for pod %s/%s: %w", pod.Namespace, pod.Name, err)
}
for i, imagePullSecret := range tPod.Spec.ImagePullSecrets {
tPod.Spec.ImagePullSecrets[i].Name = p.Translator.TranslateName(pod.Namespace, imagePullSecret.Name)
}
// inject networking information to the pod including the virtual cluster controlplane endpoint
configureNetworking(tPod, pod.Name, pod.Namespace, p.serverIP, p.dnsIP)
p.logger.Info("creating pod",
"host_namespace", tPod.Namespace, "host_name", tPod.Name,
"virtual_namespace", pod.Namespace, "virtual_name", pod.Name,
)
// set ownerReference to the cluster object
if err := controllerutil.SetControllerReference(&cluster, tPod, p.HostClient.Scheme()); err != nil {
if err := p.configureFieldPathEnv(&virtualPod, hostPod); err != nil {
logger.Error(err, "Unable to fetch fieldpath annotations for pod")
return err
}
return p.HostClient.Create(ctx, tPod)
// volumes will often refer to resources in the virtual cluster
// but instead need to refer to the synced host cluster version
p.transformVolumes(pod.Namespace, hostPod.Spec.Volumes)
// sync the serviceaccount token to the host cluster
if err := p.transformTokens(ctx, &virtualPod, hostPod); err != nil {
logger.Error(err, "Unable to transform tokens for pod")
return err
}
for i, imagePullSecret := range hostPod.Spec.ImagePullSecrets {
hostPod.Spec.ImagePullSecrets[i].Name = p.Translator.TranslateName(virtualPod.Namespace, imagePullSecret.Name)
}
// inject networking information to the pod including the virtual cluster controlplane endpoint
configureNetworking(hostPod, virtualPod.Name, virtualPod.Namespace, p.serverIP, p.dnsIP)
// set ownerReference to the cluster object
if err := controllerutil.SetControllerReference(&cluster, hostPod, p.HostClient.Scheme()); err != nil {
logger.Error(err, "Unable to set owner reference for pod")
return err
}
if err := p.HostClient.Create(ctx, hostPod); err != nil {
logger.Error(err, "Error creating pod on host cluster")
return err
}
logger.Info("Pod created on host cluster")
return nil
}
// withRetry retries passed function with interval and timeout
func (p *Provider) withRetry(ctx context.Context, f func(context.Context, *corev1.Pod) error, pod *corev1.Pod) error {
const (
interval = 2 * time.Second
interval = time.Second
timeout = 10 * time.Second
)
@@ -443,44 +501,49 @@ func (p *Provider) withRetry(ctx context.Context, f func(context.Context, *corev
return nil
}
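The body of withRetry is elided from this hunk; a plausible shape, assuming apimachinery's wait.PollUntilContextTimeout (a sketch, not necessarily the committed code):
func (p *Provider) withRetry(ctx context.Context, f func(context.Context, *corev1.Pod) error, pod *corev1.Pod) error {
	const (
		interval = time.Second
		timeout  = 10 * time.Second
	)
	var lastErr error
	// retry f every interval until it succeeds or the timeout elapses
	err := wait.PollUntilContextTimeout(ctx, interval, timeout, true, func(ctx context.Context) (bool, error) {
		if lastErr = f(ctx, pod); lastErr != nil {
			return false, nil // not done yet; keep polling
		}
		return true, nil
	})
	if err != nil && lastErr != nil {
		return lastErr // surface the last failure rather than the timeout
	}
	return err
}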
// transformVolumes changes the volumes to the representation in the host cluster. Will return an error
// if one/more volumes couldn't be transformed
func (p *Provider) transformVolumes(podNamespace string, volumes []corev1.Volume) error {
for _, volume := range volumes {
// transformVolumes changes the volumes to the representation in the host cluster
func (p *Provider) transformVolumes(podNamespace string, volumes []corev1.Volume) {
for i := range volumes {
volume := &volumes[i]
// Skip volumes related to Kube API access
if strings.HasPrefix(volume.Name, kubeAPIAccessPrefix) {
continue
}
// note: this needs to handle downward api volumes as well, but more thought is needed on how to do that
if volume.ConfigMap != nil {
switch {
case volume.ConfigMap != nil:
volume.ConfigMap.Name = p.Translator.TranslateName(podNamespace, volume.ConfigMap.Name)
} else if volume.Secret != nil {
case volume.Secret != nil:
volume.Secret.SecretName = p.Translator.TranslateName(podNamespace, volume.Secret.SecretName)
} else if volume.Projected != nil {
case volume.PersistentVolumeClaim != nil:
volume.PersistentVolumeClaim.ClaimName = p.Translator.TranslateName(podNamespace, volume.PersistentVolumeClaim.ClaimName)
case volume.Projected != nil:
for _, source := range volume.Projected.Sources {
if source.ConfigMap != nil {
switch {
case source.ConfigMap != nil:
source.ConfigMap.Name = p.Translator.TranslateName(podNamespace, source.ConfigMap.Name)
} else if source.Secret != nil {
case source.Secret != nil:
source.Secret.Name = p.Translator.TranslateName(podNamespace, source.Secret.Name)
}
}
} else if volume.PersistentVolumeClaim != nil {
volume.PersistentVolumeClaim.ClaimName = p.Translator.TranslateName(podNamespace, volume.PersistentVolumeClaim.ClaimName)
} else if volume.DownwardAPI != nil {
case volume.DownwardAPI != nil:
for _, downwardAPI := range volume.DownwardAPI.Items {
if downwardAPI.FieldRef != nil {
if downwardAPI.FieldRef.FieldPath == translate.MetadataNameField {
switch downwardAPI.FieldRef.FieldPath {
case translate.MetadataNameField:
downwardAPI.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNameAnnotation)
}
if downwardAPI.FieldRef.FieldPath == translate.MetadataNamespaceField {
case translate.MetadataNamespaceField:
downwardAPI.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNamespaceAnnotation)
}
}
}
}
}
return nil
}
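A quick usage illustration (the hashed host-side name is produced by the translator; the suffix shown is only indicative):
// a pod in virtual namespace "default" mounts a PVC named "data";
// transformVolumes rewrites the claim name in place to its host-cluster name
volumes := []corev1.Volume{{
	Name: "storage",
	VolumeSource: corev1.VolumeSource{
		PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ClaimName: "data"},
	},
}}
p.transformVolumes("default", volumes)
// volumes[0].PersistentVolumeClaim.ClaimName is now e.g. "data-default-<cluster>-<hash>"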
// UpdatePod executes updatePod with retry
@@ -489,96 +552,110 @@ func (p *Provider) UpdatePod(ctx context.Context, pod *corev1.Pod) error {
}
func (p *Provider) updatePod(ctx context.Context, pod *corev1.Pod) error {
p.logger.V(1).Info("got a request for update pod")
// Once scheduled, a Pod cannot update fields other than the images of its containers and init containers, plus a few others
// See: https://kubernetes.io/docs/concepts/workloads/pods/#pod-update-and-replacement
hostPodName := p.Translator.TranslateName(pod.Namespace, pod.Name)
// Update Pod in the virtual cluster
logger := p.logger.WithValues("namespace", pod.Namespace, "name", pod.Name, "pod", hostPodName)
logger.V(1).Info("UpdatePod")
var currentVirtualPod corev1.Pod
if err := p.VirtualClient.Get(ctx, client.ObjectKeyFromObject(pod), &currentVirtualPod); err != nil {
return fmt.Errorf("unable to get pod to update from virtual cluster: %w", err)
}
//
// Host Pod update
//
hostNamespaceName := types.NamespacedName{
hostKey := types.NamespacedName{
Namespace: p.ClusterNamespace,
Name: p.Translator.TranslateName(pod.Namespace, pod.Name),
Name: hostPodName,
}
var currentHostPod corev1.Pod
if err := p.HostClient.Get(ctx, hostNamespaceName, &currentHostPod); err != nil {
return fmt.Errorf("unable to get pod to update from host cluster: %w", err)
var hostPod corev1.Pod
if err := p.HostClient.Get(ctx, hostKey, &hostPod); err != nil {
logger.Error(err, "Unable to get Pod to update from host cluster")
return err
}
// Handle ephemeral containers
if !cmp.Equal(currentHostPod.Spec.EphemeralContainers, pod.Spec.EphemeralContainers) {
p.logger.Info("Updating ephemeral containers")
updatePod(&hostPod, pod)
currentHostPod.Spec.EphemeralContainers = pod.Spec.EphemeralContainers
if err := p.HostClient.Update(ctx, &hostPod); err != nil {
logger.Error(err, "Unable to update Pod in host cluster")
return err
}
if _, err := p.CoreClient.Pods(p.ClusterNamespace).UpdateEphemeralContainers(ctx, currentHostPod.Name, &currentHostPod, metav1.UpdateOptions{}); err != nil {
p.logger.Error(err, "error when updating ephemeral containers")
// Ephemeral containers update (subresource)
if !cmp.Equal(hostPod.Spec.EphemeralContainers, pod.Spec.EphemeralContainers) {
logger.V(1).Info("Updating ephemeral containers in host pod")
hostPod.Spec.EphemeralContainers = pod.Spec.EphemeralContainers
if _, err := p.CoreClient.Pods(p.ClusterNamespace).UpdateEphemeralContainers(ctx, hostPod.Name, &hostPod, metav1.UpdateOptions{}); err != nil {
logger.Error(err, "Error when updating ephemeral containers in host pod")
return err
}
return nil
}
// fieldpath annotations
if err := p.configureFieldPathEnv(&currentVirtualPod, &currentHostPod); err != nil {
return fmt.Errorf("unable to fetch fieldpath annotations for pod %s/%s: %w", pod.Namespace, pod.Name, err)
logger.Info("Pod updated in host cluster")
//
// Virtual Pod update
//
key := types.NamespacedName{
Name: pod.Name,
Namespace: pod.Namespace,
}
currentVirtualPod.Spec.Containers = updateContainerImages(currentVirtualPod.Spec.Containers, pod.Spec.Containers)
currentVirtualPod.Spec.InitContainers = updateContainerImages(currentVirtualPod.Spec.InitContainers, pod.Spec.InitContainers)
currentVirtualPod.Spec.ActiveDeadlineSeconds = pod.Spec.ActiveDeadlineSeconds
currentVirtualPod.Spec.Tolerations = pod.Spec.Tolerations
// in the virtual cluster we can update also the labels and annotations
currentVirtualPod.Annotations = pod.Annotations
currentVirtualPod.Labels = pod.Labels
if err := p.VirtualClient.Update(ctx, &currentVirtualPod); err != nil {
return fmt.Errorf("unable to update pod in the virtual cluster: %w", err)
var virtualPod corev1.Pod
if err := p.VirtualClient.Get(ctx, key, &virtualPod); err != nil {
logger.Error(err, "Unable to get pod to update from virtual cluster")
return err
}
// Update Pod in the host cluster
currentHostPod.Spec.Containers = updateContainerImages(currentHostPod.Spec.Containers, pod.Spec.Containers)
currentHostPod.Spec.InitContainers = updateContainerImages(currentHostPod.Spec.InitContainers, pod.Spec.InitContainers)
updatePod(&virtualPod, pod)
// update ActiveDeadlineSeconds and Tolerations
currentHostPod.Spec.ActiveDeadlineSeconds = pod.Spec.ActiveDeadlineSeconds
currentHostPod.Spec.Tolerations = pod.Spec.Tolerations
// in the virtual cluster we can update also the labels and annotations
maps.Copy(currentHostPod.Annotations, pod.Annotations)
maps.Copy(currentHostPod.Labels, pod.Labels)
if err := p.HostClient.Update(ctx, &currentHostPod); err != nil {
return fmt.Errorf("unable to update pod in the host cluster: %w", err)
if err := p.VirtualClient.Update(ctx, &virtualPod); err != nil {
logger.Error(err, "Unable to update Pod in virtual cluster")
return err
}
// Ephemeral containers update (subresource)
if !cmp.Equal(virtualPod.Spec.EphemeralContainers, pod.Spec.EphemeralContainers) {
logger.V(1).Info("Updating ephemeral containers in virtual pod")
virtualPod.Spec.EphemeralContainers = pod.Spec.EphemeralContainers
if _, err := p.CoreClient.Pods(p.ClusterNamespace).UpdateEphemeralContainers(ctx, virtualPod.Name, &virtualPod, metav1.UpdateOptions{}); err != nil {
logger.Error(err, "Error when updating ephemeral containers in virtual pod")
return err
}
}
logger.Info("Pod updated in virtual and host cluster")
return nil
}
func updatePod(dst, src *corev1.Pod) {
updateContainerImages(dst.Spec.Containers, src.Spec.Containers)
updateContainerImages(dst.Spec.InitContainers, src.Spec.InitContainers)
dst.Spec.ActiveDeadlineSeconds = src.Spec.ActiveDeadlineSeconds
dst.Spec.Tolerations = src.Spec.Tolerations
dst.Annotations = src.Annotations
dst.Labels = src.Labels
}
// updateContainerImages updates the image of each container in dst whose name matches a container in src
func updateContainerImages(original, updated []corev1.Container) []corev1.Container {
newImages := make(map[string]string)
func updateContainerImages(dst, src []corev1.Container) {
images := make(map[string]string)
for _, c := range updated {
newImages[c.Name] = c.Image
for _, container := range src {
images[container.Name] = container.Image
}
for i, c := range original {
if updatedImage, found := newImages[c.Name]; found {
original[i].Image = updatedImage
}
for i, container := range dst {
	if image, found := images[container.Name]; found {
		dst[i].Image = image
	}
}
return original
}
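A minimal usage sketch of the pair above:
dst := []corev1.Container{{Name: "app", Image: "nginx:1.25"}}
src := []corev1.Container{{Name: "app", Image: "nginx:1.27"}}
updateContainerImages(dst, src)
// dst[0].Image == "nginx:1.27"; containers in dst without a matching
// name in src keep their current image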
// DeletePod executes deletePod with retry
@@ -590,20 +667,24 @@ func (p *Provider) DeletePod(ctx context.Context, pod *corev1.Pod) error {
// expected to call the NotifyPods callback with a terminal pod status where all the containers are in a terminal
// state, as well as the pod. DeletePod may be called multiple times for the same pod.
func (p *Provider) deletePod(ctx context.Context, pod *corev1.Pod) error {
p.logger.Info(fmt.Sprintf("got request to delete pod %s/%s", pod.Namespace, pod.Name))
hostName := p.Translator.TranslateName(pod.Namespace, pod.Name)
hostPodName := p.Translator.TranslateName(pod.Namespace, pod.Name)
err := p.CoreClient.Pods(p.ClusterNamespace).Delete(ctx, hostName, metav1.DeleteOptions{})
logger := p.logger.WithValues("namespace", pod.Namespace, "name", pod.Name, "pod", hostPodName)
logger.V(1).Info("DeletePod")
err := p.CoreClient.Pods(p.ClusterNamespace).Delete(ctx, hostPodName, metav1.DeleteOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
p.logger.Info(fmt.Sprintf("pod %s/%s already deleted from host cluster", p.ClusterNamespace, hostName))
logger.Info("Pod to delete not found in host cluster")
return nil
}
return fmt.Errorf("unable to delete pod %s/%s: %w", pod.Namespace, pod.Name, err)
logger.Error(err, "Error trying to delete pod from host cluster")
return err
}
p.logger.Info(fmt.Sprintf("pod %s/%s deleted from host cluster", p.ClusterNamespace, hostName))
logger.Info("Pod deleted from host cluster")
return nil
}
@@ -613,21 +694,18 @@ func (p *Provider) deletePod(ctx context.Context, pod *corev1.Pod) error {
// concurrently outside of the calling goroutine. Therefore it is recommended
// to return a version after DeepCopy.
func (p *Provider) GetPod(ctx context.Context, namespace, name string) (*corev1.Pod, error) {
p.logger.V(1).Info("got a request for get pod", "namespace", namespace, "name", name)
hostNamespaceName := types.NamespacedName{
Namespace: p.ClusterNamespace,
Name: p.Translator.TranslateName(namespace, name),
hostPodName := p.Translator.TranslateName(namespace, name)
logger := p.logger.WithValues("namespace", namespace, "name", name, "pod", hostPodName)
logger.V(1).Info("GetPod")
pod, err := p.getPodFromHostCluster(ctx, hostPodName)
if err != nil {
logger.Error(err, "Error getting pod from host cluster for GetPod")
return nil, err
}
var pod corev1.Pod
if err := p.HostClient.Get(ctx, hostNamespaceName, &pod); err != nil {
return nil, fmt.Errorf("error when retrieving pod: %w", err)
}
p.Translator.TranslateFrom(&pod)
return &pod, nil
return pod, nil
}
// GetPodStatus retrieves the status of a pod by name from the provider.
@@ -635,28 +713,49 @@ func (p *Provider) GetPod(ctx context.Context, namespace, name string) (*corev1.
// concurrently outside of the calling goroutine. Therefore it is recommended
// to return a version after DeepCopy.
func (p *Provider) GetPodStatus(ctx context.Context, namespace, name string) (*corev1.PodStatus, error) {
p.logger.V(1).Info("got a request for pod status", "namespace", namespace, "name", name)
hostPodName := p.Translator.TranslateName(namespace, name)
pod, err := p.GetPod(ctx, namespace, name)
logger := p.logger.WithValues("namespace", namespace, "name", name, "pod", hostPodName)
logger.V(1).Info("GetPodStatus")
pod, err := p.getPodFromHostCluster(ctx, hostPodName)
if err != nil {
return nil, fmt.Errorf("unable to get pod for status: %w", err)
logger.Error(err, "Error getting pod from host cluster for PodStatus")
return nil, err
}
p.logger.V(1).Info("got pod status", "namespace", namespace, "name", name, "status", pod.Status)
return pod.Status.DeepCopy(), nil
}
func (p *Provider) getPodFromHostCluster(ctx context.Context, hostPodName string) (*corev1.Pod, error) {
key := types.NamespacedName{
Namespace: p.ClusterNamespace,
Name: hostPodName,
}
var pod corev1.Pod
if err := p.HostClient.Get(ctx, key, &pod); err != nil {
return nil, err
}
p.Translator.TranslateFrom(&pod)
return &pod, nil
}
// GetPods retrieves a list of all pods running on the provider (can be cached).
// The Pods returned are expected to be immutable, and may be accessed
// concurrently outside of the calling goroutine. Therefore it is recommended
// to return a version after DeepCopy.
func (p *Provider) GetPods(ctx context.Context) ([]*corev1.Pod, error) {
p.logger.V(1).Info("GetPods")
selector := labels.NewSelector()
requirement, err := labels.NewRequirement(translate.ClusterNameLabel, selection.Equals, []string{p.ClusterName})
if err != nil {
return nil, fmt.Errorf("unable to create label selector: %w", err)
p.logger.Error(err, "Error creating label selector for GetPods")
return nil, err
}
selector = selector.Add(*requirement)
@@ -665,7 +764,8 @@ func (p *Provider) GetPods(ctx context.Context) ([]*corev1.Pod, error) {
err = p.HostClient.List(ctx, &podList, &client.ListOptions{LabelSelector: selector})
if err != nil {
return nil, fmt.Errorf("unable to list pods: %w", err)
p.logger.Error(err, "Error listing pods from host cluster")
return nil, err
}
retPods := []*corev1.Pod{}
@@ -766,28 +866,91 @@ func mergeEnvVars(orig, updated []corev1.EnvVar) []corev1.EnvVar {
return orig
}
func (p *Provider) configurePodEnvs(hostPod, virtualPod *corev1.Pod) {
for i := range hostPod.Spec.Containers {
hostPod.Spec.Containers[i].Env = p.configureEnv(virtualPod, virtualPod.Spec.Containers[i].Env)
hostPod.Spec.Containers[i].EnvFrom = p.configureEnvFrom(virtualPod, virtualPod.Spec.Containers[i].EnvFrom)
}
for i := range hostPod.Spec.InitContainers {
hostPod.Spec.InitContainers[i].Env = p.configureEnv(virtualPod, virtualPod.Spec.InitContainers[i].Env)
hostPod.Spec.InitContainers[i].EnvFrom = p.configureEnvFrom(virtualPod, virtualPod.Spec.InitContainers[i].EnvFrom)
}
for i := range hostPod.Spec.EphemeralContainers {
hostPod.Spec.EphemeralContainers[i].Env = p.configureEnv(virtualPod, virtualPod.Spec.EphemeralContainers[i].Env)
hostPod.Spec.EphemeralContainers[i].EnvFrom = p.configureEnvFrom(virtualPod, virtualPod.Spec.EphemeralContainers[i].EnvFrom)
}
}
func (p *Provider) configureEnv(virtualPod *corev1.Pod, envs []corev1.EnvVar) []corev1.EnvVar {
resultingEnvVars := make([]corev1.EnvVar, 0, len(envs))
for _, envVar := range envs {
resultingEnvVar := envVar
if envVar.ValueFrom != nil {
from := envVar.ValueFrom
switch {
case from.FieldRef != nil:
fieldRef := from.FieldRef
// for name and namespace we need to hardcode the virtual cluster values, and clear the FieldRef
switch fieldRef.FieldPath {
case "metadata.name":
resultingEnvVar.Value = virtualPod.Name
resultingEnvVar.ValueFrom = nil
case "metadata.namespace":
resultingEnvVar.Value = virtualPod.Namespace
resultingEnvVar.ValueFrom = nil
}
case from.ConfigMapKeyRef != nil:
resultingEnvVar.ValueFrom.ConfigMapKeyRef.Name = p.Translator.TranslateName(virtualPod.Namespace, resultingEnvVar.ValueFrom.ConfigMapKeyRef.Name)
case from.SecretKeyRef != nil:
resultingEnvVar.ValueFrom.SecretKeyRef.Name = p.Translator.TranslateName(virtualPod.Namespace, resultingEnvVar.ValueFrom.SecretKeyRef.Name)
}
}
resultingEnvVars = append(resultingEnvVars, resultingEnvVar)
}
return resultingEnvVars
}
func (p *Provider) configureEnvFrom(virtualPod *corev1.Pod, envs []corev1.EnvFromSource) []corev1.EnvFromSource {
resultingEnvVars := make([]corev1.EnvFromSource, 0, len(envs))
for _, envVar := range envs {
resultingEnvVar := envVar
if envVar.ConfigMapRef != nil {
resultingEnvVar.ConfigMapRef.Name = p.Translator.TranslateName(virtualPod.Namespace, envVar.ConfigMapRef.Name)
}
if envVar.SecretRef != nil {
resultingEnvVar.SecretRef.Name = p.Translator.TranslateName(virtualPod.Namespace, envVar.SecretRef.Name)
}
resultingEnvVars = append(resultingEnvVars, resultingEnvVar)
}
return resultingEnvVars
}
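And a matching envFrom illustration (same Provider and translator setup as in the tests further down; the hashed host name is indicative only):
envFrom := []corev1.EnvFromSource{{
	ConfigMapRef: &corev1.ConfigMapEnvSource{
		LocalObjectReference: corev1.LocalObjectReference{Name: "app-config"},
	},
}}
translated := p.configureEnvFrom(virtualPod, envFrom)
// translated[0].ConfigMapRef.Name now carries the host-cluster name,
// e.g. "app-config-my-namespace-c-test-<hash>"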
// configureFieldPathEnv retrieves all annotations created by the pod mutating webhook
// to assign env fieldpaths to pods. It also rewrites metadata.name and metadata.namespace
// references to the assigned annotations.
func (p *Provider) configureFieldPathEnv(pod, tPod *corev1.Pod) error {
for _, container := range pod.Spec.EphemeralContainers {
addFieldPathAnnotationToEnv(container.Env)
}
// override metadata.name and metadata.namespace with pod annotations
for _, container := range pod.Spec.InitContainers {
addFieldPathAnnotationToEnv(container.Env)
}
for _, container := range pod.Spec.Containers {
addFieldPathAnnotationToEnv(container.Env)
}
for name, value := range pod.Annotations {
if strings.Contains(name, webhook.FieldpathField) {
containerIndex, envName, err := webhook.ParseFieldPathAnnotationKey(name)
if err != nil {
return err
}
// re-adding these envs to the pod
tPod.Spec.Containers[containerIndex].Env = append(tPod.Spec.Containers[containerIndex].Env, corev1.EnvVar{
Name: envName,
@@ -797,6 +960,7 @@ func (p *Provider) configureFieldPathEnv(pod, tPod *corev1.Pod) error {
},
},
})
// removing the annotation from the pod
delete(tPod.Annotations, name)
}
@@ -804,22 +968,3 @@ func (p *Provider) configureFieldPathEnv(pod, tPod *corev1.Pod) error {
return nil
}
func addFieldPathAnnotationToEnv(envVars []corev1.EnvVar) {
for j, envVar := range envVars {
if envVar.ValueFrom == nil || envVar.ValueFrom.FieldRef == nil {
continue
}
fieldPath := envVar.ValueFrom.FieldRef.FieldPath
if fieldPath == translate.MetadataNameField {
envVar.ValueFrom.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNameAnnotation)
envVars[j] = envVar
}
if fieldPath == translate.MetadataNamespaceField {
envVar.ValueFrom.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNamespaceAnnotation)
envVars[j] = envVar
}
}
}
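In isolation (a sketch, assuming translate.MetadataNameField is the literal "metadata.name"):
envs := []corev1.EnvVar{{
	Name: "POD_NAME",
	ValueFrom: &corev1.EnvVarSource{
		FieldRef: &corev1.ObjectFieldSelector{FieldPath: translate.MetadataNameField},
	},
}}
addFieldPathAnnotationToEnv(envs)
// envs[0].ValueFrom.FieldRef.FieldPath now points at
// "metadata.annotations['<translate.ResourceNameAnnotation>']", so the host pod
// resolves the env var from the annotation holding the original virtual name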

View File

@@ -4,7 +4,12 @@ import (
"reflect"
"testing"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/k3k-kubelet/translate"
)
func Test_mergeEnvVars(t *testing.T) {
@@ -68,3 +73,230 @@ func Test_mergeEnvVars(t *testing.T) {
})
}
}
func Test_configureEnv(t *testing.T) {
virtualPod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "my-pod",
Namespace: "my-namespace",
},
}
tests := []struct {
name string
virtualPod *corev1.Pod
envs []corev1.EnvVar
want []corev1.EnvVar
}{
{
name: "empty envs",
virtualPod: virtualPod,
envs: []corev1.EnvVar{},
want: []corev1.EnvVar{},
},
{
name: "simple env var",
virtualPod: virtualPod,
envs: []corev1.EnvVar{
{Name: "MY_VAR", Value: "my-value"},
},
want: []corev1.EnvVar{
{Name: "MY_VAR", Value: "my-value"},
},
},
{
name: "metadata.name field ref",
virtualPod: virtualPod,
envs: []corev1.EnvVar{
{
Name: "POD_NAME",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: "metadata.name",
},
},
},
},
want: []corev1.EnvVar{
{Name: "POD_NAME", Value: "my-pod"},
},
},
{
name: "metadata.namespace field ref",
virtualPod: virtualPod,
envs: []corev1.EnvVar{
{
Name: "POD_NAMESPACE",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: "metadata.namespace",
},
},
},
},
want: []corev1.EnvVar{
{Name: "POD_NAMESPACE", Value: "my-namespace"},
},
},
{
name: "other field ref",
virtualPod: virtualPod,
envs: []corev1.EnvVar{
{
Name: "NODE_NAME",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: "spec.nodeName",
},
},
},
},
want: []corev1.EnvVar{
{
Name: "NODE_NAME",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: "spec.nodeName",
},
},
},
},
},
{
name: "secret key ref",
virtualPod: virtualPod,
envs: []corev1.EnvVar{
{
Name: "SECRET_VAR",
ValueFrom: &corev1.EnvVarSource{
SecretKeyRef: &corev1.SecretKeySelector{
LocalObjectReference: corev1.LocalObjectReference{Name: "my-secret"},
Key: "my-key",
},
},
},
},
want: []corev1.EnvVar{
{
Name: "SECRET_VAR",
ValueFrom: &corev1.EnvVarSource{
SecretKeyRef: &corev1.SecretKeySelector{
LocalObjectReference: corev1.LocalObjectReference{Name: "my-secret-my-namespace-c-test-6d792d7365637265742b6d792d6-887db"},
Key: "my-key",
},
},
},
},
},
{
name: "configmap key ref",
virtualPod: virtualPod,
envs: []corev1.EnvVar{
{
Name: "CONFIG_VAR",
ValueFrom: &corev1.EnvVarSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{Name: "my-configmap"},
Key: "my-key",
},
},
},
},
want: []corev1.EnvVar{
{
Name: "CONFIG_VAR",
ValueFrom: &corev1.EnvVarSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{Name: "my-configmap-my-namespace-c-test-6d792d636f6e6669676d6170-301f6"},
Key: "my-key",
},
},
},
},
},
{
name: "resource field ref",
virtualPod: virtualPod,
envs: []corev1.EnvVar{
{
Name: "CPU_LIMIT",
ValueFrom: &corev1.EnvVarSource{
ResourceFieldRef: &corev1.ResourceFieldSelector{
ContainerName: "my-container",
Resource: "limits.cpu",
},
},
},
},
want: []corev1.EnvVar{
{
Name: "CPU_LIMIT",
ValueFrom: &corev1.EnvVarSource{
ResourceFieldRef: &corev1.ResourceFieldSelector{
ContainerName: "my-container",
Resource: "limits.cpu",
},
},
},
},
},
{
name: "mixed env vars",
virtualPod: virtualPod,
envs: []corev1.EnvVar{
{Name: "MY_VAR", Value: "my-value"},
{
Name: "POD_NAME",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: "metadata.name",
},
},
},
{
Name: "POD_NAMESPACE",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: "metadata.namespace",
},
},
},
{
Name: "NODE_NAME",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: "spec.nodeName",
},
},
},
},
want: []corev1.EnvVar{
{Name: "MY_VAR", Value: "my-value"},
{Name: "POD_NAME", Value: "my-pod"},
{Name: "POD_NAMESPACE", Value: "my-namespace"},
{
Name: "NODE_NAME",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: "spec.nodeName",
},
},
},
},
},
}
p := Provider{
Translator: translate.ToHostTranslator{
ClusterName: "c-test",
ClusterNamespace: "ns-test",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := p.configureEnv(tt.virtualPod, tt.envs)
assert.Equal(t, tt.want, got)
})
}
}

View File

@@ -23,7 +23,8 @@ const (
// transformTokens copies the serviceaccount tokens used by the pod's serviceaccount to a secret on the host cluster and mounts it
// to look like the serviceaccount token
func (p *Provider) transformTokens(ctx context.Context, pod, tPod *corev1.Pod) error {
p.logger.Info("transforming token", "pod", pod.Name, "namespace", pod.Namespace, "serviceAccountName", pod.Spec.ServiceAccountName)
logger := p.logger.WithValues("namespace", pod.Namespace, "name", pod.Name, "serviceAccountName", pod.Spec.ServiceAccountName)
logger.V(1).Info("Transforming token")
// skip this process if the kube-api-access is already removed from the pod
// this is needed in case users already add their own custom tokens, as in Rancher-imported clusters

View File

@@ -381,6 +381,11 @@ func (s *SharedAgent) role(ctx context.Context) error {
Resources: []string{"persistentvolumeclaims", "pods", "pods/log", "pods/attach", "pods/exec", "pods/ephemeralcontainers", "secrets", "configmaps", "services"},
Verbs: []string{"*"},
},
{
APIGroups: []string{""},
Resources: []string{"events"},
Verbs: []string{"create"},
},
{
APIGroups: []string{"networking.k8s.io"},
Resources: []string{"ingresses"},

View File

@@ -23,7 +23,7 @@ func newVirtualClient(ctx context.Context, hostClient ctrlruntimeclient.Client,
}
if err := hostClient.Get(ctx, kubeconfigSecretName, &clusterKubeConfig); err != nil {
return nil, fmt.Errorf("failed to get kubeconfig secret: %w", err)
return nil, err
}
restConfig, err := clientcmd.RESTConfigFromKubeConfig(clusterKubeConfig.Data["kubeconfig.yaml"])

View File

@@ -269,8 +269,8 @@ func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1beta1.Clus
// if the Version is not specified we will try to use the same Kubernetes version as the host.
// This version is stored in the Status object, and it will not be updated if already set.
if cluster.Spec.Version == "" && cluster.Status.HostVersion == "" {
log.V(1).Info("Cluster version not set. Using host version.")
if cluster.Status.HostVersion == "" {
log.V(1).Info("Cluster host version not set.")
hostVersion, err := c.DiscoveryClient.ServerVersion()
if err != nil {
@@ -278,8 +278,9 @@ func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1beta1.Clus
}
// update Status HostVersion
k8sVersion := strings.Split(hostVersion.GitVersion, "+")[0]
cluster.Status.HostVersion = k8sVersion + "-k3s1"
k8sVersion, _, _ := strings.Cut(hostVersion.GitVersion, "+")
k8sVersion, _, _ = strings.Cut(k8sVersion, "-")
cluster.Status.HostVersion = k8sVersion
}
token, err := c.token(ctx, cluster)
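The two strings.Cut calls normalize the host's reported GitVersion; a worked illustration:
// "v1.31.4+k3s1"    -> "v1.31.4" (build metadata stripped at '+')
// "v1.31.4-eks-abc" -> "v1.31.4" (pre-release suffix stripped at '-')
v, _, _ := strings.Cut("v1.31.4+k3s1", "+")
v, _, _ = strings.Cut(v, "-")
// v == "v1.31.4"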

View File

@@ -41,7 +41,7 @@ var (
var _ = BeforeSuite(func() {
By("bootstrapping test environment")
testEnv = &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "charts", "k3k", "crds")},
CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "charts", "k3k", "templates", "crds")},
ErrorIfCRDPathMissing: true,
}

View File

@@ -2,7 +2,6 @@ package cluster_test
import (
"context"
"fmt"
"time"
"k8s.io/utils/ptr"
@@ -73,7 +72,6 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
serverVersion, err := k8s.ServerVersion()
Expect(err).To(Not(HaveOccurred()))
expectedHostVersion := fmt.Sprintf("%s-k3s1", serverVersion.GitVersion)
Eventually(func() string {
err := k8sClient.Get(ctx, client.ObjectKeyFromObject(cluster), cluster)
@@ -82,7 +80,7 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
}).
WithTimeout(time.Second * 30).
WithPolling(time.Second).
Should(Equal(expectedHostVersion))
Should(Equal(serverVersion.GitVersion))
// check NetworkPolicy
expectedNetworkPolicy := &networkingv1.NetworkPolicy{

View File

@@ -416,9 +416,11 @@ func (s *Server) setupDynamicPersistence() v1.PersistentVolumeClaim {
func (s *Server) setupStartCommand() (string, error) {
var output bytes.Buffer
tmpl := singleServerTemplate
tmpl := StartupCommand
mode := "single"
if *s.cluster.Spec.Servers > 1 {
tmpl = HAServerTemplate
mode = "ha"
}
tmplCmd, err := template.New("").Parse(tmpl)
@@ -430,6 +432,8 @@ func (s *Server) setupStartCommand() (string, error) {
"ETCD_DIR": "/var/lib/rancher/k3s/server/db/etcd",
"INIT_CONFIG": "/opt/rancher/k3s/init/config.yaml",
"SERVER_CONFIG": "/opt/rancher/k3s/server/config.yaml",
"CLUSTER_MODE": mode,
"K3K_MODE": string(s.cluster.Spec.Mode),
"EXTRA_ARGS": strings.Join(s.cluster.Spec.ServerArgs, " "),
}); err != nil {
return "", err

View File

@@ -1,16 +1,102 @@
package server
var singleServerTemplate string = `
if [ -d "{{.ETCD_DIR}}" ]; then
# if directory exists then it means its not an initial run
/bin/k3s server --cluster-reset --config {{.INIT_CONFIG}} {{.EXTRA_ARGS}} 2>&1 | tee /var/log/k3s.log
fi
rm -f /var/lib/rancher/k3s/server/db/reset-flag
/bin/k3s server --config {{.INIT_CONFIG}} {{.EXTRA_ARGS}} 2>&1 | tee /var/log/k3s.log`
var StartupCommand string = `
info()
{
echo "[INFO] [$(date +"%c")]" "$@"
}
fatal()
{
echo "[FATAL] [$(date +"%c")] " "$@" >&2
exit 1
}
# safe mode function to reset node IP after pod restarts
safe_mode() {
CURRENT_IP=""
if [ -f /var/lib/rancher/k3s/k3k-node-ip ]; then
CURRENT_IP=$(cat /var/lib/rancher/k3s/k3k-node-ip)
fi
if [ -z "$CURRENT_IP" ] || [ "$CURRENT_IP" = "$POD_IP" ] || [ {{.K3K_MODE}} != "virtual" ]; then
return
fi
# skip safe mode when the node is starting for the first time (no existing data)
if [ -d "{{.ETCD_DIR}}" ]; then
info "Starting K3s in Safe Mode (Network Policy Disabled) to patch Node IP from ${CURRENT_IP} to ${POD_IP}"
/bin/k3s server --disable-network-policy --config $1 {{.EXTRA_ARGS}} > /dev/null 2>&1 &
PID=$!
# Start the loop to wait for the nodeIP to change
info "Waiting for Node IP to update to ${POD_IP}."
count=0
until kubectl get nodes -o wide 2>/dev/null | grep -q "${POD_IP}"; do
if ! kill -0 $PID 2>/dev/null; then
fatal "safe Mode K3s process died unexpectedly!"
fi
sleep 2
count=$((count+1))
if [ $count -gt 60 ]; then
fatal "timed out waiting for node to change IP from $CURRENT_IP to $POD_IP"
fi
done
info "Node IP is set to ${POD_IP} successfully. Stopping Safe Mode process..."
kill $PID
wait $PID 2>/dev/null || true
fi
}
start_single_node() {
info "Starting single node setup..."
# check for existing data in the single-server setup; if found we must perform a cluster reset
if [ -d "{{.ETCD_DIR}}" ]; then
info "Existing data found in single node setup. Performing cluster-reset to ensure quorum..."
if ! /bin/k3s server --cluster-reset --config {{.INIT_CONFIG}} {{.EXTRA_ARGS}} > /dev/null 2>&1; then
fatal "cluster reset failed!"
fi
info "Cluster reset complete. Removing Reset flag file."
rm -f /var/lib/rancher/k3s/server/db/reset-flag
fi
# entering safe mode to ensure correct NodeIP
safe_mode {{.INIT_CONFIG}}
info "Adding pod IP file."
echo $POD_IP > /var/lib/rancher/k3s/k3k-node-ip
var HAServerTemplate string = `
if [ ${POD_NAME: -1} == 0 ] && [ ! -d "{{.ETCD_DIR}}" ]; then
/bin/k3s server --config {{.INIT_CONFIG}} {{.EXTRA_ARGS}} 2>&1 | tee /var/log/k3s.log
else
/bin/k3s server --config {{.SERVER_CONFIG}} {{.EXTRA_ARGS}} 2>&1 | tee /var/log/k3s.log
fi`
}
start_ha_node() {
info "Starting pod $POD_NAME in HA node setup"
if [ ${POD_NAME: -1} == 0 ] && [ ! -d "{{.ETCD_DIR}}" ]; then
info "Adding pod IP file."
echo $POD_IP > /var/lib/rancher/k3s/k3k-node-ip
/bin/k3s server --config {{.INIT_CONFIG}} {{.EXTRA_ARGS}} 2>&1 | tee /var/log/k3s.log
else
safe_mode {{.SERVER_CONFIG}}
info "Adding pod IP file."
echo $POD_IP > /var/lib/rancher/k3s/k3k-node-ip
/bin/k3s server --config {{.SERVER_CONFIG}} {{.EXTRA_ARGS}} 2>&1 | tee /var/log/k3s.log
fi
}
case "{{.CLUSTER_MODE}}" in
"ha")
start_ha_node
;;
"single"|*)
start_single_node
;;
esac`
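The template above is rendered by setupStartCommand (earlier hunk); a self-contained sketch of that rendering, with illustrative values for the variables:
package main

import (
	"bytes"
	"fmt"
	"text/template"
)

func main() {
	// stand-in for server.StartupCommand; the real template is much longer
	const startupCommand = `case "{{.CLUSTER_MODE}}" in "ha") echo ha ;; *) echo single ;; esac`

	tmpl := template.Must(template.New("").Parse(startupCommand))

	var out bytes.Buffer
	if err := tmpl.Execute(&out, map[string]string{
		"ETCD_DIR":      "/var/lib/rancher/k3s/server/db/etcd",
		"INIT_CONFIG":   "/opt/rancher/k3s/init/config.yaml",
		"SERVER_CONFIG": "/opt/rancher/k3s/server/config.yaml",
		"CLUSTER_MODE":  "ha",
		"K3K_MODE":      "virtual",
		"EXTRA_ARGS":    "",
	}); err != nil {
		panic(err)
	}

	fmt.Println(out.String()) // the shell script handed to the k3s server pod
}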

View File

@@ -120,7 +120,7 @@ func (p *StatefulSetReconciler) handleServerPod(ctx context.Context, cluster v1b
if pod.DeletionTimestamp.IsZero() {
if controllerutil.AddFinalizer(pod, etcdPodFinalizerName) {
log.V(1).Info("Server Pod is being deleted. Removing finalizer", "pod", pod.Name, "namespace", pod.Namespace)
log.V(1).Info("Server Pod is being created. Adding finalizer", "pod", pod.Name, "namespace", pod.Namespace)
return p.Client.Update(ctx, pod)
}

View File

@@ -25,21 +25,24 @@ var Backoff = wait.Backoff{
Jitter: 0.1,
}
// Image returns the rancher/k3s image tagged with the specified Version.
// If Version is empty it will use with the same k8s version of the host cluster,
// stored in the Status object. It will return the latest version as last fallback.
// K3SImage returns the rancher/k3s image tagged with the found K3SVersion.
func K3SImage(cluster *v1beta1.Cluster, k3SImage string) string {
image := k3SImage
imageVersion := "latest"
return k3SImage + ":" + K3SVersion(cluster)
}
// K3SVersion returns the version specified for the cluster.
// If empty, it derives the version from the Kubernetes version of the host cluster stored in the Status object, suffixed with -k3s1.
// It returns "latest" as a final fallback.
func K3SVersion(cluster *v1beta1.Cluster) string {
if cluster.Spec.Version != "" {
imageVersion = cluster.Spec.Version
} else if cluster.Status.HostVersion != "" {
imageVersion = cluster.Status.HostVersion
return cluster.Spec.Version
}
return image + ":" + imageVersion
if cluster.Status.HostVersion != "" {
return cluster.Status.HostVersion + "-k3s1"
}
return "latest"
}
// SafeConcatNameWithPrefix runs the SafeConcatName with extra prefix.
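
In short, the resolution order is Spec.Version first, then Status.HostVersion with a -k3s1 suffix, then latest. A tiny standalone restatement of that rule; the version strings below are made-up examples and the helper name is illustrative rather than the controller's exported function:

package main

import "fmt"

// k3sVersion restates the fallback documented above: spec version first,
// then the host version with a "-k3s1" suffix, then "latest".
func k3sVersion(specVersion, hostVersion string) string {
	if specVersion != "" {
		return specVersion
	}
	if hostVersion != "" {
		return hostVersion + "-k3s1"
	}
	return "latest"
}

func main() {
	fmt.Println("rancher/k3s:" + k3sVersion("v1.33.6-k3s1", "")) // rancher/k3s:v1.33.6-k3s1
	fmt.Println("rancher/k3s:" + k3sVersion("", "v1.31.2"))      // rancher/k3s:v1.31.2-k3s1
	fmt.Println("rancher/k3s:" + k3sVersion("", ""))             // rancher/k3s:latest
}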


@@ -51,7 +51,7 @@ func Test_K3S_Image(t *testing.T) {
},
},
},
expectedData: "rancher/k3s:v4.5.6",
expectedData: "rancher/k3s:v4.5.6-k3s1",
},
{
name: "cluster with empty version spec and empty hostVersion status",


@@ -108,13 +108,27 @@ func getURLFromService(ctx context.Context, client client.Client, cluster *v1bet
ip := k3kService.Spec.ClusterIP
port := int32(443)
if len(k3kService.Spec.Ports) == 0 {
logrus.Warn("No ports exposed by the cluster service.")
}
switch k3kService.Spec.Type {
case v1.ServiceTypeNodePort:
ip = hostServerIP
port = k3kService.Spec.Ports[0].NodePort
if len(k3kService.Spec.Ports) > 0 {
port = k3kService.Spec.Ports[0].NodePort
}
case v1.ServiceTypeLoadBalancer:
ip = k3kService.Status.LoadBalancer.Ingress[0].IP
port = k3kService.Spec.Ports[0].Port
if len(k3kService.Status.LoadBalancer.Ingress) > 0 {
ip = k3kService.Status.LoadBalancer.Ingress[0].IP
} else {
logrus.Warn("No ingress found in LoadBalancer service.")
}
if len(k3kService.Spec.Ports) > 0 {
port = k3kService.Spec.Ports[0].Port
}
}
if serverPort != 0 {
@@ -122,7 +136,7 @@ func getURLFromService(ctx context.Context, client client.Client, cluster *v1bet
}
if !slices.Contains(cluster.Status.TLSSANs, ip) {
logrus.Warnf("ip %s not in tlsSANs", ip)
logrus.Warnf("IP %s not in tlsSANs.", ip)
if len(cluster.Spec.TLSSANs) > 0 {
logrus.Warnf("Using the first TLS SAN in the spec as a fallback: %s", cluster.Spec.TLSSANs[0])
@@ -133,7 +147,7 @@ func getURLFromService(ctx context.Context, client client.Client, cluster *v1bet
ip = cluster.Status.TLSSANs[0]
} else {
logrus.Warn("ip not found in tlsSANs. This could cause issue with the certificate validation.")
logrus.Warn("IP not found in tlsSANs. This could cause issue with the certificate validation.")
}
}
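
The selection logic above can be summarized as: the service ClusterIP and port 443 by default, the host server IP plus the first NodePort for NodePort services, and the first LoadBalancer ingress IP plus the first declared port for LoadBalancer services, with warnings when either list is empty. A standalone sketch of that selection; the function name and sample values are illustrative:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// endpointFor restates the selection above: ClusterIP by default, the host
// server IP plus the node port for NodePort services, and the first ingress
// IP plus the first port for LoadBalancer services.
func endpointFor(svc *v1.Service, hostServerIP string) (string, int32) {
	ip := svc.Spec.ClusterIP
	port := int32(443)

	switch svc.Spec.Type {
	case v1.ServiceTypeNodePort:
		ip = hostServerIP
		if len(svc.Spec.Ports) > 0 {
			port = svc.Spec.Ports[0].NodePort
		}
	case v1.ServiceTypeLoadBalancer:
		if len(svc.Status.LoadBalancer.Ingress) > 0 {
			ip = svc.Status.LoadBalancer.Ingress[0].IP
		}
		if len(svc.Spec.Ports) > 0 {
			port = svc.Spec.Ports[0].Port
		}
	}

	return ip, port
}

func main() {
	svc := &v1.Service{Spec: v1.ServiceSpec{
		Type:      v1.ServiceTypeNodePort,
		ClusterIP: "10.43.0.10",
		Ports:     []v1.ServicePort{{NodePort: 30443}},
	}}
	ip, port := endpointFor(svc, "172.18.0.2")
	fmt.Printf("https://%s:%d\n", ip, port) // https://172.18.0.2:30443
}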


@@ -38,7 +38,7 @@ var (
var _ = BeforeSuite(func() {
By("bootstrapping test environment")
testEnv = &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "charts", "k3k", "crds")},
CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "charts", "k3k", "templates", "crds")},
ErrorIfCRDPathMissing: true,
}
cfg, err := testEnv.Start()


@@ -10,4 +10,11 @@ CONTROLLER_TOOLS_VERSION=v0.16.0
go run sigs.k8s.io/controller-tools/cmd/controller-gen@${CONTROLLER_TOOLS_VERSION} \
crd:generateEmbeddedObjectMeta=true,allowDangerousTypes=false \
object paths=./pkg/apis/... \
output:crd:dir=./charts/k3k/crds
output:crd:dir=./charts/k3k/templates/crds
# add the 'helm.sh/resource-policy: keep' annotation to the CRDs
for f in ./charts/k3k/templates/crds/*.yaml; do
sed -i '0,/^[[:space:]]*annotations:/s/^[[:space:]]*annotations:/&\n helm.sh\/resource-policy: keep/' "$f"
echo "Validating $f"
yq . "$f" > /dev/null
done

scripts/generate-cli-docs (new executable file, 42 lines)

@@ -0,0 +1,42 @@
#!/bin/bash
set -eou pipefail
DOCS_DIR=./docs/cli
# Generate the raw markdown files
go run $DOCS_DIR/genclidoc.go
echo "Converting Markdown documentation to asciidoc"
pandoc --from markdown --to asciidoc --lua-filter=$DOCS_DIR/convert.lua $DOCS_DIR/k3kcli.md > $DOCS_DIR/k3kcli.adoc
echo "" >> $DOCS_DIR/k3kcli.adoc
# We use an explicit list of files to keep a stable ordering
SUBCOMMAND_FILES=(
"$DOCS_DIR/k3kcli_cluster.md"
"$DOCS_DIR/k3kcli_cluster_create.md"
"$DOCS_DIR/k3kcli_cluster_delete.md"
"$DOCS_DIR/k3kcli_cluster_list.md"
"$DOCS_DIR/k3kcli_kubeconfig.md"
"$DOCS_DIR/k3kcli_kubeconfig_generate.md"
"$DOCS_DIR/k3kcli_policy.md"
"$DOCS_DIR/k3kcli_policy_create.md"
"$DOCS_DIR/k3kcli_policy_delete.md"
"$DOCS_DIR/k3kcli_policy_list.md"
)
# Check for newly added doc files
EXPECTED_COUNT=${#SUBCOMMAND_FILES[@]}
ACTUAL_COUNT=$(ls -1 $DOCS_DIR/k3kcli_*.md | wc -l)
if [ "$EXPECTED_COUNT" -ne "$ACTUAL_COUNT" ]; then
echo "ERROR: File count mismatch!"
echo "Expected $EXPECTED_COUNT files in the array, but found $ACTUAL_COUNT on disk."
echo "Please update the SUBCOMMAND_FILES array in $0"
exit 1
fi
pandoc --from markdown --to asciidoc --lua-filter=$DOCS_DIR/convert.lua "${SUBCOMMAND_FILES[@]}">> $DOCS_DIR/k3kcli.adoc
echo "Asciidoc documentation generated at $DOCS_DIR/k3kcli.adoc"


@@ -6,20 +6,29 @@ import (
"os/exec"
"time"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/rand"
"sigs.k8s.io/controller-runtime/pkg/client"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/api/core/v1"
"github.com/rancher/k3k/pkg/controller/policy"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
func K3kcli(args ...string) (string, string, error) {
return runCmd("k3kcli", args...)
}
func Kubectl(args ...string) (string, string, error) {
return runCmd("kubectl", args...)
}
func runCmd(cmdName string, args ...string) (string, string, error) {
stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{}
cmd := exec.CommandContext(context.Background(), "k3kcli", args...)
cmd := exec.CommandContext(context.Background(), cmdName, args...)
cmd.Stdout = stdout
cmd.Stderr = stderr
@@ -47,12 +56,7 @@ var _ = When("using the k3kcli", Label("cli"), func() {
clusterNamespace := "k3k-" + clusterName
DeferCleanup(func() {
err := k8sClient.Delete(context.Background(), &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: clusterNamespace,
},
})
Expect(client.IgnoreNotFound(err)).To(Not(HaveOccurred()))
DeleteNamespaces(clusterNamespace)
})
_, stderr, err = K3kcli("cluster", "create", clusterName)
@@ -66,7 +70,7 @@ var _ = When("using the k3kcli", Label("cli"), func() {
_, stderr, err = K3kcli("cluster", "delete", clusterName)
Expect(err).To(Not(HaveOccurred()), string(stderr))
Expect(stderr).To(ContainSubstring("Deleting [%s] cluster in namespace [%s]", clusterName, clusterNamespace))
Expect(stderr).To(ContainSubstring(`Deleting '%s' cluster in namespace '%s'`, clusterName, clusterNamespace))
// The deletion could take a bit
Eventually(func() string {
@@ -78,6 +82,24 @@ var _ = When("using the k3kcli", Label("cli"), func() {
WithPolling(time.Second).
Should(BeEmpty())
})
It("can create a cluster with the specified kubernetes version", func() {
var (
stderr string
err error
)
clusterName := "cluster-" + rand.String(5)
clusterNamespace := "k3k-" + clusterName
DeferCleanup(func() {
DeleteNamespaces(clusterNamespace)
})
_, stderr, err = K3kcli("cluster", "create", "--version", "v1.33.6-k3s1", clusterName)
Expect(err).To(Not(HaveOccurred()), string(stderr))
Expect(stderr).To(ContainSubstring("You can start using the cluster"))
})
})
When("trying the policy commands", func() {
@@ -92,7 +114,7 @@ var _ = When("using the k3kcli", Label("cli"), func() {
_, stderr, err = K3kcli("policy", "create", policyName)
Expect(err).To(Not(HaveOccurred()), string(stderr))
Expect(stderr).To(ContainSubstring("Creating policy [%s]", policyName))
Expect(stderr).To(ContainSubstring(`Creating policy '%s'`, policyName))
stdout, stderr, err = K3kcli("policy", "list")
Expect(err).To(Not(HaveOccurred()), string(stderr))
@@ -102,12 +124,80 @@ var _ = When("using the k3kcli", Label("cli"), func() {
stdout, stderr, err = K3kcli("policy", "delete", policyName)
Expect(err).To(Not(HaveOccurred()), string(stderr))
Expect(stdout).To(BeEmpty())
Expect(stderr).To(BeEmpty())
Expect(stderr).To(ContainSubstring(`Policy '%s' deleted`, policyName))
stdout, stderr, err = K3kcli("policy", "list")
Expect(err).To(Not(HaveOccurred()), string(stderr))
Expect(stdout).To(BeEmpty())
Expect(stderr).To(BeEmpty())
Expect(stdout).To(Not(ContainSubstring(policyName)))
})
It("can bound a policy to a namespace", func() {
var (
stdout string
stderr string
err error
)
namespaceName := "ns-" + rand.String(5)
_, _, err = Kubectl("create", "namespace", namespaceName)
Expect(err).To(Not(HaveOccurred()), string(stderr))
DeferCleanup(func() {
DeleteNamespaces(namespaceName)
})
By("Creating a policy and binding to a namespace")
policy1Name := "policy-" + rand.String(5)
_, stderr, err = K3kcli("policy", "create", "--namespace", namespaceName, policy1Name)
Expect(err).To(Not(HaveOccurred()), string(stderr))
Expect(stderr).To(ContainSubstring(`Creating policy '%s'`, policy1Name))
DeferCleanup(func() {
stdout, stderr, err = K3kcli("policy", "delete", policy1Name)
Expect(err).To(Not(HaveOccurred()), string(stderr))
Expect(stdout).To(BeEmpty())
Expect(stderr).To(ContainSubstring(`Policy '%s' deleted`, policy1Name))
})
var ns v1.Namespace
err = k8sClient.Get(context.Background(), types.NamespacedName{Name: namespaceName}, &ns)
Expect(err).To(Not(HaveOccurred()), string(stderr))
Expect(ns.Name).To(Equal(namespaceName))
Expect(ns.Labels).To(HaveKeyWithValue(policy.PolicyNameLabelKey, policy1Name))
By("Creating another policy and binding to the same namespace without the --overwrite flag")
policy2Name := "policy-" + rand.String(5)
stdout, stderr, err = K3kcli("policy", "create", "--namespace", namespaceName, policy2Name)
Expect(err).To(Not(HaveOccurred()), string(stderr))
Expect(stderr).To(ContainSubstring(`Creating policy '%s'`, policy2Name))
DeferCleanup(func() {
stdout, stderr, err = K3kcli("policy", "delete", policy2Name)
Expect(err).To(Not(HaveOccurred()), string(stderr))
Expect(stdout).To(BeEmpty())
Expect(stderr).To(ContainSubstring(`Policy '%s' deleted`, policy2Name))
})
err = k8sClient.Get(context.Background(), types.NamespacedName{Name: namespaceName}, &ns)
Expect(err).To(Not(HaveOccurred()), string(stderr))
Expect(ns.Name).To(Equal(namespaceName))
Expect(ns.Labels).To(HaveKeyWithValue(policy.PolicyNameLabelKey, policy1Name))
By("Forcing the other policy binding with the overwrite flag")
stdout, stderr, err = K3kcli("policy", "create", "--namespace", namespaceName, "--overwrite", policy2Name)
Expect(err).To(Not(HaveOccurred()), string(stderr))
Expect(stderr).To(ContainSubstring(`Creating policy '%s'`, policy2Name))
err = k8sClient.Get(context.Background(), types.NamespacedName{Name: namespaceName}, &ns)
Expect(err).To(Not(HaveOccurred()), string(stderr))
Expect(ns.Name).To(Equal(namespaceName))
Expect(ns.Labels).To(HaveKeyWithValue(policy.PolicyNameLabelKey, policy2Name))
})
})
@@ -122,12 +212,7 @@ var _ = When("using the k3kcli", Label("cli"), func() {
clusterNamespace := "k3k-" + clusterName
DeferCleanup(func() {
err := k8sClient.Delete(context.Background(), &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: clusterNamespace,
},
})
Expect(client.IgnoreNotFound(err)).To(Not(HaveOccurred()))
DeleteNamespaces(clusterNamespace)
})
_, stderr, err = K3kcli("cluster", "create", clusterName)
@@ -140,7 +225,7 @@ var _ = When("using the k3kcli", Label("cli"), func() {
_, stderr, err = K3kcli("cluster", "delete", clusterName)
Expect(err).To(Not(HaveOccurred()), string(stderr))
Expect(stderr).To(ContainSubstring("Deleting [%s] cluster in namespace [%s]", clusterName, clusterNamespace))
Expect(stderr).To(ContainSubstring(`Deleting '%s' cluster in namespace '%s'`, clusterName, clusterNamespace))
})
})
})


@@ -13,7 +13,7 @@ import (
. "github.com/onsi/gomega"
)
var _ = When("a cluster with custom certificates is installed with individual cert secrets", Label("e2e"), Label(certificatesTestsLabel), func() {
var _ = When("a cluster with custom certificates is installed with individual cert secrets", Label(e2eTestLabel), Label(certificatesTestsLabel), func() {
var virtualCluster *VirtualCluster
BeforeEach(func() {
@@ -21,6 +21,10 @@ var _ = When("a cluster with custom certificates is installed with individual ce
namespace := NewNamespace()
DeferCleanup(func() {
DeleteNamespaces(namespace.Name)
})
// create custom cert secret
customCertDir := "testdata/customcerts/"


@@ -5,7 +5,7 @@ import (
. "github.com/onsi/gomega"
)
var _ = When("two virtual clusters are installed", Label("e2e"), Label(networkingTestsLabel), func() {
var _ = When("two virtual clusters are installed", Label(e2eTestLabel), Label(networkingTestsLabel), func() {
var (
cluster1 *VirtualCluster
cluster2 *VirtualCluster
@@ -28,7 +28,6 @@ var _ = When("two virtual clusters are installed", Label("e2e"), Label(networkin
var (
stdout string
stderr string
curlCmd string
err error
)
@@ -70,25 +69,25 @@ var _ = When("two virtual clusters are installed", Label("e2e"), Label(networkin
// Pods in Cluster 1 should not be able to reach the Pod in Cluster 2
curlCmd = "curl --no-progress-meter " + pod1Cluster2IP
_, stderr, err = cluster1.ExecCmd(pod1Cluster1, curlCmd)
stdout, _, err = cluster1.ExecCmd(pod1Cluster1, curlCmd)
Expect(err).Should(HaveOccurred())
Expect(stderr).To(ContainSubstring("Failed to connect"))
Expect(stdout).To(Not(ContainSubstring("Welcome to nginx!")))
curlCmd = "curl --no-progress-meter " + pod1Cluster2IP
_, stderr, err = cluster1.ExecCmd(pod2Cluster1, curlCmd)
stdout, _, err = cluster1.ExecCmd(pod2Cluster1, curlCmd)
Expect(err).To(HaveOccurred())
Expect(stderr).To(ContainSubstring("Failed to connect"))
Expect(stdout).To(Not(ContainSubstring("Welcome to nginx!")))
// Pod in Cluster 2 should not be able to reach Pods in Cluster 1
curlCmd = "curl --no-progress-meter " + pod1Cluster1IP
_, stderr, err = cluster2.ExecCmd(pod1Cluster2, curlCmd)
stdout, _, err = cluster2.ExecCmd(pod1Cluster2, curlCmd)
Expect(err).To(HaveOccurred())
Expect(stderr).To(ContainSubstring("Failed to connect"))
Expect(stdout).To(Not(ContainSubstring("Welcome to nginx!")))
curlCmd = "curl --no-progress-meter " + pod2Cluster1IP
_, stderr, err = cluster2.ExecCmd(pod1Cluster2, curlCmd)
stdout, _, err = cluster2.ExecCmd(pod1Cluster2, curlCmd)
Expect(err).To(HaveOccurred())
Expect(stderr).To(ContainSubstring("Failed to connect"))
Expect(stdout).To(Not(ContainSubstring("Welcome to nginx!")))
})
})


@@ -4,7 +4,6 @@ import (
"context"
"crypto/x509"
"errors"
"fmt"
"time"
"k8s.io/utils/ptr"
@@ -20,7 +19,7 @@ import (
. "github.com/onsi/gomega"
)
var _ = When("an ephemeral cluster is installed", Label("e2e"), Label(persistenceTestsLabel), func() {
var _ = When("an ephemeral cluster is installed", Label(e2eTestLabel), Label(persistenceTestsLabel), func() {
var virtualCluster *VirtualCluster
BeforeEach(func() {
@@ -111,7 +110,7 @@ var _ = When("an ephemeral cluster is installed", Label("e2e"), Label(persistenc
})
})
var _ = When("a dynamic cluster is installed", Label("e2e"), Label(persistenceTestsLabel), func() {
var _ = When("a dynamic cluster is installed", Label(e2eTestLabel), Label(persistenceTestsLabel), func() {
var virtualCluster *VirtualCluster
BeforeEach(func() {
@@ -154,7 +153,9 @@ var _ = When("a dynamic cluster is installed", Label("e2e"), Label(persistenceTe
namespace := NewNamespace()
By(fmt.Sprintf("Creating new virtual cluster in namespace %s", namespace.Name))
DeferCleanup(func() {
DeleteNamespaces(virtualCluster.Cluster.Namespace)
})
cluster := NewCluster(namespace.Name)
cluster.Spec.Persistence.Type = v1beta1.DynamicPersistenceMode
@@ -164,8 +165,6 @@ var _ = When("a dynamic cluster is installed", Label("e2e"), Label(persistenceTe
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
By(fmt.Sprintf("Created virtual cluster %s/%s", cluster.Namespace, cluster.Name))
virtualCluster := &VirtualCluster{
Cluster: cluster,
RestConfig: restConfig,

tests/cluster_pod_test.go (new file, 281 lines)

@@ -0,0 +1,281 @@
package k3k_test
import (
"context"
"fmt"
"os"
"os/exec"
"time"
"k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/api/v1/pod"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/k3k-kubelet/translate"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = Context("In a shared cluster", Label(e2eTestLabel), Ordered, func() {
var virtualCluster *VirtualCluster
BeforeAll(func() {
virtualCluster = NewVirtualCluster()
DeferCleanup(func() {
DeleteNamespaces(virtualCluster.Cluster.Namespace)
})
})
When("creating a Pod with an invalid configuration", func() {
var virtualPod *v1.Pod
BeforeEach(func() {
p := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "nginx-",
Namespace: "default",
Labels: map[string]string{
"name": "var-expansion-test",
},
Annotations: map[string]string{
"notmysubpath": "mypath",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "nginx",
Image: "nginx",
Env: []v1.EnvVar{
{
Name: "POD_NAME",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "metadata.name",
},
},
},
{
Name: "ANNOTATION",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "metadata.annotations['mysubpath']",
},
},
},
},
VolumeMounts: []v1.VolumeMount{
{
Name: "workdir",
MountPath: "/volume_mount",
},
{
Name: "workdir",
MountPath: "/subpath_mount",
SubPathExpr: "$(ANNOTATION)/$(POD_NAME)",
},
},
},
},
Volumes: []v1.Volume{{
Name: "workdir",
VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}},
}},
},
}
ctx := context.Background()
var err error
virtualPod, err = virtualCluster.Client.CoreV1().Pods(p.Namespace).Create(ctx, p, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
})
It("should be in Pending status with the CreateContainerConfigError until we fix the annotation", func() {
ctx := context.Background()
By("Checking the container status of the Pod in the Virtual Cluster")
Eventually(func(g Gomega) {
pod, err := virtualCluster.Client.CoreV1().Pods(virtualPod.Namespace).Get(ctx, virtualPod.Name, metav1.GetOptions{})
g.Expect(err).NotTo(HaveOccurred())
g.Expect(pod.Status.Phase).To(Equal(v1.PodPending))
envVars := pod.Spec.Containers[0].Env
g.Expect(envVars).NotTo(BeEmpty())
var found bool
for _, envVar := range envVars {
if envVar.Name == "POD_NAME" {
found = true
g.Expect(envVar.ValueFrom).NotTo(BeNil())
g.Expect(envVar.ValueFrom.FieldRef).NotTo(BeNil())
g.Expect(envVar.ValueFrom.FieldRef.FieldPath).To(Equal("metadata.name"))
break
}
}
g.Expect(found).To(BeTrue())
containerStatuses := pod.Status.ContainerStatuses
g.Expect(containerStatuses).To(HaveLen(1))
waitingState := containerStatuses[0].State.Waiting
g.Expect(waitingState).NotTo(BeNil())
g.Expect(waitingState.Reason).To(Equal("CreateContainerConfigError"))
}).
WithPolling(time.Second).
WithTimeout(time.Minute).
Should(Succeed())
By("Checking the container status of the Pod in the Host Cluster")
Eventually(func(g Gomega) {
translator := translate.NewHostTranslator(virtualCluster.Cluster)
hostPodName := translator.NamespacedName(virtualPod)
pod, err := k8s.CoreV1().Pods(hostPodName.Namespace).Get(ctx, hostPodName.Name, metav1.GetOptions{})
g.Expect(err).NotTo(HaveOccurred())
g.Expect(pod.Status.Phase).To(Equal(v1.PodPending))
envVars := pod.Spec.Containers[0].Env
g.Expect(envVars).NotTo(BeEmpty())
var found bool
for _, envVar := range envVars {
if envVar.Name == "POD_NAME" {
found = true
g.Expect(envVar.ValueFrom).To(BeNil())
g.Expect(envVar.Value).To(Equal(virtualPod.Name))
break
}
}
g.Expect(found).To(BeTrue())
containerStatuses := pod.Status.ContainerStatuses
g.Expect(containerStatuses).To(HaveLen(1))
waitingState := containerStatuses[0].State.Waiting
g.Expect(waitingState).NotTo(BeNil())
g.Expect(waitingState.Reason).To(Equal("CreateContainerConfigError"))
}).
WithPolling(time.Second).
WithTimeout(time.Minute).
Should(Succeed())
By("Fixing the annotation")
var err error
virtualPod, err = virtualCluster.Client.CoreV1().Pods(virtualPod.Namespace).Get(ctx, virtualPod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
virtualPod.Annotations["mysubpath"] = virtualPod.Annotations["notmysubpath"]
delete(virtualPod.Annotations, "notmysubpath")
virtualPod, err = virtualCluster.Client.CoreV1().Pods(virtualPod.Namespace).Update(ctx, virtualPod, metav1.UpdateOptions{})
Expect(err).NotTo(HaveOccurred())
By("Checking the status of the Pod in the Virtual Cluster")
Eventually(func(g Gomega) {
vPod, err := virtualCluster.Client.CoreV1().Pods(virtualPod.Namespace).Get(ctx, virtualPod.Name, metav1.GetOptions{})
g.Expect(err).NotTo(HaveOccurred())
_, cond := pod.GetPodCondition(&vPod.Status, v1.PodReady)
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
}).
WithPolling(time.Second).
WithTimeout(time.Minute).
Should(Succeed())
By("Checking the status of the Pod in the Host Cluster")
Eventually(func(g Gomega) {
translator := translate.NewHostTranslator(virtualCluster.Cluster)
hostPodName := translator.NamespacedName(virtualPod)
hPod, err := k8s.CoreV1().Pods(hostPodName.Namespace).Get(ctx, hostPodName.Name, metav1.GetOptions{})
g.Expect(err).NotTo(HaveOccurred())
_, cond := pod.GetPodCondition(&hPod.Status, v1.PodReady)
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
}).
WithPolling(time.Second).
WithTimeout(time.Minute).
Should(Succeed())
})
})
When("installing the nginx-ingress controller", func() {
BeforeAll(func() {
By("installing the nginx-ingress controller")
Expect(os.WriteFile("vk-kubeconfig.yaml", virtualCluster.Kubeconfig, 0o644)).To(Succeed())
ingressNginx := "testdata/resources/ingress-nginx-v1.14.1.yaml"
cmd := exec.Command("kubectl", "apply", "--kubeconfig", "vk-kubeconfig.yaml", "-f", ingressNginx)
output, err := cmd.CombinedOutput()
fmt.Println("#### output", "\n", string(output))
Expect(err).NotTo(HaveOccurred(), string(output))
})
expectJobToSucceed := func(client *kubernetes.Clientset, namespace string, label string) {
GinkgoHelper()
Eventually(func(g Gomega) {
listOpts := metav1.ListOptions{LabelSelector: "job-name=" + label}
pods, err := client.CoreV1().Pods(namespace).List(context.Background(), listOpts)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(pods.Items).NotTo(BeEmpty())
g.Expect(pods.Items[0].Status.Phase).To(Equal(v1.PodSucceeded))
}).
WithPolling(time.Second).
WithTimeout(2 * time.Minute).
Should(Succeed())
}
It("should complete the ingress-nginx-admission-create Job in the host cluster", func() {
expectJobToSucceed(k8s, virtualCluster.Cluster.Namespace, "ingress-nginx-admission-create")
})
It("should complete the ingress-nginx-admission-create Job in the virtual cluster", func() {
expectJobToSucceed(virtualCluster.Client, "ingress-nginx", "ingress-nginx-admission-create")
})
It("should complete the ingress-nginx-admission-patch Job in the host cluster", func() {
expectJobToSucceed(k8s, virtualCluster.Cluster.Namespace, "ingress-nginx-admission-patch")
})
It("should complete the ingress-nginx-admission-patch Job in the virtual cluster", func() {
expectJobToSucceed(virtualCluster.Client, "ingress-nginx", "ingress-nginx-admission-patch")
})
It("should run the ingress-nginx controller", func() {
Eventually(func(g Gomega) {
ctx := context.Background()
deployment, err := virtualCluster.Client.AppsV1().Deployments("ingress-nginx").Get(ctx, "ingress-nginx-controller", metav1.GetOptions{})
g.Expect(err).NotTo(HaveOccurred())
desiredReplicas := *deployment.Spec.Replicas
status := deployment.Status
g.Expect(status.ObservedGeneration).To(BeNumerically(">=", deployment.Generation))
g.Expect(status.UpdatedReplicas).To(BeNumerically("==", desiredReplicas))
g.Expect(status.AvailableReplicas).To(BeNumerically("==", desiredReplicas))
})
})
})
})
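
What the test above pins down is the Downward API translation done by the virtual kubelet: on the host cluster the pod gets a translated name, so a metadata.name fieldRef is flattened into a literal value carrying the virtual pod's name before the host pod is created. A minimal sketch of that rewrite, assuming a hypothetical helper; this is not the virtual kubelet's actual API:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// flattenPodNameEnv replaces a metadata.name fieldRef with the virtual pod's
// name, so the host pod (which carries a translated name) still exposes the
// original name to the workload, matching the assertions in the test above.
func flattenPodNameEnv(env []corev1.EnvVar, virtualPodName string) []corev1.EnvVar {
	out := make([]corev1.EnvVar, 0, len(env))
	for _, e := range env {
		if e.ValueFrom != nil && e.ValueFrom.FieldRef != nil && e.ValueFrom.FieldRef.FieldPath == "metadata.name" {
			e.Value = virtualPodName
			e.ValueFrom = nil
		}
		out = append(out, e)
	}
	return out
}

func main() {
	env := []corev1.EnvVar{{
		Name: "POD_NAME",
		ValueFrom: &corev1.EnvVarSource{
			FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.name"},
		},
	}}
	fmt.Printf("%+v\n", flattenPodNameEnv(env, "nginx-abc12")) // POD_NAME now has Value "nginx-abc12"
}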


@@ -18,7 +18,7 @@ import (
. "github.com/onsi/gomega"
)
var _ = When("a cluster's status is tracked", Label("e2e"), Label(statusTestsLabel), func() {
var _ = When("a cluster's status is tracked", Label(e2eTestLabel), Label(statusTestsLabel), func() {
var (
namespace *corev1.Namespace
vcp *v1beta1.VirtualClusterPolicy
@@ -27,7 +27,6 @@ var _ = When("a cluster's status is tracked", Label("e2e"), Label(statusTestsLab
// This BeforeEach/AfterEach will create a new namespace and a default policy for each test.
BeforeEach(func() {
ctx := context.Background()
namespace = NewNamespace()
vcp = &v1beta1.VirtualClusterPolicy{
ObjectMeta: metav1.ObjectMeta{
@@ -36,6 +35,11 @@ var _ = When("a cluster's status is tracked", Label("e2e"), Label(statusTestsLab
}
Expect(k8sClient.Create(ctx, vcp)).To(Succeed())
namespace = NewNamespace()
err := k8sClient.Get(ctx, client.ObjectKeyFromObject(namespace), namespace)
Expect(err).To(Not(HaveOccurred()))
namespace.Labels = map[string]string{
policy.PolicyNameLabelKey: vcp.Name,
}


@@ -14,7 +14,7 @@ import (
. "github.com/onsi/gomega"
)
var _ = When("a shared mode cluster is created", Ordered, Label("e2e"), func() {
var _ = When("a shared mode cluster is created", Ordered, Label(e2eTestLabel), func() {
var (
virtualCluster *VirtualCluster
virtualConfigMap *corev1.ConfigMap


@@ -18,7 +18,7 @@ import (
. "github.com/onsi/gomega"
)
var _ = When("a shared mode cluster update its envs", Label("e2e"), Label(updateTestsLabel), Label(slowTestsLabel), func() {
var _ = When("a shared mode cluster update its envs", Label(e2eTestLabel), Label(updateTestsLabel), Label(slowTestsLabel), func() {
var virtualCluster *VirtualCluster
ctx := context.Background()
BeforeEach(func() {
@@ -75,8 +75,11 @@ var _ = When("a shared mode cluster update its envs", Label("e2e"), Label(update
Expect(ok).To(BeTrue())
Expect(serverEnv2).To(Equal("toBeRemoved"))
var nodes v1.NodeList
Expect(k8sClient.List(ctx, &nodes)).To(Succeed())
aPods := listAgentPods(ctx, virtualCluster)
Expect(len(aPods)).To(Equal(1))
Expect(aPods).To(HaveLen(len(nodes.Items)))
agentPod := aPods[0]
@@ -124,6 +127,11 @@ var _ = When("a shared mode cluster update its envs", Label("e2e"), Label(update
serverPods := listServerPods(ctx, virtualCluster)
g.Expect(len(serverPods)).To(Equal(1))
serverPod := serverPods[0]
_, cond := pod.GetPodCondition(&serverPod.Status, v1.PodReady)
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
serverEnv1, ok := getEnv(&serverPods[0], "TEST_SERVER_ENV_1")
g.Expect(ok).To(BeTrue())
g.Expect(serverEnv1).To(Equal("upgraded"))
@@ -136,8 +144,16 @@ var _ = When("a shared mode cluster update its envs", Label("e2e"), Label(update
g.Expect(serverEnv3).To(Equal("new"))
// agent pods
var nodes v1.NodeList
g.Expect(k8sClient.List(ctx, &nodes)).To(Succeed())
aPods := listAgentPods(ctx, virtualCluster)
g.Expect(len(aPods)).To(Equal(1))
g.Expect(aPods).To(HaveLen(len(nodes.Items)))
agentPod := aPods[0]
_, cond = pod.GetPodCondition(&agentPod.Status, v1.PodReady)
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
agentEnv1, ok := getEnv(&aPods[0], "TEST_AGENT_ENV_1")
g.Expect(ok).To(BeTrue())
@@ -156,7 +172,7 @@ var _ = When("a shared mode cluster update its envs", Label("e2e"), Label(update
})
})
var _ = When("a shared mode cluster update its server args", Label("e2e"), Label(updateTestsLabel), Label(slowTestsLabel), func() {
var _ = When("a shared mode cluster update its server args", Label(e2eTestLabel), Label(updateTestsLabel), Label(slowTestsLabel), func() {
var virtualCluster *VirtualCluster
ctx := context.Background()
BeforeEach(func() {
@@ -207,6 +223,11 @@ var _ = When("a shared mode cluster update its server args", Label("e2e"), Label
sPods := listServerPods(ctx, virtualCluster)
g.Expect(len(sPods)).To(Equal(1))
serverPod := sPods[0]
_, cond := pod.GetPodCondition(&serverPod.Status, v1.PodReady)
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
g.Expect(isArgFound(&sPods[0], "--node-label=test_server=upgraded")).To(BeTrue())
}).
WithPolling(time.Second * 2).
@@ -215,7 +236,7 @@ var _ = When("a shared mode cluster update its server args", Label("e2e"), Label
})
})
var _ = When("a virtual mode cluster update its envs", Label("e2e"), Label(updateTestsLabel), Label(slowTestsLabel), func() {
var _ = When("a virtual mode cluster update its envs", Label(e2eTestLabel), Label(updateTestsLabel), Label(slowTestsLabel), func() {
var virtualCluster *VirtualCluster
ctx := context.Background()
BeforeEach(func() {
@@ -324,6 +345,11 @@ var _ = When("a virtual mode cluster update its envs", Label("e2e"), Label(updat
serverPods := listServerPods(ctx, virtualCluster)
g.Expect(len(serverPods)).To(Equal(1))
serverPod := serverPods[0]
_, cond := pod.GetPodCondition(&serverPod.Status, v1.PodReady)
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
serverEnv1, ok := getEnv(&serverPods[0], "TEST_SERVER_ENV_1")
g.Expect(ok).To(BeTrue())
g.Expect(serverEnv1).To(Equal("upgraded"))
@@ -339,6 +365,11 @@ var _ = When("a virtual mode cluster update its envs", Label("e2e"), Label(updat
aPods := listAgentPods(ctx, virtualCluster)
g.Expect(len(aPods)).To(Equal(1))
agentPod := aPods[0]
_, cond = pod.GetPodCondition(&agentPod.Status, v1.PodReady)
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
agentEnv1, ok := getEnv(&aPods[0], "TEST_AGENT_ENV_1")
g.Expect(ok).To(BeTrue())
g.Expect(agentEnv1).To(Equal("upgraded"))
@@ -356,12 +387,16 @@ var _ = When("a virtual mode cluster update its envs", Label("e2e"), Label(updat
})
})
var _ = When("a virtual mode cluster update its server args", Label("e2e"), Label(updateTestsLabel), Label(slowTestsLabel), func() {
var _ = When("a virtual mode cluster update its server args", Label(e2eTestLabel), Label(updateTestsLabel), Label(slowTestsLabel), func() {
var virtualCluster *VirtualCluster
ctx := context.Background()
BeforeEach(func() {
namespace := NewNamespace()
DeferCleanup(func() {
DeleteNamespaces(namespace.Name)
})
cluster := NewCluster(namespace.Name)
// Add initial args for server
@@ -406,6 +441,11 @@ var _ = When("a virtual mode cluster update its server args", Label("e2e"), Labe
sPods := listServerPods(ctx, virtualCluster)
g.Expect(len(sPods)).To(Equal(1))
serverPod := sPods[0]
_, cond := pod.GetPodCondition(&serverPod.Status, v1.PodReady)
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
g.Expect(isArgFound(&sPods[0], "--node-label=test_server=upgraded")).To(BeTrue())
}).
WithPolling(time.Second * 2).
@@ -414,7 +454,7 @@ var _ = When("a virtual mode cluster update its server args", Label("e2e"), Labe
})
})
var _ = When("a shared mode cluster update its version", Label("e2e"), Label(updateTestsLabel), Label(slowTestsLabel), func() {
var _ = When("a shared mode cluster update its version", Label(e2eTestLabel), Label(updateTestsLabel), Label(slowTestsLabel), func() {
var (
virtualCluster *VirtualCluster
nginxPod *v1.Pod
@@ -500,7 +540,7 @@ var _ = When("a shared mode cluster update its version", Label("e2e"), Label(upd
})
})
var _ = When("a virtual mode cluster update its version", Label("e2e"), Label(updateTestsLabel), Label(slowTestsLabel), func() {
var _ = When("a virtual mode cluster update its version", Label(e2eTestLabel), Label(updateTestsLabel), Label(slowTestsLabel), func() {
var (
virtualCluster *VirtualCluster
nginxPod *v1.Pod
@@ -601,7 +641,7 @@ var _ = When("a virtual mode cluster update its version", Label("e2e"), Label(up
})
})
var _ = When("a shared mode cluster scales up servers", Label("e2e"), Label(updateTestsLabel), Label(slowTestsLabel), func() {
var _ = When("a shared mode cluster scales up servers", Label(e2eTestLabel), Label(updateTestsLabel), Label(slowTestsLabel), func() {
var (
virtualCluster *VirtualCluster
nginxPod *v1.Pod
@@ -686,7 +726,7 @@ var _ = When("a shared mode cluster scales up servers", Label("e2e"), Label(upda
})
})
var _ = When("a shared mode cluster scales down servers", Label("e2e"), Label(updateTestsLabel), Label(slowTestsLabel), func() {
var _ = When("a shared mode cluster scales down servers", Label(e2eTestLabel), Label(updateTestsLabel), Label(slowTestsLabel), func() {
var (
virtualCluster *VirtualCluster
nginxPod *v1.Pod
@@ -773,7 +813,7 @@ var _ = When("a shared mode cluster scales down servers", Label("e2e"), Label(up
})
})
var _ = When("a virtual mode cluster scales up servers", Label("e2e"), Label(updateTestsLabel), Label(slowTestsLabel), func() {
var _ = When("a virtual mode cluster scales up servers", Label(e2eTestLabel), Label(updateTestsLabel), Label(slowTestsLabel), func() {
var (
virtualCluster *VirtualCluster
nginxPod *v1.Pod
@@ -858,7 +898,7 @@ var _ = When("a virtual mode cluster scales up servers", Label("e2e"), Label(upd
})
})
var _ = When("a virtual mode cluster scales down servers", Label("e2e"), Label(updateTestsLabel), Label(slowTestsLabel), func() {
var _ = When("a virtual mode cluster scales down servers", Label(e2eTestLabel), Label(updateTestsLabel), Label(slowTestsLabel), func() {
var (
virtualCluster *VirtualCluster
nginxPod *v1.Pod


@@ -5,6 +5,7 @@ import (
"context"
"fmt"
"net/url"
"os"
"strings"
"sync"
"time"
@@ -16,6 +17,7 @@ import (
"k8s.io/kubectl/pkg/scheme"
"k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -34,6 +36,7 @@ type VirtualCluster struct {
Cluster *v1beta1.Cluster
RestConfig *rest.Config
Client *kubernetes.Clientset
Kubeconfig []byte
}
func NewVirtualCluster() *VirtualCluster { // By default, create an ephemeral cluster
@@ -47,14 +50,12 @@ func NewVirtualClusterWithType(persistenceType v1beta1.PersistenceMode) *Virtual
namespace := NewNamespace()
By(fmt.Sprintf("Creating new virtual cluster in namespace %s", namespace.Name))
cluster := NewCluster(namespace.Name)
cluster.Spec.Persistence.Type = persistenceType
CreateCluster(cluster)
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
client, restConfig, kubeconfig := NewVirtualK8sClientAndKubeconfig(cluster)
By(fmt.Sprintf("Created virtual cluster %s/%s", cluster.Namespace, cluster.Name))
@@ -62,6 +63,7 @@ func NewVirtualClusterWithType(persistenceType v1beta1.PersistenceMode) *Virtual
Cluster: cluster,
RestConfig: restConfig,
Client: client,
Kubeconfig: kubeconfig,
}
}
@@ -101,6 +103,11 @@ func NewNamespace() *v1.Namespace {
func DeleteNamespaces(names ...string) {
GinkgoHelper()
if _, found := os.LookupEnv("KEEP_NAMESPACES"); found {
By(fmt.Sprintf("Keeping namespace %v", names))
return
}
wg := sync.WaitGroup{}
wg.Add(len(names))
@@ -109,24 +116,18 @@ func DeleteNamespaces(names ...string) {
defer wg.Done()
defer GinkgoRecover()
deleteNamespace(name)
By(fmt.Sprintf("Deleting namespace %s", name))
err := k8s.CoreV1().Namespaces().Delete(context.Background(), name, metav1.DeleteOptions{
GracePeriodSeconds: ptr.To[int64](0),
})
Expect(client.IgnoreNotFound(err)).To(Not(HaveOccurred()))
}()
}
wg.Wait()
}
func deleteNamespace(name string) {
GinkgoHelper()
By(fmt.Sprintf("Deleting namespace %s", name))
err := k8s.CoreV1().Namespaces().Delete(context.Background(), name, metav1.DeleteOptions{
GracePeriodSeconds: ptr.To[int64](0),
})
Expect(err).To(Not(HaveOccurred()))
}
func NewCluster(namespace string) *v1beta1.Cluster {
return &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
@@ -141,9 +142,6 @@ func NewCluster(namespace string) *v1beta1.Cluster {
Persistence: v1beta1.PersistenceConfig{
Type: v1beta1.EphemeralPersistenceMode,
},
ServerArgs: []string{
"--disable-network-policy",
},
},
}
}
@@ -151,6 +149,8 @@ func NewCluster(namespace string) *v1beta1.Cluster {
func CreateCluster(cluster *v1beta1.Cluster) {
GinkgoHelper()
By(fmt.Sprintf("Creating new virtual cluster in namespace %s", cluster.Namespace))
ctx := context.Background()
err := k8sClient.Create(ctx, cluster)
Expect(err).To(Not(HaveOccurred()))
@@ -158,7 +158,7 @@ func CreateCluster(cluster *v1beta1.Cluster) {
expectedServers := int(*cluster.Spec.Servers)
expectedAgents := int(*cluster.Spec.Agents)
By(fmt.Sprintf("Waiting for cluster to be ready. Expected servers: %d. Expected agents: %d", expectedServers, expectedAgents))
By(fmt.Sprintf("Waiting for cluster %s to be ready in namespace %s. Expected servers: %d. Expected agents: %d", cluster.Name, cluster.Namespace, expectedServers, expectedAgents))
// track the Eventually status to log for changes
prev := -1
@@ -189,7 +189,11 @@ func CreateCluster(cluster *v1beta1.Cluster) {
}
if prev != (serversReady + agentsReady) {
GinkgoLogr.Info("Waiting for pods to be Ready", "servers", serversReady, "agents", agentsReady, "time", time.Now().Format(time.DateTime))
GinkgoLogr.Info("Waiting for pods to be Ready",
"servers", serversReady, "agents", agentsReady,
"name", cluster.Name, "namespace", cluster.Namespace,
"time", time.Now().Format(time.DateTime),
)
prev = (serversReady + agentsReady)
}
@@ -246,6 +250,39 @@ func NewVirtualK8sClientAndConfig(cluster *v1beta1.Cluster) (*kubernetes.Clients
return virtualK8sClient, restcfg
}
// NewVirtualK8sClientAndKubeconfig returns a Kubernetes ClientSet, a rest.Config, and the raw kubeconfig for the virtual cluster
func NewVirtualK8sClientAndKubeconfig(cluster *v1beta1.Cluster) (*kubernetes.Clientset, *rest.Config, []byte) {
GinkgoHelper()
var (
err error
config *clientcmdapi.Config
)
ctx := context.Background()
Eventually(func() error {
vKubeconfig := kubeconfig.New()
kubeletAltName := fmt.Sprintf("k3k-%s-kubelet", cluster.Name)
vKubeconfig.AltNames = certs.AddSANs([]string{hostIP, kubeletAltName})
config, err = vKubeconfig.Generate(ctx, k8sClient, cluster, hostIP, 0)
return err
}).
WithTimeout(time.Minute * 2).
WithPolling(time.Second * 5).
Should(BeNil())
configData, err := clientcmd.Write(*config)
Expect(err).To(Not(HaveOccurred()))
restcfg, err := clientcmd.RESTConfigFromKubeConfig(configData)
Expect(err).To(Not(HaveOccurred()))
virtualK8sClient, err := kubernetes.NewForConfig(restcfg)
Expect(err).To(Not(HaveOccurred()))
return virtualK8sClient, restcfg, configData
}
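
The new Kubeconfig field exposes the raw kubeconfig bytes so tests can hand them to external tools. A short usage sketch with a hypothetical helper, mirroring how the ingress-nginx e2e test earlier in this diff writes the kubeconfig to disk and shells out to kubectl; the file and manifest paths are illustrative:

package main

import (
	"fmt"
	"os"
	"os/exec"
)

// applyToVirtualCluster writes the kubeconfig bytes to disk and runs kubectl
// against the virtual cluster's API server.
func applyToVirtualCluster(kubeconfig []byte, manifest string) ([]byte, error) {
	if err := os.WriteFile("vk-kubeconfig.yaml", kubeconfig, 0o644); err != nil {
		return nil, err
	}
	cmd := exec.Command("kubectl", "apply", "--kubeconfig", "vk-kubeconfig.yaml", "-f", manifest)
	return cmd.CombinedOutput()
}

func main() {
	// Illustrative call; in the e2e suite the bytes come from virtualCluster.Kubeconfig.
	out, err := applyToVirtualCluster([]byte("# kubeconfig bytes go here"), "testdata/resources/ingress-nginx-v1.14.1.yaml")
	fmt.Println(string(out), err)
}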
func (c *VirtualCluster) NewNginxPod(namespace string) (*v1.Pod, string) {
GinkgoHelper()


@@ -1,35 +0,0 @@
package k3k_test
import (
"context"
"time"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = When("k3k is installed", Label("e2e"), func() {
It("is in Running status", func() {
// check that the controller is running
Eventually(func() bool {
opts := v1.ListOptions{LabelSelector: "app.kubernetes.io/name=k3k"}
podList, err := k8s.CoreV1().Pods(k3kNamespace).List(context.Background(), opts)
Expect(err).To(Not(HaveOccurred()))
Expect(podList.Items).To(Not(BeEmpty()))
for _, pod := range podList.Items {
if pod.Status.Phase == corev1.PodRunning {
return true
}
}
return false
}).
WithTimeout(time.Second * 10).
WithPolling(time.Second).
Should(BeTrue())
})
})


@@ -0,0 +1,668 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
name: ingress-nginx
---
apiVersion: v1
automountServiceAccountToken: true
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.14.1
name: ingress-nginx
namespace: ingress-nginx
---
apiVersion: v1
automountServiceAccountToken: true
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.14.1
name: ingress-nginx-admission
namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.14.1
name: ingress-nginx
namespace: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- apiGroups:
- ""
resources:
- configmaps
- pods
- secrets
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- coordination.k8s.io
resourceNames:
- ingress-nginx-leader
resources:
- leases
verbs:
- get
- update
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.14.1
name: ingress-nginx-admission
namespace: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.14.1
name: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- nodes
- pods
- secrets
- namespaces
verbs:
- list
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.14.1
name: ingress-nginx-admission
rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
- validatingwebhookconfigurations
verbs:
- get
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.14.1
name: ingress-nginx
namespace: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.14.1
name: ingress-nginx-admission
namespace: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
name: ingress-nginx-admission
namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.14.1
name: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.14.1
name: ingress-nginx-admission
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
name: ingress-nginx-admission
namespace: ingress-nginx
---
apiVersion: v1
data: null
kind: ConfigMap
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.14.1
name: ingress-nginx-controller
namespace: ingress-nginx
---
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.14.1
name: ingress-nginx-controller
namespace: ingress-nginx
spec:
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
ports:
- appProtocol: http
name: http
port: 80
protocol: TCP
targetPort: http
- appProtocol: https
name: https
port: 443
protocol: TCP
targetPort: https
selector:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
type: NodePort
---
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.14.1
name: ingress-nginx-controller-admission
namespace: ingress-nginx
spec:
ports:
- appProtocol: https
name: https-webhook
port: 443
targetPort: webhook
selector:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.14.1
name: ingress-nginx-controller
namespace: ingress-nginx
spec:
minReadySeconds: 0
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
strategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.14.1
spec:
automountServiceAccountToken: true
containers:
- args:
- /nginx-ingress-controller
- --election-id=ingress-nginx-leader
- --controller-class=k8s.io/ingress-nginx
- --ingress-class=nginx
- --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
- --validating-webhook=:8443
- --validating-webhook-certificate=/usr/local/certificates/cert
- --validating-webhook-key=/usr/local/certificates/key
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: LD_PRELOAD
value: /usr/local/lib/libmimalloc.so
image: registry.k8s.io/ingress-nginx/controller:v1.14.1@sha256:f95a79b85fb93ac3de752c71a5c27d5ceae10a18b61904dec224c1c6a4581e47
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- /wait-shutdown
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
name: controller
ports:
- containerPort: 80
name: http
protocol: TCP
- containerPort: 443
name: https
protocol: TCP
- containerPort: 8443
name: webhook
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
resources:
requests:
cpu: 100m
memory: 90Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- ALL
readOnlyRootFilesystem: false
runAsGroup: 82
runAsNonRoot: true
runAsUser: 101
seccompProfile:
type: RuntimeDefault
volumeMounts:
- mountPath: /usr/local/certificates/
name: webhook-cert
readOnly: true
dnsPolicy: ClusterFirst
nodeSelector:
kubernetes.io/os: linux
serviceAccountName: ingress-nginx
terminationGracePeriodSeconds: 300
volumes:
- name: webhook-cert
secret:
secretName: ingress-nginx-admission
---
apiVersion: batch/v1
kind: Job
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.14.1
name: ingress-nginx-admission-create
namespace: ingress-nginx
spec:
template:
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.14.1
name: ingress-nginx-admission-create
spec:
automountServiceAccountToken: true
containers:
- args:
- create
- --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc
- --namespace=$(POD_NAMESPACE)
- --secret-name=ingress-nginx-admission
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.6.5@sha256:03a00eb0e255e8a25fa49926c24cde0f7e12e8d072c445cdf5136ec78b546285
imagePullPolicy: IfNotPresent
name: create
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsGroup: 65532
runAsNonRoot: true
runAsUser: 65532
seccompProfile:
type: RuntimeDefault
nodeSelector:
kubernetes.io/os: linux
restartPolicy: OnFailure
serviceAccountName: ingress-nginx-admission
ttlSecondsAfterFinished: 0
---
apiVersion: batch/v1
kind: Job
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.14.1
name: ingress-nginx-admission-patch
namespace: ingress-nginx
spec:
template:
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.14.1
name: ingress-nginx-admission-patch
spec:
automountServiceAccountToken: true
containers:
- args:
- patch
- --webhook-name=ingress-nginx-admission
- --namespace=$(POD_NAMESPACE)
- --patch-mutating=false
- --secret-name=ingress-nginx-admission
- --patch-failure-policy=Fail
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.6.5@sha256:03a00eb0e255e8a25fa49926c24cde0f7e12e8d072c445cdf5136ec78b546285
imagePullPolicy: IfNotPresent
name: patch
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsGroup: 65532
runAsNonRoot: true
runAsUser: 65532
seccompProfile:
type: RuntimeDefault
nodeSelector:
kubernetes.io/os: linux
restartPolicy: OnFailure
serviceAccountName: ingress-nginx-admission
ttlSecondsAfterFinished: 0
---
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.14.1
name: nginx
spec:
controller: k8s.io/ingress-nginx
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.14.1
name: ingress-nginx-admission
webhooks:
- admissionReviewVersions:
- v1
clientConfig:
service:
name: ingress-nginx-controller-admission
namespace: ingress-nginx
path: /networking/v1/ingresses
port: 443
failurePolicy: Fail
matchPolicy: Equivalent
name: validate.nginx.ingress.kubernetes.io
rules:
- apiGroups:
- networking.k8s.io
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- ingresses
sideEffects: None


@@ -41,8 +41,8 @@ import (
const (
k3kNamespace = "k3k-system"
k3kName = "k3k"
e2eTestLabel = "e2e"
slowTestsLabel = "slow"
updateTestsLabel = "update"
persistenceTestsLabel = "persistence"
@@ -194,7 +194,7 @@ func installK3kChart() {
Expect(err).To(Not(HaveOccurred()))
iCli := action.NewInstall(helmActionConfig)
iCli.ReleaseName = k3kName
iCli.ReleaseName = "k3k"
iCli.Namespace = k3kNamespace
iCli.CreateNamespace = true
iCli.Timeout = time.Minute
@@ -223,6 +223,12 @@ func installK3kChart() {
}
func patchPVC(ctx context.Context, clientset *kubernetes.Clientset) {
deployments, err := clientset.AppsV1().Deployments(k3kNamespace).List(ctx, metav1.ListOptions{})
Expect(err).To(Not(HaveOccurred()))
Expect(deployments.Items).To(HaveLen(1))
k3kDeployment := &deployments.Items[0]
pvc := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "coverage-data-pvc",
@@ -240,59 +246,46 @@ func patchPVC(ctx context.Context, clientset *kubernetes.Clientset) {
},
}
_, err := clientset.CoreV1().PersistentVolumeClaims(k3kNamespace).Create(ctx, pvc, metav1.CreateOptions{})
_, err = clientset.CoreV1().PersistentVolumeClaims(k3kNamespace).Create(ctx, pvc, metav1.CreateOptions{})
Expect(client.IgnoreAlreadyExists(err)).To(Not(HaveOccurred()))
patchData := []byte(`
{
"spec": {
"template": {
"spec": {
"volumes": [
{
"name": "tmp-covdata",
"persistentVolumeClaim": {
"claimName": "coverage-data-pvc"
}
}
],
"containers": [
{
"name": "k3k",
"volumeMounts": [
{
"name": "tmp-covdata",
"mountPath": "/tmp/covdata"
}
],
"env": [
{
"name": "GOCOVERDIR",
"value": "/tmp/covdata"
}
]
}
]
}
}
}
}`)
k3kSpec := k3kDeployment.Spec.Template.Spec
GinkgoWriter.Printf("Applying patch to deployment '%s' in namespace '%s'...\n", k3kName, k3kNamespace)
// check if the Deployment has already the volume for the coverage
for _, volumes := range k3kSpec.Volumes {
if volumes.Name == "tmp-covdata" {
return
}
}
_, err = clientset.AppsV1().Deployments(k3kNamespace).Patch(
ctx,
k3kName,
types.StrategicMergePatchType,
patchData,
metav1.PatchOptions{},
)
k3kSpec.Volumes = append(k3kSpec.Volumes, v1.Volume{
Name: "tmp-covdata",
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: "coverage-data-pvc",
},
},
})
k3kSpec.Containers[0].VolumeMounts = append(k3kSpec.Containers[0].VolumeMounts, v1.VolumeMount{
Name: "tmp-covdata",
MountPath: "/tmp/covdata",
})
k3kSpec.Containers[0].Env = append(k3kSpec.Containers[0].Env, v1.EnvVar{
Name: "GOCOVERDIR",
Value: "/tmp/covdata",
})
k3kDeployment.Spec.Template.Spec = k3kSpec
_, err = clientset.AppsV1().Deployments(k3kNamespace).Update(ctx, k3kDeployment, metav1.UpdateOptions{})
Expect(err).To(Not(HaveOccurred()))
Eventually(func() bool {
GinkgoWriter.Println("Checking K3k deployment status")
dep, err := clientset.AppsV1().Deployments(k3kNamespace).Get(ctx, k3kName, metav1.GetOptions{})
dep, err := clientset.AppsV1().Deployments(k3kNamespace).Get(ctx, k3kDeployment.Name, metav1.GetOptions{})
Expect(err).To(Not(HaveOccurred()))
// 1. Check if the controller has observed the latest generation
@@ -309,7 +302,7 @@ func patchPVC(ctx context.Context, clientset *kubernetes.Clientset) {
// 3. Check if all updated replicas are available
if dep.Status.AvailableReplicas < dep.Status.UpdatedReplicas {
GinkgoWriter.Printf("K3k deployment availabl replicas: %d, updated replicas: %d\n", dep.Status.AvailableReplicas, dep.Status.UpdatedReplicas)
GinkgoWriter.Printf("K3k deployment available replicas: %d, updated replicas: %d\n", dep.Status.AvailableReplicas, dep.Status.UpdatedReplicas)
return false
}
@@ -356,8 +349,11 @@ func dumpK3kCoverageData(ctx context.Context, folder string) {
Expect(err).To(Not(HaveOccurred()))
k3kPod := podList.Items[0]
k3kContainerName := k3kPod.Spec.Containers[0].Name
cmd := exec.Command("kubectl", "exec", "-n", k3kNamespace, k3kPod.Name, "-c", "k3k", "--", "kill", "1")
By("Restarting k3k controller " + k3kPod.Name + "/" + k3kContainerName)
cmd := exec.Command("kubectl", "exec", "-n", k3kNamespace, k3kPod.Name, "-c", k3kContainerName, "--", "/bin/sh", "-c", "kill 1")
output, err := cmd.CombinedOutput()
Expect(err).NotTo(HaveOccurred(), string(output))
@@ -387,9 +383,56 @@ func dumpK3kCoverageData(ctx context.Context, folder string) {
GinkgoWriter.Printf("Downloading covdata from k3k controller %s/%s to %s\n", k3kNamespace, k3kPod.Name, folder)
cmd = exec.Command("kubectl", "cp", fmt.Sprintf("%s/%s:/tmp/covdata", k3kNamespace, k3kPod.Name), folder)
tarPod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "tar",
Namespace: k3kNamespace,
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "tar",
Image: "busybox",
Command: []string{"/bin/sh", "-c", "sleep 3600"},
VolumeMounts: []v1.VolumeMount{{
Name: "tmp-covdata",
MountPath: "/tmp/covdata",
}},
}},
Volumes: []v1.Volume{{
Name: "tmp-covdata",
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: "coverage-data-pvc",
},
},
}},
},
}
_, err = k8s.CoreV1().Pods(k3kNamespace).Create(ctx, tarPod, metav1.CreateOptions{})
Expect(client.IgnoreAlreadyExists(err)).To(Not(HaveOccurred()))
By("Waiting for tar pod to be ready")
Eventually(func(g Gomega) {
err = k8sClient.Get(ctx, client.ObjectKeyFromObject(tarPod), tarPod)
g.Expect(err).To(Not(HaveOccurred()))
_, cond := pod.GetPodCondition(&tarPod.Status, v1.PodReady)
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
}).
WithPolling(time.Second).
WithTimeout(time.Minute).
Should(Succeed())
By("Copying covdata from tar pod")
cmd = exec.Command("kubectl", "cp", fmt.Sprintf("%s/%s:/tmp/covdata", k3kNamespace, tarPod.Name), folder)
output, err = cmd.CombinedOutput()
Expect(err).NotTo(HaveOccurred(), string(output))
Expect(k8sClient.Delete(ctx, tarPod)).To(Succeed())
}
func getK3kLogs(ctx context.Context) io.ReadCloser {