Compare commits


29 Commits

Author SHA1 Message Date
Enrico Candino
2e6de51dab Improve tests resiliency (#539)
* fix missing namespaces cleanup

* fix conflict namespace

* fix PVC already created error, patch for existing volume, and check with hardcoded k3k name

* removed useless test

* fix for dump covdata from external pod

* keep namespaces flag

* fix for multi-node clusters

* fix for hanging pod in isolated namespace
2025-10-31 21:51:37 +01:00
Enrico Candino
90aecbbb42 Bump Charts to 1.0.0-rc3 (#542) 2025-10-31 17:01:03 +01:00
Enrico Candino
af9e1d6ca7 Cleanup orphaned resources after Cluster deletion (#540)
* adding controller reference for garbage collection, delete API lease

* added test

* fix lint
2025-10-31 15:25:38 +01:00
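The orphaned-resource cleanup in #540 leans on standard Kubernetes garbage collection: child objects get a controller reference to their Cluster, so deleting the Cluster removes them automatically. A minimal illustrative sketch (object and names are hypothetical, not taken from the repository):

```yaml
# Hypothetical child object owned by a Cluster; when the Cluster is deleted,
# the garbage collector removes this object via the ownerReference.
apiVersion: v1
kind: ConfigMap
metadata:
  name: mycluster-bootstrap          # illustrative name
  namespace: k3k-mycluster
  ownerReferences:
    - apiVersion: k3k.io/v1beta1
      kind: Cluster
      name: mycluster
      uid: 00000000-0000-0000-0000-000000000000   # UID of the owning Cluster
      controller: true
      blockOwnerDeletion: true
```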
Enrico Candino
ae380fa8e9 bump chart to 1.0.0-rc2 (#535) 2025-10-28 16:28:45 +01:00
Enrico Candino
c34cf9ce94 added virtual mode conformance tests (#534) 2025-10-28 13:47:31 +01:00
Enrico Candino
bf70e0d171 Updated Cluster and VirtualClusterPolicy spec for sync and loadbalancer (#528)
* add default false for ingress and priorityClass, cleanup tests and added new tests

* fix typo for loadBalancer

* fix test aligning VirtualClusterPolicy SyncConfig

* set required enabled field, revert pointer on optional SyncConfig

* update samples
2025-10-24 17:02:26 +02:00
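Based on the CRD changes further down in this compare (`loadbalancer` renamed to `loadBalancer`, `configmaps` to `configMaps`, and explicit `enabled` defaults), a rough sketch of the updated spec shape; values are illustrative:

```yaml
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
  name: mycluster
  namespace: k3k-mycluster
spec:
  expose:
    loadBalancer: {}        # renamed from "loadbalancer"
  sync:
    configMaps:             # renamed from "configmaps"
      enabled: true
    ingresses:
      enabled: false        # defaults to false
    priorityClasses:
      enabled: false        # defaults to false
```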
Enrico Candino
cebf6594c4 switch to text log as default (#529) 2025-10-24 13:42:41 +02:00
Enrico Candino
075d72df5d Cleanup of customCAs spec (#527)
* cleanup spec from customCAs when omitted

* add enabled default for customCAs
2025-10-23 22:11:44 +02:00
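With #527, `customCAs` is dropped from the spec when omitted and gains an `enabled` default; when set, `enabled` and the per-source `secretName` fields become required. A hedged sketch of the resulting block (secret names are illustrative):

```yaml
spec:
  customCAs:
    enabled: true
    sources:
      clientCA:
        secretName: mycluster-client-ca
      serverCA:
        secretName: mycluster-server-ca
      etcdServerCA:
        secretName: mycluster-etcd-server-ca
      etcdPeerCA:
        secretName: mycluster-etcd-peer-ca
      requestHeaderCA:
        secretName: mycluster-request-header-ca
      serviceAccountToken:
        secretName: mycluster-service-account-token
```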
Enrico Candino
ee7eac89ce Enhance logging and update Helm installation parameters for better debugging and cluster management (#519) 2025-10-22 14:55:47 +02:00
Enrico Candino
514fdf6b86 Fix for flaky test (#523)
* fix for flaky test

* fix lint

* check ContainersReady condition
2025-10-21 18:19:36 +02:00
Hussein Galal
730e4e1c79 Fix pseudo PV deletion (#511)
* Fix pseudo PV deletion

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix pseudo PV deletion

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-10-18 00:56:50 +02:00
Hussein Galal
a3076af38f Increase timeout and add timeout option (#514)
* Increase timeout and add timeout option

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Increase timeout and add timeout option

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-10-17 16:51:40 +03:00
Hussein Galal
89dc352bea Scale up/down tests for virtual and shared mode (#508)
* Scale up/down tests for virtual and shared mode

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* defer cleanup and more fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* add labels to e2e tests and divide the workload

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* add labels to e2e tests and divide the workload

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* add validate job to e2e test

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix label filters for e2e tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix makefile

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* use constants for e2e tests labels

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix typo

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix labels

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-10-15 17:01:14 +03:00
Enrico Candino
7644406eeb Fix for flaky test (#509)
* fix for flaky test and fix for PVC creation

* fix lint
2025-10-15 11:31:45 +02:00
Enrico Candino
2206632dcc bump charts (#507) 2025-10-14 15:19:00 +02:00
Enrico Candino
8ffdc9bafd renaming webhook (#506) 2025-10-13 17:25:17 +02:00
Enrico Candino
594c2571c3 promoted v1alpha1 resources to v1beta1 (#505) 2025-10-13 17:24:56 +02:00
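The v1beta1 promotion shows up across the diffs below as a mechanical apiVersion and import bump; existing manifests need the same one-line change:

```yaml
# before
apiVersion: k3k.io/v1alpha1
kind: Cluster
# after
apiVersion: k3k.io/v1beta1
kind: Cluster
```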
Hussein Galal
12971f55a6 Add k8s version upgrade test (#503)
* Add k8s version upgrade test

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* lint

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* remove unused functions

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-10-13 17:14:25 +03:00
Enrico Candino
99f750525f Fix extraEnv and other Helm values (#500)
* fix for extraEnv

* moved env var to flags

* changed resources as object

* renamed replicaCount to replicas

* cleanup spaces

* moved some values and spacing

* renamed some flags
2025-10-13 12:50:07 +02:00
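Matching the chart changes later in this compare (`replicaCount` renamed to `replicas`, `extraEnv` read from `controller.extraEnv`, `resources` rendered as a plain object), a sketch of the new values layout; values are illustrative:

```yaml
controller:
  replicas: 1                # renamed from replicaCount
  extraEnv:                  # now nested under controller
    - name: DEBUG
      value: "true"
  resources:                 # passed through verbatim via toYaml
    requests:
      cpu: 100m
      memory: 100Mi
```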
Hussein Galal
a0fd472841 Use K3S host cluster for E2E tests (#492)
* Add kubeconfig to e2e_tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* add E2E_KUBECONFIG env variable

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix yaml permissions for kubeconfig

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix image name and use ttl.sh

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* add uuidgen result to a file

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* add hostIP

* Add k3s version to e2e test

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* remove comment

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* remove virtual mode tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix failed test

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add KUBECONFIG env variable to the make install

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* add k3kcli to github_path

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Use docker installation for testing the cli

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* typo

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix test cli

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* lint

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* typo

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-10-08 15:39:35 +03:00
Enrico Candino
7387fc1b23 Fix Service reconciliation error loop (#497)
* fix service reconciliation error by adding checks for virtual service annotations

* renamed var
2025-10-08 14:03:50 +02:00
Enrico Candino
9f265c73d9 Fix for HA server deletion (#493)
* wip

* wip

* wip

* removed todo
2025-10-08 13:23:15 +02:00
Enrico Candino
00ef6d582c Add log-format, and cleanup (#494)
* using logr.Logger

* testing levels

* adding log format

* fix lint

* removed tests

* final cleanup
2025-10-08 13:19:57 +02:00
Enrico Candino
5c95ca3dfa Fix for pod eviction in host cluster (#484)
* update statefulset controller

* fix for single pod

* adding pod controller

* added test

* removed comment

* merged service controller

* revert statefulset

* added test

* added common owner filter
2025-10-03 16:22:54 +02:00
jpgouin
6523b8339b change the default storage request size request to 2Gi (#490)
* change the default storage request size request to 2Gi
2025-10-03 09:04:13 +02:00
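The new default appears as `2G` in the CRD below; clusters that need a different size can still set it explicitly, for example (an illustrative snippet):

```yaml
spec:
  persistence:
    type: dynamic
    storageRequestSize: 2G   # new default; override as needed
```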
Hussein Galal
80037e815f Adding upgrade path tests (#481)
* Adding upgrade path tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Remove update label

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-10-02 14:53:08 +03:00
Enrico Candino
7585611792 Rename PodController to StatefulSetController (#482)
* renamed pod.go

* update statefulset controller

* fix for single pod

* added test, revert finalizer

* wip ha deletion

* revert logic

* remove focus
2025-10-01 17:06:24 +02:00
Hussein Galal
0bd681ab60 Lb service status sync (#451)
* Sync service LB status back to virtual service

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Sync service LB status back to virtual service

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-10-01 13:25:31 +03:00
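For context, what #451 mirrors is the standard LoadBalancer ingress status: once the host Service gets an address, the same block is written back to the virtual Service. An illustrative sketch of the copied status:

```yaml
status:
  loadBalancer:
    ingress:
      - ip: 192.0.2.10   # illustrative address assigned on the host cluster
```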
Hussein Galal
4fe36b3d0c Bump Chart to v0.3.5 (#485)
* Bump Chart to v0.3.5

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Bump Chart to v0.3.5

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-09-30 15:26:59 +03:00
97 changed files with 3376 additions and 1233 deletions

View File

@@ -0,0 +1,125 @@
name: Conformance Tests - Virtual Mode
on:
schedule:
- cron: "0 1 * * *"
workflow_dispatch:
permissions:
contents: read
jobs:
conformance:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
type:
- parallel
- serial
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
fetch-tags: true
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
- name: Install helm
uses: azure/setup-helm@v4.3.0
- name: Install hydrophone
run: go install sigs.k8s.io/hydrophone@latest
- name: Install k3s
env:
KUBECONFIG: /etc/rancher/k3s/k3s.yaml
K3S_HOST_VERSION: v1.32.1+k3s1
run: |
curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=${K3S_HOST_VERSION} INSTALL_K3S_EXEC="--write-kubeconfig-mode=777" sh -s -
kubectl cluster-info
kubectl get nodes
- name: Build, package and setup K3k
env:
KUBECONFIG: /etc/rancher/k3s/k3s.yaml
run: |
export REPO=ttl.sh/$(uuidgen)
export VERSION=1h
make build
make package
make push
make install
# add k3kcli to $PATH
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
echo "Wait for K3k controller to be available"
kubectl wait -n k3k-system pod --for condition=Ready -l "app.kubernetes.io/name=k3k" --timeout=5m
- name: Check k3kcli
run: k3kcli -v
- name: Create virtual cluster
env:
KUBECONFIG: /etc/rancher/k3s/k3s.yaml
run: |
k3kcli cluster create --mode=virtual --servers=2 mycluster
export KUBECONFIG=${{ github.workspace }}/k3k-mycluster-mycluster-kubeconfig.yaml
kubectl cluster-info
kubectl get nodes
kubectl get pods -A
- name: Run conformance tests (parallel)
if: matrix.type == 'parallel'
run: |
# Run conformance tests in parallel mode (skipping serial)
hydrophone --conformance --parallel 4 --skip='\[Serial\]' \
--kubeconfig ${{ github.workspace }}/k3k-mycluster-mycluster-kubeconfig.yaml \
--output-dir /tmp
- name: Run conformance tests (serial)
if: matrix.type == 'serial'
run: |
# Run serial conformance tests
hydrophone --focus='\[Serial\].*\[Conformance\]' \
--kubeconfig ${{ github.workspace }}/k3k-mycluster-mycluster-kubeconfig.yaml \
--output-dir /tmp
- name: Export logs
if: always()
env:
KUBECONFIG: /etc/rancher/k3s/k3s.yaml
run: |
journalctl -u k3s -o cat --no-pager > /tmp/k3s.log
kubectl logs -n k3k-system -l "app.kubernetes.io/name=k3k" --tail=-1 > /tmp/k3k.log
- name: Archive K3s logs
uses: actions/upload-artifact@v4
if: always()
with:
name: k3s-${{ matrix.type }}-logs
path: /tmp/k3s.log
- name: Archive K3k logs
uses: actions/upload-artifact@v4
if: always()
with:
name: k3k-${{ matrix.type }}-logs
path: /tmp/k3k.log
- name: Archive conformance logs
uses: actions/upload-artifact@v4
if: always()
with:
name: conformance-${{ matrix.type }}-logs
path: /tmp/e2e.log
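The workflow creates the virtual cluster through the CLI (`k3kcli cluster create --mode=virtual --servers=2 mycluster`); a roughly equivalent declarative manifest, sketched from the Cluster fields used elsewhere in this compare rather than copied from the repository, would be:

```yaml
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
  name: mycluster
  namespace: k3k-mycluster
spec:
  mode: virtual
  servers: 2
```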

View File

@@ -106,7 +106,7 @@ jobs:
kubectl create namespace k3k-mycluster
cat <<EOF | kubectl apply -f -
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: mycluster
@@ -259,7 +259,7 @@ jobs:
kubectl create namespace k3k-mycluster
cat <<EOF | kubectl apply -f -
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: mycluster

.github/workflows/test-e2e.yaml
View File

@@ -0,0 +1,184 @@
name: Tests E2E
on:
push:
pull_request:
workflow_dispatch:
permissions:
contents: read
jobs:
validate:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
- name: Validate
run: make validate
tests-e2e:
runs-on: ubuntu-latest
needs: validate
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
fetch-tags: true
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
- name: Install Ginkgo
run: go install github.com/onsi/ginkgo/v2/ginkgo
- name: Setup environment
run: |
mkdir ${{ github.workspace }}/covdata
echo "COVERAGE=true" >> $GITHUB_ENV
echo "GOCOVERDIR=${{ github.workspace }}/covdata" >> $GITHUB_ENV
echo "REPO=ttl.sh/$(uuidgen)" >> $GITHUB_ENV
echo "VERSION=1h" >> $GITHUB_ENV
echo "K3S_HOST_VERSION=v1.32.1+k3s1 >> $GITHUB_ENV"
- name: Install k3s
run: |
curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=${{ env.K3S_HOST_VERSION }} INSTALL_K3S_EXEC="--write-kubeconfig-mode=777" sh -s -
- name: Build and package and push dev images
env:
KUBECONFIG: /etc/rancher/k3s/k3s.yaml
REPO: ${{ env.REPO }}
VERSION: ${{ env.VERSION }}
run: |
make build
make package
make push
make install
- name: Run e2e tests
env:
KUBECONFIG: /etc/rancher/k3s/k3s.yaml
REPO: ${{ env.REPO }}
VERSION: ${{ env.VERSION }}
run: make E2E_LABEL_FILTER="e2e && !slow" test-e2e
- name: Convert coverage data
run: go tool covdata textfmt -i=${GOCOVERDIR} -o ${GOCOVERDIR}/cover.out
- name: Upload coverage reports to Codecov (controller)
uses: codecov/codecov-action@v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ${GOCOVERDIR}/cover.out
flags: controller
- name: Upload coverage reports to Codecov (e2e)
uses: codecov/codecov-action@v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ./cover.out
flags: e2e
- name: Archive k3s logs
uses: actions/upload-artifact@v4
if: always()
with:
name: e2e-k3s-logs
path: /tmp/k3s.log
- name: Archive k3k logs
uses: actions/upload-artifact@v4
if: always()
with:
name: e2e-k3k-logs
path: /tmp/k3k.log
tests-e2e-slow:
runs-on: ubuntu-latest
needs: validate
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
fetch-tags: true
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
- name: Install Ginkgo
run: go install github.com/onsi/ginkgo/v2/ginkgo
- name: Setup environment
run: |
mkdir ${{ github.workspace }}/covdata
echo "COVERAGE=true" >> $GITHUB_ENV
echo "GOCOVERDIR=${{ github.workspace }}/covdata" >> $GITHUB_ENV
echo "REPO=ttl.sh/$(uuidgen)" >> $GITHUB_ENV
echo "VERSION=1h" >> $GITHUB_ENV
echo "K3S_HOST_VERSION=v1.32.1+k3s1 >> $GITHUB_ENV"
- name: Install k3s
run: |
curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=${{ env.K3S_HOST_VERSION }} INSTALL_K3S_EXEC="--write-kubeconfig-mode=777" sh -s -
- name: Build and package and push dev images
env:
KUBECONFIG: /etc/rancher/k3s/k3s.yaml
REPO: ${{ env.REPO }}
VERSION: ${{ env.VERSION }}
run: |
make build
make package
make push
make install
- name: Run e2e tests
env:
KUBECONFIG: /etc/rancher/k3s/k3s.yaml
REPO: ${{ env.REPO }}
VERSION: ${{ env.VERSION }}
run: make E2E_LABEL_FILTER="e2e && slow" test-e2e
- name: Convert coverage data
run: go tool covdata textfmt -i=${GOCOVERDIR} -o ${GOCOVERDIR}/cover.out
- name: Upload coverage reports to Codecov (controller)
uses: codecov/codecov-action@v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ${GOCOVERDIR}/cover.out
flags: controller
- name: Upload coverage reports to Codecov (e2e)
uses: codecov/codecov-action@v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ./cover.out
flags: e2e
- name: Archive k3s logs
uses: actions/upload-artifact@v4
if: always()
with:
name: e2e-k3s-logs
path: /tmp/k3s.log
- name: Archive k3k logs
uses: actions/upload-artifact@v4
if: always()
with:
name: e2e-k3k-logs
path: /tmp/k3k.log

View File

@@ -62,76 +62,6 @@ jobs:
files: ./cover.out
flags: unit
tests-e2e:
runs-on: ubuntu-latest
needs: validate
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
fetch-tags: true
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
- name: Install Ginkgo
run: go install github.com/onsi/ginkgo/v2/ginkgo
- name: Set coverage environment
run: |
mkdir ${{ github.workspace }}/covdata
echo "COVERAGE=true" >> $GITHUB_ENV
echo "GOCOVERDIR=${{ github.workspace }}/covdata" >> $GITHUB_ENV
- name: Build and package
run: |
make build
make package
# add k3kcli to $PATH
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
- name: Check k3kcli
run: k3kcli -v
- name: Run e2e tests
run: make test-e2e
- name: Convert coverage data
run: go tool covdata textfmt -i=${GOCOVERDIR} -o ${GOCOVERDIR}/cover.out
- name: Upload coverage reports to Codecov (controller)
uses: codecov/codecov-action@v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ${GOCOVERDIR}/cover.out
flags: controller
- name: Upload coverage reports to Codecov (e2e)
uses: codecov/codecov-action@v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ./cover.out
flags: e2e
- name: Archive k3s logs
uses: actions/upload-artifact@v4
if: always()
with:
name: e2e-k3s-logs
path: /tmp/k3s.log
- name: Archive k3k logs
uses: actions/upload-artifact@v4
if: always()
with:
name: e2e-k3k-logs
path: /tmp/k3k.log
tests-cli:
runs-on: ubuntu-latest
needs: validate
@@ -150,12 +80,13 @@ jobs:
- name: Install Ginkgo
run: go install github.com/onsi/ginkgo/v2/ginkgo
- name: Set coverage environment
- name: Setup environment
run: |
mkdir ${{ github.workspace }}/covdata
echo "COVERAGE=true" >> $GITHUB_ENV
echo "GOCOVERDIR=${{ github.workspace }}/covdata" >> $GITHUB_ENV
echo "K3S_HOST_VERSION=v1.32.1+k3s1 >> $GITHUB_ENV"
- name: Build and package
run: |
@@ -169,6 +100,9 @@ jobs:
run: k3kcli -v
- name: Run cli tests
env:
K3K_DOCKER_INSTALL: "true"
K3S_HOST_VERSION: "${{ env.K3S_HOST_VERSION }}"
run: make test-cli
- name: Convert coverage data

View File

@@ -18,6 +18,9 @@ CRD_REF_DOCS := go run github.com/elastic/crd-ref-docs@$(CRD_REF_DOCS_VER)
ENVTEST ?= go run sigs.k8s.io/controller-runtime/tools/setup-envtest@$(ENVTEST_VERSION)
ENVTEST_DIR ?= $(shell pwd)/.envtest
E2E_LABEL_FILTER ?= "e2e"
export KUBEBUILDER_ASSETS ?= $(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(ENVTEST_DIR) -p path)
@@ -69,7 +72,7 @@ test-kubelet-controller: ## Run the controller tests (pkg/controller)
.PHONY: test-e2e
test-e2e: ## Run the e2e tests
$(GINKGO) $(GINKGO_FLAGS) --label-filter=e2e tests
$(GINKGO) $(GINKGO_FLAGS) --label-filter="$(E2E_LABEL_FILTER)" tests
.PHONY: test-cli
test-cli: ## Run the cli tests
@@ -83,7 +86,7 @@ generate: ## Generate the CRDs specs
docs: ## Build the CRDs and CLI docs
$(CRD_REF_DOCS) --config=./docs/crds/config.yaml \
--renderer=markdown \
--source-path=./pkg/apis/k3k.io/v1alpha1 \
--source-path=./pkg/apis/k3k.io/v1beta1 \
--output-path=./docs/crds/crd-docs.md
@go run ./docs/cli/genclidoc.go
@@ -105,6 +108,8 @@ validate: generate docs fmt ## Validate the project checking for any dependency
.PHONY: install
install: ## Install K3k with Helm on the targeted Kubernetes cluster
helm upgrade --install --namespace k3k-system --create-namespace \
--set controller.extraEnv[0].name=DEBUG \
--set-string controller.extraEnv[0].value=true \
--set controller.image.repository=$(REPO)/k3k \
--set controller.image.tag=$(VERSION) \
--set agent.shared.image.repository=$(REPO)/k3k-kubelet \

View File

@@ -3,7 +3,8 @@
[![Experimental](https://img.shields.io/badge/status-experimental-orange.svg)](https://shields.io/)
[![Go Report Card](https://goreportcard.com/badge/github.com/rancher/k3k)](https://goreportcard.com/report/github.com/rancher/k3k)
![Tests](https://github.com/rancher/k3k/actions/workflows/test.yaml/badge.svg)
![Build](https://github.com/rancher/k3k/actions/workflows/build.yml/badge.svg)
![Build](https://github.com/rancher/k3k/actions/workflows/build.yml/badge.svg)
[![Conformance Tests - Virtual Mode](https://github.com/rancher/k3k/actions/workflows/test-conformance-virtual.yaml/badge.svg)](https://github.com/rancher/k3k/actions/workflows/test-conformance-virtual.yaml)
K3k, Kubernetes in Kubernetes, is a tool that empowers you to create and manage isolated K3s clusters within your existing Kubernetes environment. It enables efficient multi-tenancy, streamlined experimentation, and robust resource isolation, minimizing infrastructure costs by allowing you to run multiple lightweight Kubernetes clusters on the same physical host. K3k offers both "shared" mode, optimizing resource utilization, and "virtual" mode, providing complete isolation with dedicated K3s server pods. This allows you to access a full Kubernetes experience without the overhead of managing separate physical resources.
@@ -71,7 +72,7 @@ To install it, simply download the latest available version for your architectur
For example, you can download the Linux amd64 version with:
```
wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v0.3.4/k3kcli-linux-amd64 && \
wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v0.3.5/k3kcli-linux-amd64 && \
chmod +x k3kcli && \
sudo mv k3kcli /usr/local/bin
```
@@ -79,7 +80,7 @@ wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v0.3.4/k3kcli-l
You should now be able to run:
```bash
-> % k3kcli --version
k3kcli version v0.3.4
k3kcli version v0.3.5
```
@@ -135,7 +136,7 @@ You can also directly create a Cluster resource in some namespace, to create a K
```bash
kubectl apply -f - <<EOF
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: mycluster

View File

@@ -2,5 +2,5 @@ apiVersion: v2
name: k3k
description: A Helm chart for K3K
type: application
version: 0.3.5-rc1
appVersion: v0.3.5-rc1
version: 1.0.0-rc3
appVersion: v1.0.0-rc3

View File

@@ -24,7 +24,7 @@ spec:
- jsonPath: .status.policyName
name: Policy
type: string
name: v1alpha1
name: v1beta1
schema:
openAPIV3Schema:
description: |-
@@ -228,6 +228,7 @@ spec:
certificates.
properties:
enabled:
default: true
description: Enabled toggles this feature on or off.
type: boolean
sources:
@@ -244,6 +245,8 @@ spec:
- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
- For ServiceAccountTokenKey: 'tls.key'.
type: string
required:
- secretName
type: object
etcdPeerCA:
description: ETCDPeerCA specifies the etcd-peer-ca cert/key
@@ -256,6 +259,8 @@ spec:
- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
- For ServiceAccountTokenKey: 'tls.key'.
type: string
required:
- secretName
type: object
etcdServerCA:
description: ETCDServerCA specifies the etcd-server-ca cert/key
@@ -268,6 +273,8 @@ spec:
- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
- For ServiceAccountTokenKey: 'tls.key'.
type: string
required:
- secretName
type: object
requestHeaderCA:
description: RequestHeaderCA specifies the request-header-ca
@@ -280,6 +287,8 @@ spec:
- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
- For ServiceAccountTokenKey: 'tls.key'.
type: string
required:
- secretName
type: object
serverCA:
description: ServerCA specifies the server-ca cert/key pair.
@@ -291,6 +300,8 @@ spec:
- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
- For ServiceAccountTokenKey: 'tls.key'.
type: string
required:
- secretName
type: object
serviceAccountToken:
description: ServiceAccountToken specifies the service-account-token
@@ -303,8 +314,20 @@ spec:
- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
- For ServiceAccountTokenKey: 'tls.key'.
type: string
required:
- secretName
type: object
required:
- clientCA
- etcdPeerCA
- etcdServerCA
- requestHeaderCA
- serverCA
- serviceAccountToken
type: object
required:
- enabled
- sources
type: object
expose:
description: |-
@@ -326,7 +349,7 @@ spec:
use for the Ingress.
type: string
type: object
loadbalancer:
loadBalancer:
description: LoadBalancer specifies options for exposing the API
server through a LoadBalancer service.
properties:
@@ -368,7 +391,7 @@ spec:
x-kubernetes-validations:
- message: ingress, loadbalancer and nodePort are mutually exclusive;
only one can be set
rule: '[has(self.ingress), has(self.loadbalancer), has(self.nodePort)].filter(x,
rule: '[has(self.ingress), has(self.loadBalancer), has(self.nodePort)].filter(x,
x).size() <= 1'
mirrorHostNodes:
description: |-
@@ -410,7 +433,7 @@ spec:
This field is only relevant in "dynamic" mode.
type: string
storageRequestSize:
default: 1G
default: 2G
description: |-
StorageRequestSize is the requested size for the PVC.
This field is only relevant in "dynamic" mode.
@@ -584,12 +607,13 @@ spec:
description: Sync specifies the resources types that will be synced
from virtual cluster to host cluster.
properties:
configmaps:
configMaps:
default:
enabled: true
description: ConfigMaps resources sync configuration.
properties:
enabled:
default: true
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
@@ -599,6 +623,8 @@ spec:
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
required:
- enabled
type: object
ingresses:
default:
@@ -606,6 +632,7 @@ spec:
description: Ingresses resources sync configuration.
properties:
enabled:
default: false
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
@@ -615,6 +642,8 @@ spec:
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
required:
- enabled
type: object
persistentVolumeClaims:
default:
@@ -622,6 +651,7 @@ spec:
description: PersistentVolumeClaims resources sync configuration.
properties:
enabled:
default: true
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
@@ -631,6 +661,8 @@ spec:
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
required:
- enabled
type: object
priorityClasses:
default:
@@ -638,6 +670,7 @@ spec:
description: PriorityClasses resources sync configuration.
properties:
enabled:
default: false
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
@@ -647,6 +680,8 @@ spec:
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
required:
- enabled
type: object
secrets:
default:
@@ -654,6 +689,7 @@ spec:
description: Secrets resources sync configuration.
properties:
enabled:
default: true
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
@@ -670,6 +706,7 @@ spec:
description: Services resources sync configuration.
properties:
enabled:
default: true
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
@@ -679,6 +716,8 @@ spec:
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
required:
- enabled
type: object
type: object
tlsSANs:

View File

@@ -20,7 +20,7 @@ spec:
- jsonPath: .spec.allowedMode
name: Mode
type: string
name: v1alpha1
name: v1beta1
schema:
openAPIV3Schema:
description: |-
@@ -230,12 +230,13 @@ spec:
description: Sync specifies the resources types that will be synced
from virtual cluster to host cluster.
properties:
configmaps:
configMaps:
default:
enabled: true
description: ConfigMaps resources sync configuration.
properties:
enabled:
default: true
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
@@ -245,6 +246,8 @@ spec:
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
required:
- enabled
type: object
ingresses:
default:
@@ -252,6 +255,7 @@ spec:
description: Ingresses resources sync configuration.
properties:
enabled:
default: false
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
@@ -261,6 +265,8 @@ spec:
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
required:
- enabled
type: object
persistentVolumeClaims:
default:
@@ -268,6 +274,7 @@ spec:
description: PersistentVolumeClaims resources sync configuration.
properties:
enabled:
default: true
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
@@ -277,6 +284,8 @@ spec:
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
required:
- enabled
type: object
priorityClasses:
default:
@@ -284,6 +293,7 @@ spec:
description: PriorityClasses resources sync configuration.
properties:
enabled:
default: false
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
@@ -293,6 +303,8 @@ spec:
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
required:
- enabled
type: object
secrets:
default:
@@ -300,6 +312,7 @@ spec:
description: Secrets resources sync configuration.
properties:
enabled:
default: true
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
@@ -316,6 +329,7 @@ spec:
description: Services resources sync configuration.
properties:
enabled:
default: true
description: Enabled is an on/off switch for syncing resources.
type: boolean
selector:
@@ -325,6 +339,8 @@ spec:
Selector specifies set of labels of the resources that will be synced, if empty
then all resources of the given type will be synced.
type: object
required:
- enabled
type: object
type: object
type: object

View File

@@ -6,7 +6,7 @@ metadata:
{{- include "k3k.labels" . | nindent 4 }}
namespace: {{ .Release.Namespace }}
spec:
replicas: {{ .Values.controller.replicaCount }}
replicas: {{ .Values.controller.replicas }}
selector:
matchLabels:
{{- include "k3k.selectorLabels" . | nindent 6 }}
@@ -20,51 +20,35 @@ spec:
- image: "{{- include "controller.registry" .}}{{ .Values.controller.image.repository }}:{{ .Values.controller.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.controller.image.pullPolicy }}
name: {{ .Chart.Name }}
{{- with .Values.controller.resources }}
resources:
requests:
cpu: {{ .Values.controller.resources.requests.cpu }}
memory: {{ .Values.controller.resources.requests.memory }}
limits:
{{ if .Values.controller.resources.limits.cpu }}
cpu: {{ .Values.controller.resources.limits.cpu }}
{{ end }}
{{ if .Values.controller.resources.limits.memory }}
memory: {{ .Values.controller.resources.limits.memory }}
{{ end}}
{{- toYaml . | nindent 12 }}
{{- end }}
args:
- k3k
- --cluster-cidr={{ .Values.host.clusterCIDR }}
- --k3s-server-image={{- include "server.registry" .}}{{ .Values.server.image.repository }}
- --k3s-server-image-pull-policy={{ .Values.server.image.pullPolicy }}
- --agent-shared-image={{- include "agent.shared.registry" .}}{{ .Values.agent.shared.image.repository }}:{{ default .Chart.AppVersion .Values.agent.shared.image.tag }}
- --agent-shared-image-pull-policy={{ .Values.agent.shared.image.pullPolicy }}
- --agent-virtual-image={{- include "agent.virtual.registry" .}}{{ .Values.agent.virtual.image.repository }}
- --agent-virtual-image-pull-policy={{ .Values.agent.virtual.image.pullPolicy }}
- --kubelet-port-range={{ .Values.agent.shared.kubeletPortRange }}
- --webhook-port-range={{ .Values.agent.shared.webhookPortRange }}
{{- range $key, $value := include "image.pullSecrets" (concat .Values.agent.imagePullSecrets .Values.global.imagePullSecrets) | fromYamlArray }}
- --agent-image-pull-secret
- --agent-image-pull-secret
- {{ .name }}
{{- end }}
{{- range $key, $value := include "image.pullSecrets" (concat .Values.server.imagePullSecrets .Values.global.imagePullSecrets) | fromYamlArray }}
- --server-image-pull-secret
- --server-image-pull-secret
- {{ .name }}
{{- end }}
env:
- name: CLUSTER_CIDR
value: {{ .Values.host.clusterCIDR }}
- name: SHARED_AGENT_IMAGE
value: "{{- include "agent.shared.registry" .}}{{ .Values.agent.shared.image.repository }}:{{ default .Chart.AppVersion .Values.agent.shared.image.tag }}"
- name: SHARED_AGENT_IMAGE_PULL_POLICY
value: {{ .Values.agent.shared.image.pullPolicy }}
- name: VIRTUAL_AGENT_IMAGE
value: "{{- include "agent.virtual.registry" .}}{{ .Values.agent.virtual.image.repository }}"
- name: VIRTUAL_AGENT_IMAGE_PULL_POLICY
value: {{ .Values.agent.virtual.image.pullPolicy }}
- name: K3S_SERVER_IMAGE
value: "{{- include "server.registry" .}}{{ .Values.server.image.repository }}"
- name: K3S_SERVER_IMAGE_PULL_POLICY
value: {{ .Values.server.image.pullPolicy }}
- name: KUBELET_PORT_RANGE
value: {{ .Values.agent.shared.kubeletPortRange }}
- name: WEBHOOK_PORT_RANGE
value: {{ .Values.agent.shared.webhookPortRange }}
- name: CONTROLLER_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- with .Values.extraEnv }}
{{- with .Values.controller.extraEnv }}
{{- toYaml . | nindent 10 }}
{{- end }}
ports:

View File

@@ -20,13 +20,15 @@ host:
clusterCIDR: ""
controller:
replicaCount: 1
replicas: 1
image:
registry: ""
repository: rancher/k3k
tag: ""
pullPolicy: ""
imagePullSecrets: []
# extraEnv allows you to specify additional environment variables for the k3k controller deployment.
# This is useful for passing custom configuration or secrets to the controller.
# For example:
@@ -39,35 +41,16 @@ controller:
# name: my-secret
# key: my-key
extraEnv: []
# resources limits and requests allows you to set resources limits and requests for CPU and Memory
resources:
requests:
cpu: "100m"
memory: "100Mi"
limits:
cpu: ""
memory: ""
# configuration related to the agent component in k3k
agent:
imagePullSecrets: []
# configuration related to agent in shared mode
shared:
# Specifies the port range that will be used for k3k-kubelet api if mirrorHostNodes is enabled
kubeletPortRange: "50000-51000"
# Specifies the port range that will be used for webhook if mirrorHostNodes is enabled
webhookPortRange: "51001-52000"
image:
registry: ""
repository: "rancher/k3k-kubelet"
tag: ""
pullPolicy: ""
# configuration related to agent in virtual mode
virtual:
image:
registry: ""
repository: "rancher/k3s"
pullPolicy: ""
# resources allows you to set resources limits and requests for CPU and Memory
# resources:
# limits:
# cpu: "200m"
# memory: "200Mi"
# requests:
# cpu: "100m"
# memory: "100Mi"
resources: {}
# configuration related to k3s server component in k3k
server:
@@ -76,3 +59,27 @@ server:
registry:
repository: "rancher/k3s"
pullPolicy: ""
# configuration related to the agent component in k3k
agent:
imagePullSecrets: []
# configuration related to agent in shared mode
shared:
image:
registry: ""
repository: "rancher/k3k-kubelet"
tag: ""
pullPolicy: ""
# Specifies the port range that will be used for k3k-kubelet api if mirrorHostNodes is enabled
kubeletPortRange: "50000-51000"
# Specifies the port range that will be used for webhook if mirrorHostNodes is enabled
webhookPortRange: "51001-52000"
# configuration related to agent in virtual mode
virtual:
image:
registry: ""
repository: "rancher/k3s"
pullPolicy: ""

View File

@@ -21,7 +21,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/controller/kubeconfig"
@@ -46,6 +46,7 @@ type CreateConfig struct {
policy string
mirrorHostNodes bool
customCertsPath string
timeout time.Duration
}
func NewClusterCreateCmd(appCtx *AppContext) *cobra.Command {
@@ -78,7 +79,7 @@ func createAction(appCtx *AppContext, config *CreateConfig) func(cmd *cobra.Comm
return errors.New("invalid cluster name")
}
if config.mode == string(v1alpha1.SharedClusterMode) && config.agents != 0 {
if config.mode == string(v1beta1.SharedClusterMode) && config.agents != 0 {
return errors.New("invalid flag, --agents flag is only allowed in virtual mode")
}
@@ -114,8 +115,8 @@ func createAction(appCtx *AppContext, config *CreateConfig) func(cmd *cobra.Comm
cluster := newCluster(name, namespace, config)
cluster.Spec.Expose = &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{},
cluster.Spec.Expose = &v1beta1.ExposeConfig{
NodePort: &v1beta1.NodePortConfig{},
}
// add Host IP address as an extra TLS-SAN to expose the k3k cluster
@@ -141,7 +142,7 @@ func createAction(appCtx *AppContext, config *CreateConfig) func(cmd *cobra.Comm
logrus.Infof("Waiting for cluster to be available..")
if err := waitForCluster(ctx, client, cluster); err != nil {
if err := waitForCluster(ctx, client, cluster, config.timeout); err != nil {
return fmt.Errorf("failed to wait for cluster to become ready (status: %s): %w", cluster.Status.Phase, err)
}
@@ -169,17 +170,17 @@ func createAction(appCtx *AppContext, config *CreateConfig) func(cmd *cobra.Comm
}
}
func newCluster(name, namespace string, config *CreateConfig) *v1alpha1.Cluster {
cluster := &v1alpha1.Cluster{
func newCluster(name, namespace string, config *CreateConfig) *v1beta1.Cluster {
cluster := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
TypeMeta: metav1.TypeMeta{
Kind: "Cluster",
APIVersion: "k3k.io/v1alpha1",
APIVersion: "k3k.io/v1beta1",
},
Spec: v1alpha1.ClusterSpec{
Spec: v1beta1.ClusterSpec{
Servers: ptr.To(int32(config.servers)),
Agents: ptr.To(int32(config.agents)),
ClusterCIDR: config.clusterCIDR,
@@ -189,9 +190,9 @@ func newCluster(name, namespace string, config *CreateConfig) *v1alpha1.Cluster
ServerEnvs: env(config.serverEnvs),
AgentEnvs: env(config.agentEnvs),
Version: config.version,
Mode: v1alpha1.ClusterMode(config.mode),
Persistence: v1alpha1.PersistenceConfig{
Type: v1alpha1.PersistenceMode(config.persistenceType),
Mode: v1beta1.ClusterMode(config.mode),
Persistence: v1beta1.PersistenceConfig{
Type: v1beta1.PersistenceMode(config.persistenceType),
StorageClassName: ptr.To(config.storageClassName),
StorageRequestSize: config.storageRequestSize,
},
@@ -210,25 +211,25 @@ func newCluster(name, namespace string, config *CreateConfig) *v1alpha1.Cluster
}
if config.customCertsPath != "" {
cluster.Spec.CustomCAs = v1alpha1.CustomCAs{
cluster.Spec.CustomCAs = &v1beta1.CustomCAs{
Enabled: true,
Sources: v1alpha1.CredentialSources{
ClientCA: v1alpha1.CredentialSource{
Sources: v1beta1.CredentialSources{
ClientCA: v1beta1.CredentialSource{
SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "client-ca"),
},
ServerCA: v1alpha1.CredentialSource{
ServerCA: v1beta1.CredentialSource{
SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "server-ca"),
},
ETCDServerCA: v1alpha1.CredentialSource{
ETCDServerCA: v1beta1.CredentialSource{
SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "etcd-server-ca"),
},
ETCDPeerCA: v1alpha1.CredentialSource{
ETCDPeerCA: v1beta1.CredentialSource{
SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "etcd-peer-ca"),
},
RequestHeaderCA: v1alpha1.CredentialSource{
RequestHeaderCA: v1beta1.CredentialSource{
SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "request-header-ca"),
},
ServiceAccountToken: v1alpha1.CredentialSource{
ServiceAccountToken: v1beta1.CredentialSource{
SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "service-account-token"),
},
},
@@ -256,9 +257,8 @@ func env(envSlice []string) []v1.EnvVar {
return envVars
}
func waitForCluster(ctx context.Context, k8sClient client.Client, cluster *v1alpha1.Cluster) error {
func waitForCluster(ctx context.Context, k8sClient client.Client, cluster *v1beta1.Cluster, timeout time.Duration) error {
interval := 5 * time.Second
timeout := 2 * time.Minute
return wait.PollUntilContextTimeout(ctx, interval, timeout, true, func(ctx context.Context) (bool, error) {
key := client.ObjectKeyFromObject(cluster)
@@ -267,12 +267,12 @@ func waitForCluster(ctx context.Context, k8sClient client.Client, cluster *v1alp
}
// If resource ready -> stop polling
if cluster.Status.Phase == v1alpha1.ClusterReady {
if cluster.Status.Phase == v1beta1.ClusterReady {
return true, nil
}
// If resource failed -> stop polling with an error
if cluster.Status.Phase == v1alpha1.ClusterFailed {
if cluster.Status.Phase == v1beta1.ClusterFailed {
return true, fmt.Errorf("cluster creation failed: %s", cluster.Status.Phase)
}

View File

@@ -2,11 +2,12 @@ package cmds
import (
"errors"
"time"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/api/resource"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
func createFlags(cmd *cobra.Command, cfg *CreateConfig) {
@@ -16,7 +17,7 @@ func createFlags(cmd *cobra.Command, cfg *CreateConfig) {
cmd.Flags().StringVar(&cfg.clusterCIDR, "cluster-cidr", "", "cluster CIDR")
cmd.Flags().StringVar(&cfg.serviceCIDR, "service-cidr", "", "service CIDR")
cmd.Flags().BoolVar(&cfg.mirrorHostNodes, "mirror-host-nodes", false, "Mirror Host Cluster Nodes")
cmd.Flags().StringVar(&cfg.persistenceType, "persistence-type", string(v1alpha1.DynamicPersistenceMode), "persistence mode for the nodes (dynamic, ephemeral, static)")
cmd.Flags().StringVar(&cfg.persistenceType, "persistence-type", string(v1beta1.DynamicPersistenceMode), "persistence mode for the nodes (dynamic, ephemeral, static)")
cmd.Flags().StringVar(&cfg.storageClassName, "storage-class-name", "", "storage class name for dynamic persistence type")
cmd.Flags().StringVar(&cfg.storageRequestSize, "storage-request-size", "", "storage size for dynamic persistence type")
cmd.Flags().StringSliceVar(&cfg.serverArgs, "server-args", []string{}, "servers extra arguments")
@@ -28,6 +29,7 @@ func createFlags(cmd *cobra.Command, cfg *CreateConfig) {
cmd.Flags().StringVar(&cfg.kubeconfigServerHost, "kubeconfig-server", "", "override the kubeconfig server host")
cmd.Flags().StringVar(&cfg.policy, "policy", "", "The policy to create the cluster in")
cmd.Flags().StringVar(&cfg.customCertsPath, "custom-certs", "", "The path for custom certificate directory")
cmd.Flags().DurationVar(&cfg.timeout, "timeout", 3*time.Minute, "The timeout for waiting for the cluster to become ready (e.g., 10s, 5m, 1h).")
}
func validateCreateConfig(cfg *CreateConfig) error {
@@ -36,8 +38,8 @@ func validateCreateConfig(cfg *CreateConfig) error {
}
if cfg.persistenceType != "" {
switch v1alpha1.PersistenceMode(cfg.persistenceType) {
case v1alpha1.EphemeralPersistenceMode, v1alpha1.DynamicPersistenceMode:
switch v1beta1.PersistenceMode(cfg.persistenceType) {
case v1beta1.EphemeralPersistenceMode, v1beta1.DynamicPersistenceMode:
return nil
default:
return errors.New(`persistence-type should be one of "dynamic", "ephemeral" or "static"`)
@@ -50,7 +52,7 @@ func validateCreateConfig(cfg *CreateConfig) error {
if cfg.mode != "" {
switch cfg.mode {
case string(v1alpha1.VirtualClusterMode), string(v1alpha1.SharedClusterMode):
case string(v1beta1.VirtualClusterMode), string(v1beta1.SharedClusterMode):
return nil
default:
return errors.New(`mode should be one of "shared" or "virtual"`)

View File

@@ -14,7 +14,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
)
@@ -50,7 +50,7 @@ func delete(appCtx *AppContext) func(cmd *cobra.Command, args []string) error {
logrus.Infof("Deleting [%s] cluster in namespace [%s]", name, namespace)
cluster := v1alpha1.Cluster{
cluster := v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
@@ -86,7 +86,7 @@ func delete(appCtx *AppContext) func(cmd *cobra.Command, args []string) error {
}
}
func RemoveOwnerReferenceFromSecret(ctx context.Context, name string, cl ctrlclient.Client, cluster v1alpha1.Cluster) error {
func RemoveOwnerReferenceFromSecret(ctx context.Context, name string, cl ctrlclient.Client, cluster v1beta1.Cluster) error {
var secret v1.Secret
key := types.NamespacedName{

View File

@@ -10,7 +10,7 @@ import (
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
func NewClusterListCmd(appCtx *AppContext) *cobra.Command {
@@ -32,7 +32,7 @@ func list(appCtx *AppContext) func(cmd *cobra.Command, args []string) error {
ctx := context.Background()
client := appCtx.Client
var clusters v1alpha1.ClusterList
var clusters v1beta1.ClusterList
if err := client.List(ctx, &clusters, ctrlclient.InNamespace(appCtx.namespace)); err != nil {
return err
}

View File

@@ -18,7 +18,7 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/certs"
"github.com/rancher/k3k/pkg/controller/kubeconfig"
@@ -83,7 +83,7 @@ func generate(appCtx *AppContext, cfg *GenerateKubeconfigConfig) func(cmd *cobra
Namespace: appCtx.Namespace(cfg.name),
}
var cluster v1alpha1.Cluster
var cluster v1beta1.Cluster
if err := client.Get(ctx, clusterKey, &cluster); err != nil {
return err
@@ -128,7 +128,7 @@ func generate(appCtx *AppContext, cfg *GenerateKubeconfigConfig) func(cmd *cobra
}
}
func writeKubeconfigFile(cluster *v1alpha1.Cluster, kubeconfig *clientcmdapi.Config, configName string) error {
func writeKubeconfigFile(cluster *v1beta1.Cluster, kubeconfig *clientcmdapi.Config, configName string) error {
if configName == "" {
configName = cluster.Namespace + "-" + cluster.Name + "-kubeconfig.yaml"
}

View File

@@ -13,7 +13,7 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller/policy"
)
@@ -30,7 +30,7 @@ func NewPolicyCreateCmd(appCtx *AppContext) *cobra.Command {
Example: "k3kcli policy create [command options] NAME",
PreRunE: func(cmd *cobra.Command, args []string) error {
switch config.mode {
case string(v1alpha1.VirtualClusterMode), string(v1alpha1.SharedClusterMode):
case string(v1beta1.VirtualClusterMode), string(v1beta1.SharedClusterMode):
return nil
default:
return errors.New(`mode should be one of "shared" or "virtual"`)
@@ -51,7 +51,7 @@ func policyCreateAction(appCtx *AppContext, config *VirtualClusterPolicyCreateCo
client := appCtx.Client
policyName := args[0]
_, err := createPolicy(ctx, client, v1alpha1.ClusterMode(config.mode), policyName)
_, err := createPolicy(ctx, client, v1beta1.ClusterMode(config.mode), policyName)
return err
}
@@ -81,18 +81,18 @@ func createNamespace(ctx context.Context, client client.Client, name, policyName
return nil
}
func createPolicy(ctx context.Context, client client.Client, mode v1alpha1.ClusterMode, policyName string) (*v1alpha1.VirtualClusterPolicy, error) {
func createPolicy(ctx context.Context, client client.Client, mode v1beta1.ClusterMode, policyName string) (*v1beta1.VirtualClusterPolicy, error) {
logrus.Infof("Creating policy [%s]", policyName)
policy := &v1alpha1.VirtualClusterPolicy{
policy := &v1beta1.VirtualClusterPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: policyName,
},
TypeMeta: metav1.TypeMeta{
Kind: "VirtualClusterPolicy",
APIVersion: "k3k.io/v1alpha1",
APIVersion: "k3k.io/v1beta1",
},
Spec: v1alpha1.VirtualClusterPolicySpec{
Spec: v1beta1.VirtualClusterPolicySpec{
AllowedMode: mode,
},
}

View File

@@ -8,7 +8,7 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
func NewPolicyDeleteCmd(appCtx *AppContext) *cobra.Command {
@@ -27,7 +27,7 @@ func policyDeleteAction(appCtx *AppContext) func(cmd *cobra.Command, args []stri
client := appCtx.Client
name := args[0]
policy := &v1alpha1.VirtualClusterPolicy{}
policy := &v1beta1.VirtualClusterPolicy{}
policy.Name = name
if err := client.Delete(ctx, policy); err != nil {

View File

@@ -9,7 +9,7 @@ import (
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
func NewPolicyListCmd(appCtx *AppContext) *cobra.Command {
@@ -27,7 +27,7 @@ func policyList(appCtx *AppContext) func(cmd *cobra.Command, args []string) erro
ctx := context.Background()
client := appCtx.Client
var policies v1alpha1.VirtualClusterPolicyList
var policies v1beta1.VirtualClusterPolicyList
if err := client.List(ctx, &policies); err != nil {
return err
}

View File

@@ -16,7 +16,7 @@ import (
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/buildinfo"
)
@@ -51,7 +51,7 @@ func NewRootCmd() *cobra.Command {
scheme := runtime.NewScheme()
_ = clientgoscheme.AddToScheme(scheme)
_ = v1alpha1.AddToScheme(scheme)
_ = v1beta1.AddToScheme(scheme)
_ = apiextensionsv1.AddToScheme(scheme)
ctrlClient, err := client.New(restConfig, client.Options{Scheme: scheme})

View File

@@ -25,7 +25,7 @@ func getPrinterColumnsFromCRD(crd *apiextensionsv1.CustomResourceDefinition) []a
}
for _, version := range crd.Spec.Versions {
if version.Name == "v1alpha1" {
if version.Name == "v1beta1" {
printerColumns = append(printerColumns, version.AdditionalPrinterColumns...)
break
}

View File

@@ -22,7 +22,7 @@ This example creates a "shared" mode K3k cluster with:
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: my-virtual-cluster

View File

@@ -33,6 +33,7 @@ k3kcli cluster create [command options] NAME
--service-cidr string service CIDR
--storage-class-name string storage class name for dynamic persistence type
--storage-request-size string storage size for dynamic persistence type
--timeout duration The timeout for waiting for the cluster to become ready (e.g., 10s, 5m, 1h). (default 3m0s)
--token string token of the cluster
--version string k3s version
```

View File

@@ -1,10 +1,10 @@
# API Reference
## Packages
- [k3k.io/v1alpha1](#k3kiov1alpha1)
- [k3k.io/v1beta1](#k3kiov1beta1)
## k3k.io/v1alpha1
## k3k.io/v1beta1
### Resource Types
@@ -47,7 +47,7 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `apiVersion` _string_ | `k3k.io/v1alpha1` | | |
| `apiVersion` _string_ | `k3k.io/v1beta1` | | |
| `kind` _string_ | `Cluster` | | |
| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
| `spec` _[ClusterSpec](#clusterspec)_ | Spec defines the desired state of the Cluster. | \{ \} | |
@@ -65,7 +65,7 @@ ClusterList is a list of Cluster resources.
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `apiVersion` _string_ | `k3k.io/v1alpha1` | | |
| `apiVersion` _string_ | `k3k.io/v1beta1` | | |
| `kind` _string_ | `ClusterList` | | |
| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
| `items` _[Cluster](#cluster) array_ | | | |
@@ -152,7 +152,7 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | | |
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | true | |
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
@@ -208,7 +208,7 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `enabled` _boolean_ | Enabled toggles this feature on or off. | | |
| `enabled` _boolean_ | Enabled toggles this feature on or off. | true | |
| `sources` _[CredentialSources](#credentialsources)_ | Sources defines the sources for all required custom CA certificates. | | |
@@ -226,7 +226,7 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `ingress` _[IngressConfig](#ingressconfig)_ | Ingress specifies options for exposing the API server through an Ingress. | | |
| `loadbalancer` _[LoadBalancerConfig](#loadbalancerconfig)_ | LoadBalancer specifies options for exposing the API server through a LoadBalancer service. | | |
| `loadBalancer` _[LoadBalancerConfig](#loadbalancerconfig)_ | LoadBalancer specifies options for exposing the API server through a LoadBalancer service. | | |
| `nodePort` _[NodePortConfig](#nodeportconfig)_ | NodePort specifies options for exposing the API server through NodePort. | | |
@@ -260,7 +260,7 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | | |
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | false | |
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
@@ -313,7 +313,7 @@ _Appears in:_
| --- | --- | --- | --- |
| `type` _[PersistenceMode](#persistencemode)_ | Type specifies the persistence mode. | dynamic | |
| `storageClassName` _string_ | StorageClassName is the name of the StorageClass to use for the PVC.<br />This field is only relevant in "dynamic" mode. | | |
| `storageRequestSize` _string_ | StorageRequestSize is the requested size for the PVC.<br />This field is only relevant in "dynamic" mode. | 1G | |
| `storageRequestSize` _string_ | StorageRequestSize is the requested size for the PVC.<br />This field is only relevant in "dynamic" mode. | 2G | |
#### PersistenceMode
@@ -342,7 +342,7 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | | |
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | true | |
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
@@ -373,7 +373,7 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | | |
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | false | |
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
@@ -390,7 +390,7 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | | |
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | true | |
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
@@ -407,7 +407,7 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | | |
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | true | |
| `selector` _object (keys:string, values:string)_ | Selector specifies the set of labels of the resources that will be synced; if empty,<br />all resources of the given type will be synced. | | |
@@ -426,7 +426,7 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `services` _[ServiceSyncConfig](#servicesyncconfig)_ | Services resources sync configuration. | \{ enabled:true \} | |
| `configmaps` _[ConfigMapSyncConfig](#configmapsyncconfig)_ | ConfigMaps resources sync configuration. | \{ enabled:true \} | |
| `configMaps` _[ConfigMapSyncConfig](#configmapsyncconfig)_ | ConfigMaps resources sync configuration. | \{ enabled:true \} | |
| `secrets` _[SecretSyncConfig](#secretsyncconfig)_ | Secrets resources sync configuration. | \{ enabled:true \} | |
| `ingresses` _[IngressSyncConfig](#ingresssyncconfig)_ | Ingresses resources sync configuration. | \{ enabled:false \} | |
| `persistentVolumeClaims` _[PersistentVolumeClaimSyncConfig](#persistentvolumeclaimsyncconfig)_ | PersistentVolumeClaims resources sync configuration. | \{ enabled:true \} | |
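For illustration, a minimal sketch of the sync configuration using the camelCase `configMaps` key (assuming the spec field is exposed as `spec.sync`; the `app: demo` label is hypothetical):

```yaml
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
  name: mycluster        # placeholder name
spec:
  sync:
    configMaps:          # note the camelCase key (previously "configmaps")
      enabled: true
    secrets:
      enabled: true
      selector:
        app: demo        # hypothetical label; only matching Secrets are synced
    ingresses:
      enabled: false     # default
```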
@@ -447,7 +447,7 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `apiVersion` _string_ | `k3k.io/v1alpha1` | | |
| `apiVersion` _string_ | `k3k.io/v1beta1` | | |
| `kind` _string_ | `VirtualClusterPolicy` | | |
| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
| `spec` _[VirtualClusterPolicySpec](#virtualclusterpolicyspec)_ | Spec defines the desired state of the VirtualClusterPolicy. | \{ \} | |
@@ -465,7 +465,7 @@ VirtualClusterPolicyList is a list of VirtualClusterPolicy resources.
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `apiVersion` _string_ | `k3k.io/v1alpha1` | | |
| `apiVersion` _string_ | `k3k.io/v1beta1` | | |
| `kind` _string_ | `VirtualClusterPolicyList` | | |
| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
| `items` _[VirtualClusterPolicy](#virtualclusterpolicy) array_ | | | |

View File

@@ -130,7 +130,7 @@ Create then the virtual cluster exposing through NodePort one of the ports that
```bash
cat <<EOF | kubectl apply -f -
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: mycluster

View File

@@ -17,7 +17,7 @@ This guide walks through the various ways to create and manage virtual clusters
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: k3kcluster-ingress
@@ -46,7 +46,7 @@ This will create a virtual cluster in `shared` mode and expose it via an ingress
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: k3kcluster-persistent
@@ -80,7 +80,7 @@ k3kcli cluster create \
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: k3kcluster-ha
@@ -105,7 +105,7 @@ k3kcli cluster create \
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: k3kcluster-virtual
@@ -136,7 +136,7 @@ k3kcli cluster create \
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: k3kcluster-ephemeral
@@ -162,7 +162,7 @@ k3kcli cluster create \
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: k3kcluster-custom-k8s
@@ -189,7 +189,7 @@ k3kcli cluster create \
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: k3kcluster-resourced
@@ -216,7 +216,7 @@ This configures the CPU and memory limit for the virtual cluster.
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: k3kcluster-node-placed
@@ -259,7 +259,7 @@ k3kcli cluster create \
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: k3kcluster-http-proxy

View File

@@ -37,7 +37,7 @@ If you create a `VirtualClusterPolicy` without specifying any `spec` fields (e.g
```yaml
# Example of a minimal VCP (after creation with defaults)
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: VirtualClusterPolicy
metadata:
name: my-default-policy
@@ -56,7 +56,7 @@ You can restrict the `mode` (e.g., "shared" or "virtual") in which K3k `Cluster`
**Example:** Allow only "shared" mode clusters.
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: VirtualClusterPolicy
metadata:
name: shared-only-policy
@@ -74,7 +74,7 @@ You can define resource consumption limits for bound Namespaces by specifying a
**Example:** Set CPU, memory, and pod limits.
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: VirtualClusterPolicy
metadata:
name: quota-policy
@@ -93,7 +93,7 @@ You can define default resource requests/limits and min/max constraints for cont
**Example:** Define default CPU requests/limits and min/max CPU.
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: VirtualClusterPolicy
metadata:
name: limit-policy
@@ -118,7 +118,7 @@ By default, K3k creates a `NetworkPolicy` in bound Namespaces to provide network
**Example:** Disable the default NetworkPolicy.
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: VirtualClusterPolicy
metadata:
name: no-default-netpol-policy
@@ -133,7 +133,7 @@ You can enforce Pod Security Standards (PSS) by specifying a Pod Security Admiss
**Example:** Enforce the "baseline" PSS level.
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: VirtualClusterPolicy
metadata:
name: baseline-psa-policy

View File

@@ -1,19 +0,0 @@
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
name: example1
spec:
mode: "shared"
servers: 1
agents: 3
token: test
version: v1.26.0-k3s2
clusterCIDR: 10.30.0.0/16
serviceCIDR: 10.31.0.0/16
clusterDNS: 10.30.0.10
serverArgs:
- "--write-kubeconfig-mode=777"
expose:
ingress:
enabled: true
ingressClassName: "nginx"

View File

@@ -0,0 +1,15 @@
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: shared-multiple-servers
spec:
mode: shared
servers: 3
agents: 3
version: v1.33.1-k3s1
serverArgs:
- "--write-kubeconfig-mode=777"
tlsSANs:
- myserver.app
expose:
nodePort: {}

View File

@@ -0,0 +1,14 @@
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: shared-single-server
spec:
mode: shared
servers: 1
version: v1.33.1-k3s1
serverArgs:
- "--write-kubeconfig-mode=777"
tlsSANs:
- myserver.app
expose:
nodePort: {}

View File

@@ -1,19 +0,0 @@
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
name: single-server
spec:
mode: "shared"
servers: 1
agents: 3
token: test
version: v1.26.0-k3s2
clusterCIDR: 10.30.0.0/16
serviceCIDR: 10.31.0.0/16
clusterDNS: 10.30.0.10
serverArgs:
- "--write-kubeconfig-mode=777"
expose:
ingress:
enabled: true
ingressClassName: "nginx"

View File

@@ -0,0 +1,13 @@
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: virtual-server
spec:
mode: virtual
servers: 3
agents: 3
version: v1.33.1-k3s1
tlsSANs:
- myserver.app
expose:
nodePort: {}

View File

@@ -1,9 +1,9 @@
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: VirtualClusterPolicy
metadata:
name: policy-example
# spec:
# disableNetworkPolicy: false
# allowedMode: "shared"
spec:
allowedMode: shared
disableNetworkPolicy: true
# podSecurityAdmissionLevel: "baseline"
# defaultPriorityClass: "lowpriority"

21
go.mod
View File

@@ -11,6 +11,7 @@ replace (
)
require (
github.com/go-logr/logr v1.4.2
github.com/go-logr/zapr v1.3.0
github.com/google/go-cmp v0.7.0
github.com/onsi/ginkgo/v2 v2.21.0
@@ -42,16 +43,6 @@ require (
sigs.k8s.io/controller-runtime v0.19.4
)
require (
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
github.com/pelletier/go-toml/v2 v2.2.3 // indirect
github.com/sagikazarmark/locafero v0.7.0 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/afero v1.12.0 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
)
require (
dario.cat/mergo v1.0.1 // indirect
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
@@ -64,6 +55,7 @@ require (
github.com/Masterminds/squirrel v1.5.4 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/NYTimes/gziphandler v1.1.1 // indirect
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
@@ -98,13 +90,13 @@ require (
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/go-errors/errors v1.4.2 // indirect
github.com/go-gorp/gorp/v3 v3.1.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
@@ -161,6 +153,7 @@ require (
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0 // indirect
github.com/pelletier/go-toml/v2 v2.2.3 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
@@ -171,13 +164,17 @@ require (
github.com/prometheus/procfs v0.15.1 // indirect
github.com/rubenv/sql-migrate v1.7.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/sagikazarmark/locafero v0.7.0 // indirect
github.com/shirou/gopsutil/v3 v3.23.12 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
github.com/shopspring/decimal v1.4.0 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/afero v1.12.0 // indirect
github.com/spf13/cast v1.7.1 // indirect
github.com/spf13/cobra v1.9.1
github.com/spf13/pflag v1.0.6
github.com/stoewer/go-strcase v1.3.0 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/tklauser/go-sysconf v0.3.12 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/x448/float16 v0.8.4 // indirect
@@ -218,7 +215,7 @@ require (
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/klog/v2 v2.130.1
k8s.io/kms v0.31.4 // indirect
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
oras.land/oras-go v1.2.5 // indirect

View File

@@ -16,7 +16,7 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
const (
@@ -57,7 +57,7 @@ func AddConfigMapSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, c
}
func (c *ConfigMapSyncer) filterResources(object client.Object) bool {
var cluster v1alpha1.Cluster
var cluster v1beta1.Cluster
ctx := context.Background()
@@ -86,7 +86,7 @@ func (c *ConfigMapSyncer) Reconcile(ctx context.Context, req reconcile.Request)
log := ctrl.LoggerFrom(ctx).WithValues("cluster", c.ClusterName, "clusterNamespace", c.ClusterNamespace)
ctx = ctrl.LoggerInto(ctx, log)
var cluster v1alpha1.Cluster
var cluster v1beta1.Cluster
if err := c.HostClient.Get(ctx, types.NamespacedName{Name: c.ClusterName, Namespace: c.ClusterNamespace}, &cluster); err != nil {
return reconcile.Result{}, err
@@ -100,6 +100,10 @@ func (c *ConfigMapSyncer) Reconcile(ctx context.Context, req reconcile.Request)
syncedConfigMap := c.translateConfigMap(&virtualConfigMap)
if err := controllerutil.SetControllerReference(&cluster, syncedConfigMap, c.HostClient.Scheme()); err != nil {
return reconcile.Result{}, err
}
// handle deletion
if !virtualConfigMap.DeletionTimestamp.IsZero() {
// delete the synced configMap if it exists

View File

@@ -12,7 +12,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -21,7 +21,7 @@ import (
var ConfigMapTests = func() {
var (
namespace string
cluster v1alpha1.Cluster
cluster v1beta1.Cluster
)
BeforeEach(func() {
@@ -35,18 +35,11 @@ var ConfigMapTests = func() {
namespace = ns.Name
cluster = v1alpha1.Cluster{
cluster = v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
Sync: &v1alpha1.SyncConfig{
ConfigMaps: v1alpha1.ConfigMapSyncConfig{
Enabled: true,
},
},
},
}
err = hostTestEnv.k8sClient.Create(ctx, &cluster)
Expect(err).NotTo(HaveOccurred())

View File

@@ -16,7 +16,7 @@ import (
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
const (
@@ -53,7 +53,7 @@ func AddIngressSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clu
}
func (r *IngressReconciler) filterResources(object ctrlruntimeclient.Object) bool {
var cluster v1alpha1.Cluster
var cluster v1beta1.Cluster
ctx := context.Background()
@@ -85,7 +85,7 @@ func (r *IngressReconciler) Reconcile(ctx context.Context, req reconcile.Request
var (
virtIngress networkingv1.Ingress
cluster v1alpha1.Cluster
cluster v1beta1.Cluster
)
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
@@ -97,6 +97,7 @@ func (r *IngressReconciler) Reconcile(ctx context.Context, req reconcile.Request
}
syncedIngress := r.ingress(&virtIngress)
if err := controllerutil.SetControllerReference(&cluster, syncedIngress, r.HostClient.Scheme()); err != nil {
return reconcile.Result{}, err
}

View File

@@ -14,7 +14,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -23,7 +23,7 @@ import (
var IngressTests = func() {
var (
namespace string
cluster v1alpha1.Cluster
cluster v1beta1.Cluster
)
BeforeEach(func() {
@@ -37,14 +37,14 @@ var IngressTests = func() {
namespace = ns.Name
cluster = v1alpha1.Cluster{
cluster = v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
Sync: &v1alpha1.SyncConfig{
Ingresses: v1alpha1.IngressSyncConfig{
Spec: v1beta1.ClusterSpec{
Sync: &v1beta1.SyncConfig{
Ingresses: v1beta1.IngressSyncConfig{
Enabled: true,
},
},

View File

@@ -16,7 +16,7 @@ import (
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
const (
@@ -53,7 +53,7 @@ func AddPVCSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, cluster
}
func (r *PVCReconciler) filterResources(object ctrlruntimeclient.Object) bool {
var cluster v1alpha1.Cluster
var cluster v1beta1.Cluster
ctx := context.Background()
@@ -83,7 +83,7 @@ func (r *PVCReconciler) Reconcile(ctx context.Context, req reconcile.Request) (r
var (
virtPVC v1.PersistentVolumeClaim
cluster v1alpha1.Cluster
cluster v1beta1.Cluster
)
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {

View File

@@ -13,7 +13,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -22,7 +22,7 @@ import (
var PVCTests = func() {
var (
namespace string
cluster v1alpha1.Cluster
cluster v1beta1.Cluster
)
BeforeEach(func() {
@@ -36,18 +36,11 @@ var PVCTests = func() {
namespace = ns.Name
cluster = v1alpha1.Cluster{
cluster = v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
Sync: &v1alpha1.SyncConfig{
PersistentVolumeClaims: v1alpha1.PersistentVolumeClaimSyncConfig{
Enabled: true,
},
},
},
}
err = hostTestEnv.k8sClient.Create(ctx, &cluster)
Expect(err).NotTo(HaveOccurred())

View File

@@ -5,6 +5,7 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/component-helpers/storage/volume"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
@@ -15,7 +16,7 @@ import (
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
const (
@@ -50,7 +51,7 @@ func AddPodPVCController(ctx context.Context, virtMgr, hostMgr manager.Manager,
}
func (r *PodReconciler) filterResources(object ctrlruntimeclient.Object) bool {
var cluster v1alpha1.Cluster
var cluster v1beta1.Cluster
ctx := context.Background()
@@ -71,7 +72,7 @@ func (r *PodReconciler) Reconcile(ctx context.Context, req reconcile.Request) (r
var (
virtPod v1.Pod
cluster v1alpha1.Cluster
cluster v1beta1.Cluster
)
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
@@ -113,9 +114,14 @@ func (r *PodReconciler) reconcilePodWithPVC(ctx context.Context, pod *v1.Pod, pv
return ctrlruntimeclient.IgnoreNotFound(err)
}
pv := r.pseudoPV(&pvc)
if pod.DeletionTimestamp != nil {
return r.handlePodDeletion(ctx, pv)
}
log.Info("Creating pseudo Persistent Volume")
pv := r.pseudoPV(&pvc)
if err := r.VirtualClient.Create(ctx, pv); err != nil {
return ctrlruntimeclient.IgnoreAlreadyExists(err)
}
@@ -188,3 +194,22 @@ func (r *PodReconciler) pseudoPV(obj *v1.PersistentVolumeClaim) *v1.PersistentVo
},
}
}
func (r *PodReconciler) handlePodDeletion(ctx context.Context, pv *v1.PersistentVolume) error {
var currentPV v1.PersistentVolume
if err := r.VirtualClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(pv), &currentPV); err != nil {
return ctrlruntimeclient.IgnoreNotFound(err)
}
pvPatch := currentPV.DeepCopy()
pvPatch.Spec.ClaimRef = nil
pvPatch.Status.Phase = v1.VolumeReleased
controllerutil.RemoveFinalizer(pvPatch, "kubernetes.io/pv-protection")
if err := r.VirtualClient.Status().Update(ctx, pvPatch); err != nil {
return err
}
return ctrlruntimeclient.IgnoreNotFound(r.VirtualClient.Delete(ctx, &currentPV))
}

View File

@@ -13,7 +13,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -22,7 +22,7 @@ import (
var PriorityClassTests = func() {
var (
namespace string
cluster v1alpha1.Cluster
cluster v1beta1.Cluster
)
BeforeEach(func() {
@@ -36,14 +36,14 @@ var PriorityClassTests = func() {
namespace = ns.Name
cluster = v1alpha1.Cluster{
cluster = v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
Sync: &v1alpha1.SyncConfig{
PriorityClasses: v1alpha1.PriorityClassSyncConfig{
Spec: v1beta1.ClusterSpec{
Sync: &v1beta1.SyncConfig{
PriorityClasses: v1beta1.PriorityClassSyncConfig{
Enabled: true,
},
},

View File

@@ -18,7 +18,7 @@ import (
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
const (
@@ -74,7 +74,7 @@ var ignoreSystemPrefixPredicate = predicate.Funcs{
}
func (r *PriorityClassSyncer) filterResources(object ctrlruntimeclient.Object) bool {
var cluster v1alpha1.Cluster
var cluster v1beta1.Cluster
ctx := context.Background()
@@ -104,7 +104,7 @@ func (r *PriorityClassSyncer) Reconcile(ctx context.Context, req reconcile.Reque
var (
priorityClass schedulingv1.PriorityClass
cluster v1alpha1.Cluster
cluster v1beta1.Cluster
)
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
@@ -117,6 +117,10 @@ func (r *PriorityClassSyncer) Reconcile(ctx context.Context, req reconcile.Reque
hostPriorityClass := r.translatePriorityClass(priorityClass)
if err := controllerutil.SetControllerReference(&cluster, hostPriorityClass, r.HostClient.Scheme()); err != nil {
return reconcile.Result{}, err
}
// handle deletion
if !priorityClass.DeletionTimestamp.IsZero() {
// delete the synced priority class if it exists

View File

@@ -16,7 +16,7 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
const (
@@ -57,7 +57,7 @@ func AddSecretSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clus
}
func (r *SecretSyncer) filterResources(object client.Object) bool {
var cluster v1alpha1.Cluster
var cluster v1beta1.Cluster
ctx := context.Background()
@@ -86,7 +86,7 @@ func (s *SecretSyncer) Reconcile(ctx context.Context, req reconcile.Request) (re
log := ctrl.LoggerFrom(ctx).WithValues("cluster", s.ClusterName, "clusterNamespace", s.ClusterNamespace)
ctx = ctrl.LoggerInto(ctx, log)
var cluster v1alpha1.Cluster
var cluster v1beta1.Cluster
if err := s.HostClient.Get(ctx, types.NamespacedName{Name: s.ClusterName, Namespace: s.ClusterNamespace}, &cluster); err != nil {
return reconcile.Result{}, err
@@ -100,6 +100,10 @@ func (s *SecretSyncer) Reconcile(ctx context.Context, req reconcile.Request) (re
syncedSecret := s.translateSecret(&virtualSecret)
if err := controllerutil.SetControllerReference(&cluster, syncedSecret, s.HostClient.Scheme()); err != nil {
return reconcile.Result{}, err
}
// handle deletion
if !virtualSecret.DeletionTimestamp.IsZero() {
// delete the synced secret if it exists

View File

@@ -12,7 +12,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -21,7 +21,7 @@ import (
var SecretTests = func() {
var (
namespace string
cluster v1alpha1.Cluster
cluster v1beta1.Cluster
)
BeforeEach(func() {
@@ -35,18 +35,11 @@ var SecretTests = func() {
namespace = ns.Name
cluster = v1alpha1.Cluster{
cluster = v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
Sync: &v1alpha1.SyncConfig{
Secrets: v1alpha1.SecretSyncConfig{
Enabled: true,
},
},
},
}
err = hostTestEnv.k8sClient.Create(ctx, &cluster)
Expect(err).NotTo(HaveOccurred())

View File

@@ -16,7 +16,7 @@ import (
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
const (
@@ -63,7 +63,7 @@ func (r *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request
var (
virtService v1.Service
cluster v1alpha1.Cluster
cluster v1beta1.Cluster
)
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
@@ -75,6 +75,7 @@ func (r *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request
}
syncedService := r.service(&virtService)
if err := controllerutil.SetControllerReference(&cluster, syncedService, r.HostClient.Scheme()); err != nil {
return reconcile.Result{}, err
}
@@ -120,7 +121,7 @@ func (r *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request
}
func (r *ServiceReconciler) filterResources(object ctrlruntimeclient.Object) bool {
var cluster v1alpha1.Cluster
var cluster v1beta1.Cluster
ctx := context.Background()

View File

@@ -13,7 +13,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -22,7 +22,7 @@ import (
var ServiceTests = func() {
var (
namespace string
cluster v1alpha1.Cluster
cluster v1beta1.Cluster
)
BeforeEach(func() {
@@ -36,18 +36,11 @@ var ServiceTests = func() {
namespace = ns.Name
cluster = v1alpha1.Cluster{
cluster = v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
Sync: &v1alpha1.SyncConfig{
Services: v1alpha1.ServiceSyncConfig{
Enabled: true,
},
},
},
}
err = hostTestEnv.k8sClient.Create(ctx, &cluster)
Expect(err).NotTo(HaveOccurred())

View File

@@ -20,7 +20,7 @@ import (
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -119,7 +119,7 @@ func buildScheme() *runtime.Scheme {
err := clientgoscheme.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
err = v1alpha1.AddToScheme(scheme)
err = v1beta1.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
return scheme
@@ -174,7 +174,7 @@ var _ = Describe("Kubelet Controller", func() {
Describe("PersistentVolumeClaim Syncer", PVCTests)
})
func translateName(cluster v1alpha1.Cluster, namespace, name string) string {
func translateName(cluster v1beta1.Cluster, namespace, name string) string {
translator := translate.ToHostTranslator{
ClusterName: cluster.Name,
ClusterNamespace: cluster.Namespace,

View File

@@ -7,6 +7,7 @@ import (
"strconv"
"strings"
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/ptr"
@@ -20,11 +21,10 @@ import (
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
"github.com/rancher/k3k/pkg/log"
)
const (
webhookName = "podmutator.k3k.io"
webhookName = "podmutating.k3k.io"
webhookTimeout = int32(10)
webhookPath = "/mutate--v1-pod"
FieldpathField = "k3k.io/fieldpath"
@@ -36,14 +36,14 @@ type webhookHandler struct {
serviceName string
clusterName string
clusterNamespace string
logger *log.Logger
logger logr.Logger
webhookPort int
}
// AddPodMutatorWebhook will add a mutator webhook to the virtual cluster to
// AddPodMutatingWebhook will add a mutating webhook to the virtual cluster to
// set the nodeName of the created pods to the virtual kubelet node name,
// and to remove any status.* field references from downward API env fields
func AddPodMutatorWebhook(ctx context.Context, mgr manager.Manager, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, serviceName string, logger *log.Logger, webhookPort int) error {
func AddPodMutatingWebhook(ctx context.Context, mgr manager.Manager, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, serviceName string, logger logr.Logger, webhookPort int) error {
handler := webhookHandler{
client: mgr.GetClient(),
scheme: mgr.GetScheme(),
@@ -54,7 +54,7 @@ func AddPodMutatorWebhook(ctx context.Context, mgr manager.Manager, hostClient c
webhookPort: webhookPort,
}
// create mutator webhook configuration to the cluster
// create mutating webhook configuration to the cluster
config, err := handler.configuration(ctx, hostClient)
if err != nil {
return err
@@ -75,7 +75,7 @@ func (w *webhookHandler) Default(ctx context.Context, obj runtime.Object) error
return fmt.Errorf("invalid request: object was type %t not cluster", obj)
}
w.logger.Infow("mutator webhook request", "Pod", pod.Name, "Namespace", pod.Namespace)
w.logger.Info("mutating webhook request", "pod", pod.Name, "namespace", pod.Namespace)
// look for status.* fields in the env
if pod.Annotations == nil {
pod.Annotations = make(map[string]string)
@@ -100,7 +100,7 @@ func (w *webhookHandler) Default(ctx context.Context, obj runtime.Object) error
}
func (w *webhookHandler) configuration(ctx context.Context, hostClient ctrlruntimeclient.Client) (*admissionregistrationv1.MutatingWebhookConfiguration, error) {
w.logger.Infow("extracting webhook tls from host cluster")
w.logger.Info("extracting webhook tls from host cluster")
var webhookTLSSecret v1.Secret

View File

@@ -11,11 +11,11 @@ import (
"os"
"time"
"github.com/go-logr/zapr"
"github.com/go-logr/logr"
"github.com/virtual-kubelet/virtual-kubelet/log"
"github.com/virtual-kubelet/virtual-kubelet/log/klogv2"
"github.com/virtual-kubelet/virtual-kubelet/node"
"github.com/virtual-kubelet/virtual-kubelet/node/nodeutil"
"go.uber.org/zap"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apiserver/pkg/authentication/user"
@@ -23,6 +23,7 @@ import (
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/retry"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/webhook"
@@ -38,26 +39,22 @@ import (
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
k3kwebhook "github.com/rancher/k3k/k3k-kubelet/controller/webhook"
"github.com/rancher/k3k/k3k-kubelet/provider"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/certs"
"github.com/rancher/k3k/pkg/controller/cluster/server"
"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
k3klog "github.com/rancher/k3k/pkg/log"
)
var (
baseScheme = runtime.NewScheme()
k3kKubeletName = "k3k-kubelet"
)
var baseScheme = runtime.NewScheme()
func init() {
_ = clientgoscheme.AddToScheme(baseScheme)
_ = v1alpha1.AddToScheme(baseScheme)
_ = v1beta1.AddToScheme(baseScheme)
}
type kubelet struct {
virtualCluster v1alpha1.Cluster
virtualCluster v1beta1.Cluster
name string
port int
@@ -70,11 +67,11 @@ type kubelet struct {
hostMgr manager.Manager
virtualMgr manager.Manager
node *nodeutil.Node
logger *k3klog.Logger
logger logr.Logger
token string
}
func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet, error) {
func newKubelet(ctx context.Context, c *config, logger logr.Logger) (*kubelet, error) {
hostConfig, err := clientcmd.BuildConfigFromFlags("", c.HostKubeconfig)
if err != nil {
return nil, err
@@ -97,7 +94,7 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
return nil, err
}
ctrl.SetLogger(zapr.NewLogger(logger.Desugar().WithOptions(zap.AddCallerSkip(1))))
ctrl.SetLogger(logger)
hostMetricsBindAddress := ":8083"
virtualMetricsBindAddress := ":8084"
@@ -150,10 +147,10 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
return nil, errors.New("unable to create controller-runtime mgr for virtual cluster: " + err.Error())
}
logger.Info("adding pod mutator webhook")
logger.Info("adding pod mutating webhook")
if err := k3kwebhook.AddPodMutatorWebhook(ctx, virtualMgr, hostClient, c.ClusterName, c.ClusterNamespace, c.ServiceName, logger, c.WebhookPort); err != nil {
return nil, errors.New("unable to add pod mutator webhook for virtual cluster: " + err.Error())
if err := k3kwebhook.AddPodMutatingWebhook(ctx, virtualMgr, hostClient, c.ClusterName, c.ClusterNamespace, c.ServiceName, logger, c.WebhookPort); err != nil {
return nil, errors.New("unable to add pod mutating webhook for virtual cluster: " + err.Error())
}
if err := addControllers(ctx, hostMgr, virtualMgr, c, hostClient); err != nil {
@@ -173,7 +170,7 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
return nil, errors.New("failed to get the DNS service for the cluster: " + err.Error())
}
var virtualCluster v1alpha1.Cluster
var virtualCluster v1beta1.Cluster
if err := hostClient.Get(ctx, types.NamespacedName{Name: c.ClusterName, Namespace: c.ClusterNamespace}, &virtualCluster); err != nil {
return nil, errors.New("failed to get virtualCluster spec: " + err.Error())
}
@@ -189,7 +186,7 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
hostMgr: hostMgr,
virtualMgr: virtualMgr,
agentIP: clusterIP,
logger: logger.Named(k3kKubeletName),
logger: logger,
token: c.Token,
dnsIP: dnsService.Spec.ClusterIP,
port: c.KubeletPort,
@@ -211,9 +208,9 @@ func clusterIP(ctx context.Context, serviceName, clusterNamespace string, hostCl
return service.Spec.ClusterIP, nil
}
func (k *kubelet) registerNode(ctx context.Context, agentIP string, cfg config) error {
func (k *kubelet) registerNode(agentIP string, cfg config) error {
providerFunc := k.newProviderFunc(cfg)
nodeOpts := k.nodeOpts(ctx, cfg.KubeletPort, cfg.ClusterNamespace, cfg.ClusterName, cfg.AgentHostname, agentIP)
nodeOpts := k.nodeOpts(cfg.KubeletPort, cfg.ClusterNamespace, cfg.ClusterName, cfg.AgentHostname, agentIP)
var err error
@@ -231,34 +228,36 @@ func (k *kubelet) start(ctx context.Context) {
go func() {
err := k.hostMgr.Start(ctx)
if err != nil {
k.logger.Fatalw("host manager stopped", zap.Error(err))
k.logger.Error(err, "host manager stopped")
}
}()
go func() {
err := k.virtualMgr.Start(ctx)
if err != nil {
k.logger.Fatalw("virtual manager stopped", zap.Error(err))
k.logger.Error(err, "virtual manager stopped")
}
}()
// run the node async so that we can wait for it to be ready in another call
go func() {
ctx = log.WithLogger(ctx, k.logger)
klog.SetLogger(k.logger)
ctx = log.WithLogger(ctx, klogv2.New(nil))
if err := k.node.Run(ctx); err != nil {
k.logger.Fatalw("node errored when running", zap.Error(err))
k.logger.Error(err, "node errored when running")
}
}()
if err := k.node.WaitReady(context.Background(), time.Minute*1); err != nil {
k.logger.Fatalw("node was not ready within timeout of 1 minute", zap.Error(err))
k.logger.Error(err, "node was not ready within timeout of 1 minute")
}
<-k.node.Done()
if err := k.node.Err(); err != nil {
k.logger.Fatalw("node stopped with an error", zap.Error(err))
k.logger.Error(err, "node stopped with an error")
}
k.logger.Info("node exited successfully")
@@ -277,7 +276,7 @@ func (k *kubelet) newProviderFunc(cfg config) nodeutil.NewProviderFunc {
}
}
func (k *kubelet) nodeOpts(ctx context.Context, srvPort int, namespace, name, hostname, agentIP string) nodeutil.NodeOpt {
func (k *kubelet) nodeOpts(srvPort int, namespace, name, hostname, agentIP string) nodeutil.NodeOpt {
return func(c *nodeutil.NodeConfig) error {
c.HTTPListenAddr = fmt.Sprintf(":%d", srvPort)
// set up the routes
@@ -288,7 +287,7 @@ func (k *kubelet) nodeOpts(ctx context.Context, srvPort int, namespace, name, ho
c.Handler = mux
tlsConfig, err := loadTLSConfig(ctx, k.hostClient, name, namespace, k.name, hostname, k.token, agentIP)
tlsConfig, err := loadTLSConfig(name, namespace, k.name, hostname, k.token, agentIP)
if err != nil {
return errors.New("unable to get tls config: " + err.Error())
}
@@ -299,12 +298,12 @@ func (k *kubelet) nodeOpts(ctx context.Context, srvPort int, namespace, name, ho
}
}
func virtRestConfig(ctx context.Context, virtualConfigPath string, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, token string, logger *k3klog.Logger) (*rest.Config, error) {
func virtRestConfig(ctx context.Context, virtualConfigPath string, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, token string, logger logr.Logger) (*rest.Config, error) {
if virtualConfigPath != "" {
return clientcmd.BuildConfigFromFlags("", virtualConfigPath)
}
// virtual kubeconfig file is empty, trying to fetch the k3k cluster kubeconfig
var cluster v1alpha1.Cluster
var cluster v1beta1.Cluster
if err := hostClient.Get(ctx, types.NamespacedName{Namespace: clusterNamespace, Name: clusterName}, &cluster); err != nil {
return nil, err
}
@@ -318,7 +317,7 @@ func virtRestConfig(ctx context.Context, virtualConfigPath string, hostClient ct
}, func() error {
var err error
b, err = bootstrap.DecodedBootstrap(token, endpoint)
logger.Infow("decoded bootstrap", zap.Error(err))
logger.Error(err, "decoded bootstrap")
return err
}); err != nil {
return nil, errors.New("unable to decode bootstrap: " + err.Error())
@@ -369,17 +368,10 @@ func kubeconfigBytes(url string, serverCA, clientCert, clientKey []byte) ([]byte
return clientcmd.Write(*config)
}
func loadTLSConfig(ctx context.Context, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, nodeName, hostname, token, agentIP string) (*tls.Config, error) {
var (
cluster v1alpha1.Cluster
b *bootstrap.ControlRuntimeBootstrap
)
func loadTLSConfig(clusterName, clusterNamespace, nodeName, hostname, token, agentIP string) (*tls.Config, error) {
var b *bootstrap.ControlRuntimeBootstrap
if err := hostClient.Get(ctx, types.NamespacedName{Name: clusterName, Namespace: clusterNamespace}, &cluster); err != nil {
return nil, err
}
endpoint := fmt.Sprintf("%s.%s", server.ServiceName(cluster.Name), cluster.Namespace)
endpoint := fmt.Sprintf("%s.%s", server.ServiceName(clusterName), clusterNamespace)
if err := retry.OnError(controller.Backoff, func(err error) bool {
return err != nil
@@ -429,7 +421,7 @@ func loadTLSConfig(ctx context.Context, hostClient ctrlruntimeclient.Client, clu
}
func addControllers(ctx context.Context, hostMgr, virtualMgr manager.Manager, c *config, hostClient ctrlruntimeclient.Client) error {
var cluster v1alpha1.Cluster
var cluster v1beta1.Cluster
objKey := types.NamespacedName{
Namespace: c.ClusterNamespace,

View File

@@ -7,12 +7,12 @@ import (
"os"
"strings"
"github.com/go-logr/logr"
"github.com/go-logr/zapr"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
"go.uber.org/zap"
ctrlruntimelog "sigs.k8s.io/controller-runtime/pkg/log"
@@ -22,8 +22,9 @@ import (
var (
configFile string
cfg config
logger *log.Logger
logger logr.Logger
debug bool
logFormat string
)
func main() {
@@ -34,13 +35,16 @@ func main() {
if err := InitializeConfig(cmd); err != nil {
return err
}
logger = log.New(debug)
ctrlruntimelog.SetLogger(zapr.NewLogger(logger.Desugar().WithOptions(zap.AddCallerSkip(1))))
logger = zapr.NewLogger(log.New(debug, logFormat))
ctrlruntimelog.SetLogger(logger)
return nil
},
RunE: run,
}
rootCmd.PersistentFlags().BoolVarP(&debug, "debug", "", false, "Enable debug logging")
rootCmd.PersistentFlags().StringVar(&logFormat, "log-format", "text", "Log format (text or json)")
rootCmd.PersistentFlags().StringVar(&cfg.ClusterName, "cluster-name", "", "Name of the k3k cluster")
rootCmd.PersistentFlags().StringVar(&cfg.ClusterNamespace, "cluster-namespace", "", "Namespace of the k3k cluster")
rootCmd.PersistentFlags().StringVar(&cfg.Token, "token", "", "K3S token of the k3k cluster")
@@ -53,7 +57,6 @@ func main() {
rootCmd.PersistentFlags().StringVar(&cfg.ServerIP, "server-ip", "", "Server IP used for registering the virtual kubelet to the cluster")
rootCmd.PersistentFlags().StringVar(&cfg.Version, "version", "", "Version of kubernetes server")
rootCmd.PersistentFlags().StringVar(&configFile, "config", "/opt/rancher/k3k/config.yaml", "Path to k3k-kubelet config file")
rootCmd.PersistentFlags().BoolVar(&debug, "debug", false, "Enable debug logging")
rootCmd.PersistentFlags().BoolVar(&cfg.MirrorHostNodes, "mirror-host-nodes", false, "Mirror real node objects from host cluster")
if err := rootCmd.Execute(); err != nil {
@@ -73,7 +76,7 @@ func run(cmd *cobra.Command, args []string) error {
return fmt.Errorf("failed to create new virtual kubelet instance: %w", err)
}
if err := k.registerNode(ctx, k.agentIP, cfg); err != nil {
if err := k.registerNode(k.agentIP, cfg); err != nil {
return fmt.Errorf("failed to register new node: %w", err)
}

View File

@@ -4,6 +4,7 @@ import (
"context"
"time"
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -12,16 +13,15 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
typedv1 "k8s.io/client-go/kubernetes/typed/core/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
k3klog "github.com/rancher/k3k/pkg/log"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
func ConfigureNode(logger *k3klog.Logger, node *corev1.Node, hostname string, servicePort int, ip string, coreClient typedv1.CoreV1Interface, virtualClient client.Client, virtualCluster v1alpha1.Cluster, version string, mirrorHostNodes bool) {
func ConfigureNode(logger logr.Logger, node *corev1.Node, hostname string, servicePort int, ip string, coreClient typedv1.CoreV1Interface, virtualClient client.Client, virtualCluster v1beta1.Cluster, version string, mirrorHostNodes bool) {
ctx := context.Background()
if mirrorHostNodes {
hostNode, err := coreClient.Nodes().Get(ctx, node.Name, metav1.GetOptions{})
if err != nil {
logger.Fatal("error getting host node for mirroring", err)
logger.Error(err, "error getting host node for mirroring", err)
}
node.Spec = *hostNode.Spec.DeepCopy()
@@ -56,7 +56,7 @@ func ConfigureNode(logger *k3klog.Logger, node *corev1.Node, hostname string, se
go func() {
for range ticker.C {
if err := updateNodeCapacity(ctx, coreClient, virtualClient, node.Name, virtualCluster.Spec.NodeSelector); err != nil {
logger.Error("error updating node capacity", err)
logger.Error(err, "error updating node capacity")
}
}
}()

View File

@@ -12,6 +12,7 @@ import (
"strings"
"time"
"github.com/go-logr/logr"
"github.com/google/go-cmp/cmp"
"github.com/virtual-kubelet/virtual-kubelet/node/api"
"github.com/virtual-kubelet/virtual-kubelet/node/nodeutil"
@@ -31,6 +32,7 @@ import (
dto "github.com/prometheus/client_model/go"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
cv1 "k8s.io/client-go/kubernetes/typed/core/v1"
compbasemetrics "k8s.io/component-base/metrics"
@@ -39,9 +41,8 @@ import (
"github.com/rancher/k3k/k3k-kubelet/controller/webhook"
"github.com/rancher/k3k/k3k-kubelet/provider/collectors"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
k3klog "github.com/rancher/k3k/pkg/log"
)
// check at compile time if the Provider implements the nodeutil.Provider interface
@@ -60,12 +61,12 @@ type Provider struct {
ClusterName string
serverIP string
dnsIP string
logger *k3klog.Logger
logger logr.Logger
}
var ErrRetryTimeout = errors.New("provider timed out")
func New(hostConfig rest.Config, hostMgr, virtualMgr manager.Manager, logger *k3klog.Logger, namespace, name, serverIP, dnsIP string) (*Provider, error) {
func New(hostConfig rest.Config, hostMgr, virtualMgr manager.Manager, logger logr.Logger, namespace, name, serverIP, dnsIP string) (*Provider, error) {
coreClient, err := cv1.NewForConfig(&hostConfig)
if err != nil {
return nil, err
@@ -124,7 +125,7 @@ func (p *Provider) GetContainerLogs(ctx context.Context, namespace, podName, con
}
closer, err := p.CoreClient.Pods(p.ClusterNamespace).GetLogs(hostPodName, &options).Stream(ctx)
p.logger.Infof("got error %s when getting logs for %s in %s", err, hostPodName, p.ClusterNamespace)
p.logger.Error(err, fmt.Sprintf("got error when getting logs for %s in %s", hostPodName, p.ClusterNamespace))
return closer, err
}
@@ -198,7 +199,7 @@ func (p *Provider) AttachToContainer(ctx context.Context, namespace, podName, co
// GetStatsSummary gets the stats for the node, including running pods
func (p *Provider) GetStatsSummary(ctx context.Context) (*stats.Summary, error) {
p.logger.Debug("GetStatsSummary")
p.logger.V(1).Info("GetStatsSummary")
nodeList := &corev1.NodeList{}
if err := p.CoreClient.RESTClient().Get().Resource("nodes").Do(ctx).Into(nodeList); err != nil {
@@ -346,7 +347,7 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
Name: p.ClusterName,
}
var cluster v1alpha1.Cluster
var cluster v1beta1.Cluster
if err := p.HostClient.Get(ctx, clusterKey, &cluster); err != nil {
return fmt.Errorf("unable to get cluster %s in namespace %s: %w", p.ClusterName, p.ClusterNamespace, err)
@@ -402,7 +403,7 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
// inject networking information to the pod including the virtual cluster controlplane endpoint
configureNetworking(tPod, pod.Name, pod.Namespace, p.serverIP, p.dnsIP)
p.logger.Infow("creating pod",
p.logger.Info("creating pod",
"host_namespace", tPod.Namespace, "host_name", tPod.Name,
"virtual_namespace", pod.Namespace, "virtual_name", pod.Name,
)
@@ -488,7 +489,7 @@ func (p *Provider) UpdatePod(ctx context.Context, pod *corev1.Pod) error {
}
func (p *Provider) updatePod(ctx context.Context, pod *corev1.Pod) error {
p.logger.Debugw("got a request for update pod")
p.logger.V(1).Info("got a request for update pod")
// Once scheduled a Pod cannot update other fields than the image of the containers, initcontainers and a few others
// See: https://kubernetes.io/docs/concepts/workloads/pods/#pod-update-and-replacement
@@ -518,7 +519,7 @@ func (p *Provider) updatePod(ctx context.Context, pod *corev1.Pod) error {
currentHostPod.Spec.EphemeralContainers = pod.Spec.EphemeralContainers
if _, err := p.CoreClient.Pods(p.ClusterNamespace).UpdateEphemeralContainers(ctx, currentHostPod.Name, &currentHostPod, metav1.UpdateOptions{}); err != nil {
p.logger.Errorf("error when updating ephemeral containers: %v", err)
p.logger.Error(err, "error when updating ephemeral containers")
return err
}
@@ -589,15 +590,20 @@ func (p *Provider) DeletePod(ctx context.Context, pod *corev1.Pod) error {
// expected to call the NotifyPods callback with a terminal pod status where all the containers are in a terminal
// state, as well as the pod. DeletePod may be called multiple times for the same pod.
func (p *Provider) deletePod(ctx context.Context, pod *corev1.Pod) error {
p.logger.Infof("Got request to delete pod %s", pod.Name)
p.logger.Info(fmt.Sprintf("got request to delete pod %s/%s", pod.Namespace, pod.Name))
hostName := p.Translator.TranslateName(pod.Namespace, pod.Name)
err := p.CoreClient.Pods(p.ClusterNamespace).Delete(ctx, hostName, metav1.DeleteOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
p.logger.Info(fmt.Sprintf("pod %s/%s already deleted from host cluster", p.ClusterNamespace, hostName))
return nil
}
return fmt.Errorf("unable to delete pod %s/%s: %w", pod.Namespace, pod.Name, err)
}
p.logger.Infof("Deleted pod %s", pod.Name)
p.logger.Info(fmt.Sprintf("pod %s/%s deleted from host cluster", p.ClusterNamespace, hostName))
return nil
}
@@ -607,7 +613,7 @@ func (p *Provider) deletePod(ctx context.Context, pod *corev1.Pod) error {
// concurrently outside of the calling goroutine. Therefore it is recommended
// to return a version after DeepCopy.
func (p *Provider) GetPod(ctx context.Context, namespace, name string) (*corev1.Pod, error) {
p.logger.Debugw("got a request for get pod", "Namespace", namespace, "Name", name)
p.logger.V(1).Info("got a request for get pod", "namespace", namespace, "name", name)
hostNamespaceName := types.NamespacedName{
Namespace: p.ClusterNamespace,
Name: p.Translator.TranslateName(namespace, name),
@@ -629,14 +635,14 @@ func (p *Provider) GetPod(ctx context.Context, namespace, name string) (*corev1.
// concurrently outside of the calling goroutine. Therefore it is recommended
// to return a version after DeepCopy.
func (p *Provider) GetPodStatus(ctx context.Context, namespace, name string) (*corev1.PodStatus, error) {
p.logger.Debugw("got a request for pod status", "Namespace", namespace, "Name", name)
p.logger.V(1).Info("got a request for pod status", "namespace", namespace, "name", name)
pod, err := p.GetPod(ctx, namespace, name)
if err != nil {
return nil, fmt.Errorf("unable to get pod for status: %w", err)
}
p.logger.Debugw("got pod status", "Namespace", namespace, "Name", name, "Status", pod.Status)
p.logger.V(1).Info("got pod status", "namespace", namespace, "name", name, "status", pod.Status)
return pod.Status.DeepCopy(), nil
}
@@ -760,7 +766,7 @@ func mergeEnvVars(orig, updated []corev1.EnvVar) []corev1.EnvVar {
return orig
}
// configureFieldPathEnv will retrieve all annotations created by the pod mutator webhook
// configureFieldPathEnv will retrieve all annotations created by the pod mutating webhook
// to assign env fieldpaths to pods, it will also make sure to change the metadata.name and metadata.namespace to the
// assigned annotations
func (p *Provider) configureFieldPathEnv(pod, tPod *corev1.Pod) error {

View File

@@ -23,7 +23,7 @@ const (
// transformTokens copies the serviceaccount tokens used by pod's serviceaccount to a secret on the host cluster and mount it
// to look like the serviceaccount token
func (p *Provider) transformTokens(ctx context.Context, pod, tPod *corev1.Pod) error {
p.logger.Infow("transforming token", "Pod", pod.Name, "Namespace", pod.Namespace, "serviceAccountName", pod.Spec.ServiceAccountName)
p.logger.Info("transforming token", "pod", pod.Name, "namespace", pod.Namespace, "serviceAccountName", pod.Spec.ServiceAccountName)
// skip this process if the kube-api-access is already removed from the pod
// this is needed in case users already add their own custom tokens, like in Rancher-imported clusters

View File

@@ -4,8 +4,10 @@ import (
"encoding/hex"
"strings"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
)
@@ -34,6 +36,13 @@ type ToHostTranslator struct {
ClusterNamespace string
}
func NewHostTranslator(cluster *v1beta1.Cluster) *ToHostTranslator {
return &ToHostTranslator{
ClusterName: cluster.Name,
ClusterNamespace: cluster.Namespace,
}
}
// Translate translates a virtual cluster object to a host cluster object. This should only be used for
// static resources such as configmaps/secrets, and not for things like pods (which can reference other
// objects). Note that this won't set host-cluster values (like resource version) so when updating you
@@ -125,3 +134,11 @@ func (t *ToHostTranslator) TranslateName(namespace string, name string) string {
return controller.SafeConcatName(namePrefix, nameSuffix)
}
// NamespacedName returns the types.NamespacedName of the resource in the host cluster
func (t *ToHostTranslator) NamespacedName(obj client.Object) types.NamespacedName {
return types.NamespacedName{
Namespace: t.ClusterNamespace,
Name: t.TranslateName(obj.GetNamespace(), obj.GetName()),
}
}

49
main.go
View File

@@ -9,9 +9,9 @@ import (
"os/signal"
"syscall"
"github.com/go-logr/logr"
"github.com/go-logr/zapr"
"github.com/spf13/cobra"
"go.uber.org/zap"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/controller-runtime/pkg/manager"
@@ -22,7 +22,7 @@ import (
ctrlruntimelog "sigs.k8s.io/controller-runtime/pkg/log"
"github.com/rancher/k3k/cli/cmds"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/buildinfo"
"github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
@@ -38,12 +38,13 @@ var (
webhookPortRange string
maxConcurrentReconciles int
debug bool
logger *log.Logger
logFormat string
logger logr.Logger
)
func init() {
_ = clientgoscheme.AddToScheme(scheme)
_ = v1alpha1.AddToScheme(scheme)
_ = v1beta1.AddToScheme(scheme)
}
func main() {
@@ -56,19 +57,20 @@ func main() {
},
PersistentPreRun: func(cmd *cobra.Command, args []string) {
cmds.InitializeConfig(cmd)
logger = log.New(debug)
logger = zapr.NewLogger(log.New(debug, logFormat))
},
RunE: run,
}
rootCmd.PersistentFlags().BoolVar(&debug, "debug", false, "Debug level logging")
rootCmd.PersistentFlags().BoolVarP(&debug, "debug", "", false, "Debug level logging")
rootCmd.PersistentFlags().StringVar(&logFormat, "log-format", "text", "Log format (text or json)")
rootCmd.PersistentFlags().StringVar(&kubeconfig, "kubeconfig", "", "kubeconfig path")
rootCmd.PersistentFlags().StringVar(&config.ClusterCIDR, "cluster-cidr", "", "Cluster CIDR to be added to the networkpolicy")
rootCmd.PersistentFlags().StringVar(&config.SharedAgentImage, "shared-agent-image", "rancher/k3k-kubelet", "K3K Virtual Kubelet image")
rootCmd.PersistentFlags().StringVar(&config.SharedAgentImagePullPolicy, "shared-agent-image-pull-policy", "", "K3K Virtual Kubelet image pull policy must be one of Always, IfNotPresent or Never")
rootCmd.PersistentFlags().StringVar(&config.SharedAgentImage, "agent-shared-image", "rancher/k3k-kubelet", "K3K Virtual Kubelet image")
rootCmd.PersistentFlags().StringVar(&config.SharedAgentImagePullPolicy, "agent-shared-image-pull-policy", "", "K3K Virtual Kubelet image pull policy must be one of Always, IfNotPresent or Never")
rootCmd.PersistentFlags().StringVar(&config.VirtualAgentImage, "agent-virtual-image", "rancher/k3s", "K3S Virtual Agent image")
rootCmd.PersistentFlags().StringVar(&config.VirtualAgentImagePullPolicy, "agent-virtual-image-pull-policy", "", "K3S Virtual Agent image pull policy must be one of Always, IfNotPresent or Never")
rootCmd.PersistentFlags().StringVar(&kubeletPortRange, "kubelet-port-range", "50000-51000", "Port Range for k3k kubelet in shared mode")
rootCmd.PersistentFlags().StringVar(&config.VirtualAgentImage, "virtual-agent-image", "rancher/k3s", "K3S Virtual Agent image")
rootCmd.PersistentFlags().StringVar(&config.VirtualAgentImagePullPolicy, "virtual-agent-image-pull-policy", "", "K3S Virtual Agent image pull policy must be one of Always, IfNotPresent or Never")
rootCmd.PersistentFlags().StringVar(&webhookPortRange, "webhook-port-range", "51001-52000", "Port Range for k3k kubelet webhook in shared mode")
rootCmd.PersistentFlags().StringVar(&config.K3SServerImage, "k3s-server-image", "rancher/k3s", "K3K server image")
rootCmd.PersistentFlags().StringVar(&config.K3SServerImagePullPolicy, "k3s-server-image-pull-policy", "", "K3K server image pull policy")
@@ -77,7 +79,7 @@ func main() {
rootCmd.PersistentFlags().IntVar(&maxConcurrentReconciles, "max-concurrent-reconciles", 50, "maximum number of concurrent reconciles")
if err := rootCmd.Execute(); err != nil {
logger.Fatalw("failed to run k3k controller", zap.Error(err))
logger.Error(err, "failed to run k3k controller")
}
}
@@ -86,6 +88,7 @@ func run(cmd *cobra.Command, args []string) error {
defer stop()
logger.Info("Starting k3k - Version: " + buildinfo.Version)
ctrlruntimelog.SetLogger(logger)
restConfig, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
if err != nil {
@@ -99,8 +102,6 @@ func run(cmd *cobra.Command, args []string) error {
return fmt.Errorf("failed to create new controller runtime manager: %v", err)
}
ctrlruntimelog.SetLogger(zapr.NewLogger(logger.Desugar().WithOptions(zap.AddCallerSkip(1))))
logger.Info("adding cluster controller")
portAllocator, err := agent.NewPortAllocator(ctx, mgr.GetClient())
@@ -114,23 +115,35 @@ func run(cmd *cobra.Command, args []string) error {
}
if err := cluster.Add(ctx, mgr, &config, maxConcurrentReconciles, portAllocator, nil); err != nil {
return fmt.Errorf("failed to add the new cluster controller: %v", err)
return fmt.Errorf("failed to add cluster controller: %v", err)
}
logger.Info("adding etcd pod controller")
logger.Info("adding statefulset controller")
if err := cluster.AddStatefulSetController(ctx, mgr, maxConcurrentReconciles); err != nil {
return fmt.Errorf("failed to add statefulset controller: %v", err)
}
logger.Info("adding service controller")
if err := cluster.AddServiceController(ctx, mgr, maxConcurrentReconciles); err != nil {
return fmt.Errorf("failed to add service controller: %v", err)
}
logger.Info("adding pod controller")
if err := cluster.AddPodController(ctx, mgr, maxConcurrentReconciles); err != nil {
return fmt.Errorf("failed to add the new cluster controller: %v", err)
return fmt.Errorf("failed to add pod controller: %v", err)
}
logger.Info("adding clusterpolicy controller")
if err := policy.Add(mgr, config.ClusterCIDR, maxConcurrentReconciles); err != nil {
return fmt.Errorf("failed to add the clusterpolicy controller: %v", err)
return fmt.Errorf("failed to add clusterpolicy controller: %v", err)
}
if err := mgr.Start(ctx); err != nil {
return fmt.Errorf("failed to start the manager: %v", err)
return fmt.Errorf("failed to start manager: %v", err)
}
logger.Info("controller manager stopped")

View File

@@ -1,3 +1,3 @@
// +k8s:deepcopy-gen=package
// +groupName=k3k.io
package v1alpha1
package v1beta1

View File

@@ -1,4 +1,4 @@
package v1alpha1
package v1beta1
import (
"k8s.io/apimachinery/pkg/runtime"
@@ -10,7 +10,7 @@ import (
)
var (
SchemeGroupVersion = schema.GroupVersion{Group: k3k.GroupName, Version: "v1alpha1"}
SchemeGroupVersion = schema.GroupVersion{Group: k3k.GroupName, Version: "v1beta1"}
SchemBuilder = runtime.NewSchemeBuilder(addKnownTypes)
AddToScheme = SchemBuilder.AddToScheme
)

View File

@@ -1,4 +1,4 @@
package v1alpha1
package v1beta1
import (
v1 "k8s.io/api/core/v1"
@@ -103,7 +103,7 @@ type ClusterSpec struct {
// Expose specifies options for exposing the API server.
// By default, it's only exposed as a ClusterIP.
//
// +kubebuilder:validation:XValidation:rule="[has(self.ingress), has(self.loadbalancer), has(self.nodePort)].filter(x, x).size() <= 1",message="ingress, loadbalancer and nodePort are mutually exclusive; only one can be set"
// +kubebuilder:validation:XValidation:rule="[has(self.ingress), has(self.loadBalancer), has(self.nodePort)].filter(x, x).size() <= 1",message="ingress, loadbalancer and nodePort are mutually exclusive; only one can be set"
// +optional
Expose *ExposeConfig `json:"expose,omitempty"`
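For illustration, a minimal sketch of what the corrected rule enforces, assuming the v1beta1 types shown in this diff and the usual metav1 import; the cluster name and namespace are made up:

cluster := &v1beta1.Cluster{
    ObjectMeta: metav1.ObjectMeta{Name: "mycluster", Namespace: "ns-1"},
    Spec: v1beta1.ClusterSpec{
        Expose: &v1beta1.ExposeConfig{
            // Only one of ingress, loadBalancer or nodePort may be set;
            // adding NodePort or Ingress here as well would be rejected
            // at admission by the XValidation rule above.
            LoadBalancer: &v1beta1.LoadBalancerConfig{},
        },
    },
}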
@@ -176,7 +176,7 @@ type ClusterSpec struct {
// CustomCAs specifies the cert/key pairs for custom CA certificates.
//
// +optional
CustomCAs CustomCAs `json:"customCAs,omitempty"`
CustomCAs *CustomCAs `json:"customCAs,omitempty"`
// Sync specifies the resource types that will be synced from the virtual cluster to the host cluster.
//
@@ -190,32 +190,40 @@ type SyncConfig struct {
// Services resources sync configuration.
//
// +kubebuilder:default={"enabled": true}
Services ServiceSyncConfig `json:"services,omitempty"`
// +optional
Services ServiceSyncConfig `json:"services"`
// ConfigMaps resources sync configuration.
//
// +kubebuilder:default={"enabled": true}
ConfigMaps ConfigMapSyncConfig `json:"configmaps,omitempty"`
// +optional
ConfigMaps ConfigMapSyncConfig `json:"configMaps"`
// Secrets resources sync configuration.
//
// +kubebuilder:default={"enabled": true}
Secrets SecretSyncConfig `json:"secrets,omitempty"`
// +optional
Secrets SecretSyncConfig `json:"secrets"`
// Ingresses resources sync configuration.
//
// +kubebuilder:default={"enabled": false}
Ingresses IngressSyncConfig `json:"ingresses,omitempty"`
// +optional
Ingresses IngressSyncConfig `json:"ingresses"`
// PersistentVolumeClaims resources sync configuration.
//
// +kubebuilder:default={"enabled": true}
PersistentVolumeClaims PersistentVolumeClaimSyncConfig `json:"persistentVolumeClaims,omitempty"`
// +optional
PersistentVolumeClaims PersistentVolumeClaimSyncConfig `json:"persistentVolumeClaims"`
// PriorityClasses resources sync configuration.
//
// +kubebuilder:default={"enabled": false}
PriorityClasses PriorityClassSyncConfig `json:"priorityClasses,omitempty"`
// +optional
PriorityClasses PriorityClassSyncConfig `json:"priorityClasses"`
}
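Taken together, the defaults above mean that a Cluster created with an empty sync block ends up with the following effective values (a sketch of the defaulted spec, matching the expectations in the controller test further down; not code from this change):

sync := v1beta1.SyncConfig{
    // enabled by default
    Services:               v1beta1.ServiceSyncConfig{Enabled: true},
    ConfigMaps:             v1beta1.ConfigMapSyncConfig{Enabled: true},
    Secrets:                v1beta1.SecretSyncConfig{Enabled: true},
    PersistentVolumeClaims: v1beta1.PersistentVolumeClaimSyncConfig{Enabled: true},
    // disabled by default
    Ingresses:       v1beta1.IngressSyncConfig{Enabled: false},
    PriorityClasses: v1beta1.PriorityClassSyncConfig{Enabled: false},
}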
// SecretSyncConfig specifies the sync options for Secrets.
type SecretSyncConfig struct {
// Enabled is an on/off switch for syncing resources.
//
// +kubebuilder:default=true
// +optional
Enabled bool `json:"enabled,omitempty"`
@@ -229,8 +237,10 @@ type SecretSyncConfig struct {
// ServiceSyncConfig specifies the sync options for services.
type ServiceSyncConfig struct {
// Enabled is an on/off switch for syncing resources.
// +optional
Enabled bool `json:"enabled,omitempty"`
//
// +kubebuilder:default=true
// +required
Enabled bool `json:"enabled"`
// Selector specifies the set of labels of the resources that will be synced; if empty,
// then all resources of the given type will be synced.
@@ -242,8 +252,10 @@ type ServiceSyncConfig struct {
// ConfigMapSyncConfig specifies the sync options for ConfigMaps.
type ConfigMapSyncConfig struct {
// Enabled is an on/off switch for syncing resources.
// +optional
Enabled bool `json:"enabled,omitempty"`
//
// +kubebuilder:default=true
// +required
Enabled bool `json:"enabled"`
// Selector specifies the set of labels of the resources that will be synced; if empty,
// then all resources of the given type will be synced.
@@ -255,8 +267,10 @@ type ConfigMapSyncConfig struct {
// IngressSyncConfig specifies the sync options for Ingresses.
type IngressSyncConfig struct {
// Enabled is an on/off switch for syncing resources.
// +optional
Enabled bool `json:"enabled,omitempty"`
//
// +kubebuilder:default=false
// +required
Enabled bool `json:"enabled"`
// Selector specifies the set of labels of the resources that will be synced; if empty,
// then all resources of the given type will be synced.
@@ -268,8 +282,10 @@ type IngressSyncConfig struct {
// PersistentVolumeClaimSyncConfig specifies the sync options for PersistentVolumeClaims.
type PersistentVolumeClaimSyncConfig struct {
// Enabled is an on/off switch for syncing resources.
// +optional
Enabled bool `json:"enabled,omitempty"`
//
// +kubebuilder:default=true
// +required
Enabled bool `json:"enabled"`
// Selector specifies the set of labels of the resources that will be synced; if empty,
// then all resources of the given type will be synced.
@@ -281,8 +297,10 @@ type PersistentVolumeClaimSyncConfig struct {
// PriorityClassSyncConfig specifies the sync options for PriorityClasses.
type PriorityClassSyncConfig struct {
// Enabled is an on/off switch for syncing resources.
// +optional
Enabled bool `json:"enabled,omitempty"`
//
// +kubebuilder:default=false
// +required
Enabled bool `json:"enabled"`
// Selector specifies the set of labels of the resources that will be synced; if empty,
// then all resources of the given type will be synced.
@@ -343,7 +361,7 @@ type PersistenceConfig struct {
// StorageRequestSize is the requested size for the PVC.
// This field is only relevant in "dynamic" mode.
//
// +kubebuilder:default="1G"
// +kubebuilder:default="2G"
// +optional
StorageRequestSize string `json:"storageRequestSize,omitempty"`
}
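The default PVC request for dynamic persistence therefore moves from 1G to 2G; overriding it just means setting the field explicitly, as in this sketch (the 5G value is arbitrary and assumes the v1beta1 types from this diff):

persistence := v1beta1.PersistenceConfig{
    Type:               v1beta1.DynamicPersistenceMode,
    StorageRequestSize: "5G", // omit to fall back to the new "2G" default
}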
@@ -358,7 +376,7 @@ type ExposeConfig struct {
// LoadBalancer specifies options for exposing the API server through a LoadBalancer service.
//
// +optional
LoadBalancer *LoadBalancerConfig `json:"loadbalancer,omitempty"`
LoadBalancer *LoadBalancerConfig `json:"loadBalancer,omitempty"`
// NodePort specifies options for exposing the API server through NodePort.
//
@@ -416,32 +434,34 @@ type NodePortConfig struct {
// CustomCAs specifies the cert/key pairs for custom CA certificates.
type CustomCAs struct {
// Enabled toggles this feature on or off.
Enabled bool `json:"enabled,omitempty"`
//
// +kubebuilder:default=true
Enabled bool `json:"enabled"`
// Sources defines the sources for all required custom CA certificates.
Sources CredentialSources `json:"sources,omitempty"`
Sources CredentialSources `json:"sources"`
}
// CredentialSources lists all the required credentials, including both
// TLS key pairs and single signing keys.
type CredentialSources struct {
// ServerCA specifies the server-ca cert/key pair.
ServerCA CredentialSource `json:"serverCA,omitempty"`
ServerCA CredentialSource `json:"serverCA"`
// ClientCA specifies the client-ca cert/key pair.
ClientCA CredentialSource `json:"clientCA,omitempty"`
ClientCA CredentialSource `json:"clientCA"`
// RequestHeaderCA specifies the request-header-ca cert/key pair.
RequestHeaderCA CredentialSource `json:"requestHeaderCA,omitempty"`
RequestHeaderCA CredentialSource `json:"requestHeaderCA"`
// ETCDServerCA specifies the etcd-server-ca cert/key pair.
ETCDServerCA CredentialSource `json:"etcdServerCA,omitempty"`
ETCDServerCA CredentialSource `json:"etcdServerCA"`
// ETCDPeerCA specifies the etcd-peer-ca cert/key pair.
ETCDPeerCA CredentialSource `json:"etcdPeerCA,omitempty"`
ETCDPeerCA CredentialSource `json:"etcdPeerCA"`
// ServiceAccountToken specifies the service-account-token key.
ServiceAccountToken CredentialSource `json:"serviceAccountToken,omitempty"`
ServiceAccountToken CredentialSource `json:"serviceAccountToken"`
}
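With every CredentialSource now required whenever customCAs is enabled, a valid spec has to reference a Secret for each entry. A hedged sketch (the secret names are placeholders, not part of this change):

customCAs := &v1beta1.CustomCAs{
    Enabled: true,
    Sources: v1beta1.CredentialSources{
        // Each referenced Secret must exist; TLS pairs use the 'tls.crt'/'tls.key' keys.
        ServerCA:            v1beta1.CredentialSource{SecretName: "my-server-ca"},
        ClientCA:            v1beta1.CredentialSource{SecretName: "my-client-ca"},
        RequestHeaderCA:     v1beta1.CredentialSource{SecretName: "my-request-header-ca"},
        ETCDServerCA:        v1beta1.CredentialSource{SecretName: "my-etcd-server-ca"},
        ETCDPeerCA:          v1beta1.CredentialSource{SecretName: "my-etcd-peer-ca"},
        ServiceAccountToken: v1beta1.CredentialSource{SecretName: "my-sa-token"},
    },
}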
// CredentialSource defines where to get a credential from.
@@ -451,8 +471,7 @@ type CredentialSource struct {
// The controller expects specific keys inside based on the credential type:
// - For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
// - For ServiceAccountTokenKey: 'tls.key'.
// +optional
SecretName string `json:"secretName,omitempty"`
SecretName string `json:"secretName"`
}
// ClusterStatus reflects the observed state of a Cluster.

View File

@@ -2,7 +2,7 @@
// Code generated by controller-gen. DO NOT EDIT.
package v1alpha1
package v1beta1
import (
"k8s.io/api/core/v1"
@@ -163,7 +163,11 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
(*out)[key] = val.DeepCopy()
}
}
out.CustomCAs = in.CustomCAs
if in.CustomCAs != nil {
in, out := &in.CustomCAs, &out.CustomCAs
*out = new(CustomCAs)
**out = **in
}
if in.Sync != nil {
in, out := &in.Sync, &out.Sync
*out = new(SyncConfig)

View File

@@ -11,7 +11,7 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
)
@@ -24,12 +24,12 @@ type ResourceEnsurer interface {
}
type Config struct {
cluster *v1alpha1.Cluster
cluster *v1beta1.Cluster
client ctrlruntimeclient.Client
scheme *runtime.Scheme
}
func NewConfig(cluster *v1alpha1.Cluster, client ctrlruntimeclient.Client, scheme *runtime.Scheme) *Config {
func NewConfig(cluster *v1beta1.Cluster, client ctrlruntimeclient.Client, scheme *runtime.Scheme) *Config {
return &Config{
cluster: cluster,
client: client,
@@ -42,11 +42,8 @@ func configSecretName(clusterName string) string {
}
func ensureObject(ctx context.Context, cfg *Config, obj ctrlruntimeclient.Object) error {
log := ctrl.LoggerFrom(ctx)
key := ctrlruntimeclient.ObjectKeyFromObject(obj)
log.Info(fmt.Sprintf("ensuring %T", obj), "key", key)
log := ctrl.LoggerFrom(ctx).WithValues("key", key)
if err := controllerutil.SetControllerReference(cfg.cluster, obj, cfg.scheme); err != nil {
return err
@@ -54,11 +51,15 @@ func ensureObject(ctx context.Context, cfg *Config, obj ctrlruntimeclient.Object
if err := cfg.client.Create(ctx, obj); err != nil {
if apierrors.IsAlreadyExists(err) {
log.V(1).Info(fmt.Sprintf("Resource %T already exists, updating.", obj))
return cfg.client.Update(ctx, obj)
}
return err
}
log.V(1).Info(fmt.Sprintf("Creating %T.", obj))
return nil
}

View File

@@ -19,7 +19,7 @@ import (
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/certs"
)
@@ -99,7 +99,7 @@ func (s *SharedAgent) config(ctx context.Context) error {
return s.ensureObject(ctx, configSecret)
}
func sharedAgentData(cluster *v1alpha1.Cluster, serviceName, token, ip string, kubeletPort, webhookPort int) string {
func sharedAgentData(cluster *v1beta1.Cluster, serviceName, token, ip string, kubeletPort, webhookPort int) string {
version := cluster.Spec.Version
if cluster.Spec.Version == "" {
version = cluster.Status.HostVersion

View File

@@ -8,12 +8,12 @@ import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
func Test_sharedAgentData(t *testing.T) {
type args struct {
cluster *v1alpha1.Cluster
cluster *v1beta1.Cluster
serviceName string
ip string
kubeletPort int
@@ -29,12 +29,12 @@ func Test_sharedAgentData(t *testing.T) {
{
name: "simple config",
args: args{
cluster: &v1alpha1.Cluster{
cluster: &v1beta1.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: "mycluster",
Namespace: "ns-1",
},
Spec: v1alpha1.ClusterSpec{
Spec: v1beta1.ClusterSpec{
Version: "v1.2.3",
},
},
@@ -59,15 +59,15 @@ func Test_sharedAgentData(t *testing.T) {
{
name: "version in status",
args: args{
cluster: &v1alpha1.Cluster{
cluster: &v1beta1.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: "mycluster",
Namespace: "ns-1",
},
Spec: v1alpha1.ClusterSpec{
Spec: v1beta1.ClusterSpec{
Version: "v1.2.3",
},
Status: v1alpha1.ClusterStatus{
Status: v1beta1.ClusterStatus{
HostVersion: "v1.3.3",
},
},
@@ -92,12 +92,12 @@ func Test_sharedAgentData(t *testing.T) {
{
name: "missing version in spec",
args: args{
cluster: &v1alpha1.Cluster{
cluster: &v1beta1.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: "mycluster",
Namespace: "ns-1",
},
Status: v1alpha1.ClusterStatus{
Status: v1beta1.ClusterStatus{
HostVersion: "v1.3.3",
},
},

View File

@@ -0,0 +1,35 @@
package cluster
import (
"context"
"fmt"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/clientcmd"
v1 "k8s.io/api/core/v1"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/pkg/controller"
)
// newVirtualClient creates a new Client that can be used to interact with the virtual cluster
func newVirtualClient(ctx context.Context, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace string) (ctrlruntimeclient.Client, error) {
var clusterKubeConfig v1.Secret
kubeconfigSecretName := types.NamespacedName{
Name: controller.SafeConcatNameWithPrefix(clusterName, "kubeconfig"),
Namespace: clusterNamespace,
}
if err := hostClient.Get(ctx, kubeconfigSecretName, &clusterKubeConfig); err != nil {
return nil, fmt.Errorf("failed to get kubeconfig secret: %w", err)
}
restConfig, err := clientcmd.RESTConfigFromKubeConfig(clusterKubeConfig.Data["kubeconfig.yaml"])
if err != nil {
return nil, fmt.Errorf("failed to create config from kubeconfig file: %w", err)
}
return ctrlruntimeclient.New(restConfig, ctrlruntimeclient.Options{})
}

View File

@@ -33,7 +33,7 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
ctrlcontroller "sigs.k8s.io/controller-runtime/pkg/controller"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
"github.com/rancher/k3k/pkg/controller/cluster/server"
@@ -46,7 +46,6 @@ const (
namePrefix = "k3k"
clusterController = "k3k-cluster-controller"
clusterFinalizerName = "cluster.k3k.io/finalizer"
etcdPodFinalizerName = "etcdpod.k3k.io/finalizer"
ClusterInvalidName = "system"
defaultVirtualClusterCIDR = "10.52.0.0/16"
@@ -118,7 +117,7 @@ func Add(ctx context.Context, mgr manager.Manager, config *Config, maxConcurrent
}
return ctrl.NewControllerManagedBy(mgr).
For(&v1alpha1.Cluster{}).
For(&v1beta1.Cluster{}).
Watches(&v1.Namespace{}, namespaceEventHandler(&reconciler)).
Owns(&apps.StatefulSet{}).
Owns(&v1.Service{}).
@@ -149,7 +148,7 @@ func namespaceEventHandler(r *ClusterReconciler) handler.Funcs {
}
// Enqueue all the Clusters in the namespace
var clusterList v1alpha1.ClusterList
var clusterList v1beta1.ClusterList
if err := r.Client.List(ctx, &clusterList, client.InNamespace(oldNs.Name)); err != nil {
return
}
@@ -162,12 +161,10 @@ func namespaceEventHandler(r *ClusterReconciler) handler.Funcs {
}
func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx).WithValues("cluster", req.NamespacedName)
ctx = ctrl.LoggerInto(ctx, log) // enrich the current logger
log := ctrl.LoggerFrom(ctx)
log.Info("Reconciling Cluster")
log.Info("reconciling cluster")
var cluster v1alpha1.Cluster
var cluster v1beta1.Cluster
if err := c.Client.Get(ctx, req.NamespacedName, &cluster); err != nil {
return reconcile.Result{}, client.IgnoreNotFound(err)
}
@@ -178,8 +175,10 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
}
// Set initial status if not already set
if cluster.Status.Phase == "" || cluster.Status.Phase == v1alpha1.ClusterUnknown {
cluster.Status.Phase = v1alpha1.ClusterProvisioning
if cluster.Status.Phase == "" || cluster.Status.Phase == v1beta1.ClusterUnknown {
log.V(1).Info("Updating Cluster status phase")
cluster.Status.Phase = v1beta1.ClusterProvisioning
meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
Type: ConditionReady,
Status: metav1.ConditionFalse,
@@ -196,6 +195,8 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
// add finalizer
if controllerutil.AddFinalizer(&cluster, clusterFinalizerName) {
log.V(1).Info("Updating Cluster adding finalizer")
if err := c.Client.Update(ctx, &cluster); err != nil {
return reconcile.Result{}, err
}
@@ -208,6 +209,8 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
reconcilerErr := c.reconcileCluster(ctx, &cluster)
if !equality.Semantic.DeepEqual(orig.Status, cluster.Status) {
log.Info("Updating Cluster status")
if err := c.Client.Status().Update(ctx, &cluster); err != nil {
return reconcile.Result{}, err
}
@@ -216,7 +219,7 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
// if there was an error during the reconciliation, return
if reconcilerErr != nil {
if errors.Is(reconcilerErr, bootstrap.ErrServerNotReady) {
log.Info("server not ready, requeueing")
log.V(1).Info("Server not ready, requeueing")
return reconcile.Result{RequeueAfter: time.Second * 10}, nil
}
@@ -225,6 +228,8 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
// update Cluster if needed
if !equality.Semantic.DeepEqual(orig.Spec, cluster.Spec) {
log.Info("Updating Cluster")
if err := c.Client.Update(ctx, &cluster); err != nil {
return reconcile.Result{}, err
}
@@ -233,14 +238,14 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
return reconcile.Result{}, nil
}
func (c *ClusterReconciler) reconcileCluster(ctx context.Context, cluster *v1alpha1.Cluster) error {
func (c *ClusterReconciler) reconcileCluster(ctx context.Context, cluster *v1beta1.Cluster) error {
err := c.reconcile(ctx, cluster)
c.updateStatus(cluster, err)
c.updateStatus(ctx, cluster, err)
return err
}
func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1alpha1.Cluster) error {
func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1beta1.Cluster) error {
log := ctrl.LoggerFrom(ctx)
var ns v1.Namespace
@@ -252,7 +257,7 @@ func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1alpha1.Clu
cluster.Status.PolicyName = policyName
if found && policyName != "" {
var policy v1alpha1.VirtualClusterPolicy
var policy v1beta1.VirtualClusterPolicy
if err := c.Client.Get(ctx, client.ObjectKey{Name: policyName}, &policy); err != nil {
return err
}
@@ -265,7 +270,7 @@ func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1alpha1.Clu
// if the Version is not specified we will try to use the same Kubernetes version of the host.
// This version is stored in the Status object, and it will not be updated if already set.
if cluster.Spec.Version == "" && cluster.Status.HostVersion == "" {
log.Info("cluster version not set")
log.V(1).Info("Cluster version not set. Using host version.")
hostVersion, err := c.DiscoveryClient.ServerVersion()
if err != nil {
@@ -287,7 +292,7 @@ func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1alpha1.Clu
cluster.Status.ClusterCIDR = cluster.Spec.ClusterCIDR
if cluster.Status.ClusterCIDR == "" {
cluster.Status.ClusterCIDR = defaultVirtualClusterCIDR
if cluster.Spec.Mode == v1alpha1.SharedClusterMode {
if cluster.Spec.Mode == v1beta1.SharedClusterMode {
cluster.Status.ClusterCIDR = defaultSharedClusterCIDR
}
}
@@ -295,8 +300,8 @@ func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1alpha1.Clu
cluster.Status.ServiceCIDR = cluster.Spec.ServiceCIDR
if cluster.Status.ServiceCIDR == "" {
// in shared mode try to lookup the serviceCIDR
if cluster.Spec.Mode == v1alpha1.SharedClusterMode {
log.Info("looking up Service CIDR for shared mode")
if cluster.Spec.Mode == v1beta1.SharedClusterMode {
log.V(1).Info("Looking up Service CIDR for shared mode")
cluster.Status.ServiceCIDR, err = c.lookupServiceCIDR(ctx)
if err != nil {
@@ -307,8 +312,8 @@ func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1alpha1.Clu
}
// in virtual mode assign a default serviceCIDR
if cluster.Spec.Mode == v1alpha1.VirtualClusterMode {
log.Info("assign default service CIDR for virtual mode")
if cluster.Spec.Mode == v1beta1.VirtualClusterMode {
log.V(1).Info("assign default service CIDR for virtual mode")
cluster.Status.ServiceCIDR = defaultVirtualServiceCIDR
}
@@ -353,9 +358,9 @@ func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1alpha1.Clu
}
// ensureBootstrapSecret will create or update the Secret containing the bootstrap data from the k3s server
func (c *ClusterReconciler) ensureBootstrapSecret(ctx context.Context, cluster *v1alpha1.Cluster, serviceIP, token string) error {
func (c *ClusterReconciler) ensureBootstrapSecret(ctx context.Context, cluster *v1beta1.Cluster, serviceIP, token string) error {
log := ctrl.LoggerFrom(ctx)
log.Info("ensuring bootstrap secret")
log.V(1).Info("Ensuring bootstrap secret")
bootstrapData, err := bootstrap.GenerateBootstrapData(ctx, cluster, serviceIP, token)
if err != nil {
@@ -385,9 +390,9 @@ func (c *ClusterReconciler) ensureBootstrapSecret(ctx context.Context, cluster *
}
// ensureKubeconfigSecret will create or update the Secret containing the kubeconfig data from the k3s server
func (c *ClusterReconciler) ensureKubeconfigSecret(ctx context.Context, cluster *v1alpha1.Cluster, serviceIP string, port int) error {
func (c *ClusterReconciler) ensureKubeconfigSecret(ctx context.Context, cluster *v1beta1.Cluster, serviceIP string, port int) error {
log := ctrl.LoggerFrom(ctx)
log.Info("ensuring kubeconfig secret")
log.V(1).Info("Ensuring Kubeconfig Secret")
adminKubeconfig := kubeconfig.New()
@@ -423,7 +428,7 @@ func (c *ClusterReconciler) ensureKubeconfigSecret(ctx context.Context, cluster
return err
}
func (c *ClusterReconciler) createClusterConfigs(ctx context.Context, cluster *v1alpha1.Cluster, server *server.Server, serviceIP string) error {
func (c *ClusterReconciler) createClusterConfigs(ctx context.Context, cluster *v1beta1.Cluster, server *server.Server, serviceIP string) error {
// create init node config
initServerConfig, err := server.Config(true, serviceIP)
if err != nil {
@@ -459,9 +464,9 @@ func (c *ClusterReconciler) createClusterConfigs(ctx context.Context, cluster *v
return nil
}
func (c *ClusterReconciler) ensureNetworkPolicy(ctx context.Context, cluster *v1alpha1.Cluster) error {
func (c *ClusterReconciler) ensureNetworkPolicy(ctx context.Context, cluster *v1beta1.Cluster) error {
log := ctrl.LoggerFrom(ctx)
log.Info("ensuring network policy")
log.V(1).Info("Ensuring network policy")
networkPolicyName := controller.SafeConcatNameWithPrefix(cluster.Name)
@@ -545,15 +550,15 @@ func (c *ClusterReconciler) ensureNetworkPolicy(ctx context.Context, cluster *v1
key := client.ObjectKeyFromObject(currentNetworkPolicy)
if result != controllerutil.OperationResultNone {
log.Info("cluster network policy updated", "key", key, "result", result)
log.V(1).Info("Cluster network policy updated", "key", key, "result", result)
}
return nil
}
func (c *ClusterReconciler) ensureClusterService(ctx context.Context, cluster *v1alpha1.Cluster) (*v1.Service, error) {
func (c *ClusterReconciler) ensureClusterService(ctx context.Context, cluster *v1beta1.Cluster) (*v1.Service, error) {
log := ctrl.LoggerFrom(ctx)
log.Info("ensuring cluster service")
log.V(1).Info("Ensuring Cluster Service")
expectedService := server.Service(cluster)
currentService := expectedService.DeepCopy()
@@ -573,15 +578,15 @@ func (c *ClusterReconciler) ensureClusterService(ctx context.Context, cluster *v
key := client.ObjectKeyFromObject(currentService)
if result != controllerutil.OperationResultNone {
log.Info("cluster service updated", "key", key, "result", result)
log.V(1).Info("Cluster service updated", "key", key, "result", result)
}
return currentService, nil
}
func (c *ClusterReconciler) ensureIngress(ctx context.Context, cluster *v1alpha1.Cluster) error {
func (c *ClusterReconciler) ensureIngress(ctx context.Context, cluster *v1beta1.Cluster) error {
log := ctrl.LoggerFrom(ctx)
log.Info("ensuring cluster ingress")
log.V(1).Info("Ensuring cluster ingress")
expectedServerIngress := server.Ingress(ctx, cluster)
@@ -609,13 +614,13 @@ func (c *ClusterReconciler) ensureIngress(ctx context.Context, cluster *v1alpha1
key := client.ObjectKeyFromObject(currentServerIngress)
if result != controllerutil.OperationResultNone {
log.Info("cluster ingress updated", "key", key, "result", result)
log.V(1).Info("Cluster ingress updated", "key", key, "result", result)
}
return nil
}
func (c *ClusterReconciler) server(ctx context.Context, cluster *v1alpha1.Cluster, server *server.Server) error {
func (c *ClusterReconciler) server(ctx context.Context, cluster *v1beta1.Cluster, server *server.Server) error {
log := ctrl.LoggerFrom(ctx)
// create headless service for the statefulset
@@ -635,6 +640,9 @@ func (c *ClusterReconciler) server(ctx context.Context, cluster *v1alpha1.Cluste
return err
}
// Add the finalizer to the StatefulSet so the statefulset controller can handle cleanup.
controllerutil.AddFinalizer(expectedServerStatefulSet, etcdPodFinalizerName)
currentServerStatefulSet := expectedServerStatefulSet.DeepCopy()
result, err := controllerutil.CreateOrUpdate(ctx, c.Client, currentServerStatefulSet, func() error {
if err := controllerutil.SetControllerReference(cluster, currentServerStatefulSet, c.Scheme); err != nil {
@@ -648,13 +656,13 @@ func (c *ClusterReconciler) server(ctx context.Context, cluster *v1alpha1.Cluste
if result != controllerutil.OperationResultNone {
key := client.ObjectKeyFromObject(currentServerStatefulSet)
log.Info("ensuring serverStatefulSet", "key", key, "result", result)
log.V(1).Info("Ensuring server StatefulSet", "key", key, "result", result)
}
return err
}
func (c *ClusterReconciler) bindClusterRoles(ctx context.Context, cluster *v1alpha1.Cluster) error {
func (c *ClusterReconciler) bindClusterRoles(ctx context.Context, cluster *v1beta1.Cluster) error {
clusterRoles := []string{"k3k-kubelet-node", "k3k-priorityclass"}
var err error
@@ -684,7 +692,7 @@ func (c *ClusterReconciler) bindClusterRoles(ctx context.Context, cluster *v1alp
return err
}
func (c *ClusterReconciler) ensureAgent(ctx context.Context, cluster *v1alpha1.Cluster, serviceIP, token string) error {
func (c *ClusterReconciler) ensureAgent(ctx context.Context, cluster *v1beta1.Cluster, serviceIP, token string) error {
config := agent.NewConfig(cluster, c.Client, c.Scheme)
var agentEnsurer agent.ResourceEnsurer
@@ -719,7 +727,7 @@ func (c *ClusterReconciler) ensureAgent(ctx context.Context, cluster *v1alpha1.C
return agentEnsurer.EnsureResources(ctx)
}
func (c *ClusterReconciler) validate(cluster *v1alpha1.Cluster, policy v1alpha1.VirtualClusterPolicy) error {
func (c *ClusterReconciler) validate(cluster *v1beta1.Cluster, policy v1beta1.VirtualClusterPolicy) error {
if cluster.Name == ClusterInvalidName {
return fmt.Errorf("%w: invalid cluster name %q", ErrClusterValidation, cluster.Name)
}
@@ -728,8 +736,8 @@ func (c *ClusterReconciler) validate(cluster *v1alpha1.Cluster, policy v1alpha1.
return fmt.Errorf("%w: mode %q is not allowed by the policy %q", ErrClusterValidation, cluster.Spec.Mode, policy.Name)
}
if cluster.Spec.CustomCAs.Enabled {
if err := c.validateCustomCACerts(cluster); err != nil {
if cluster.Spec.CustomCAs != nil && cluster.Spec.CustomCAs.Enabled {
if err := c.validateCustomCACerts(cluster.Spec.CustomCAs.Sources); err != nil {
return fmt.Errorf("%w: %w", ErrClusterValidation, err)
}
}
@@ -751,7 +759,7 @@ func (c *ClusterReconciler) lookupServiceCIDR(ctx context.Context) (string, erro
// Try to look for the serviceCIDR by creating a failing service.
// The error should contain the expected serviceCIDR
log.Info("looking up serviceCIDR from a failing service creation")
log.V(1).Info("Looking up Service CIDR from a failing service creation")
failingSvc := v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: "fail", Namespace: "default"},
@@ -763,7 +771,7 @@ func (c *ClusterReconciler) lookupServiceCIDR(ctx context.Context) (string, erro
if len(splittedErrMsg) > 1 {
serviceCIDR := strings.TrimSpace(splittedErrMsg[1])
log.Info("found serviceCIDR from failing service creation: " + serviceCIDR)
log.V(1).Info("Found Service CIDR from failing service creation: " + serviceCIDR)
// validate serviceCIDR
_, serviceCIDRAddr, err := net.ParseCIDR(serviceCIDR)
@@ -777,7 +785,7 @@ func (c *ClusterReconciler) lookupServiceCIDR(ctx context.Context) (string, erro
// Try to look for the kube-apiserver Pod and check the '--service-cluster-ip-range' flag.
log.Info("looking up serviceCIDR from kube-apiserver pod")
log.V(1).Info("Looking up Service CIDR from kube-apiserver pod")
matchingLabels := client.MatchingLabels(map[string]string{
"component": "kube-apiserver",
@@ -800,12 +808,12 @@ func (c *ClusterReconciler) lookupServiceCIDR(ctx context.Context) (string, erro
for _, arg := range apiServerArgs {
if strings.HasPrefix(arg, "--service-cluster-ip-range=") {
serviceCIDR := strings.TrimPrefix(arg, "--service-cluster-ip-range=")
log.Info("found serviceCIDR from kube-apiserver pod: " + serviceCIDR)
log.V(1).Info("Found Service CIDR from kube-apiserver pod: " + serviceCIDR)
// validate serviceCIDR
_, serviceCIDRAddr, err := net.ParseCIDR(serviceCIDR)
if err != nil {
log.Error(err, "serviceCIDR is not valid")
log.Error(err, "Service CIDR is not valid")
break
}
@@ -820,8 +828,7 @@ func (c *ClusterReconciler) lookupServiceCIDR(ctx context.Context) (string, erro
}
// validateCustomCACerts will make sure that all the cert secrets exist
func (c *ClusterReconciler) validateCustomCACerts(cluster *v1alpha1.Cluster) error {
credentialSources := cluster.Spec.CustomCAs.Sources
func (c *ClusterReconciler) validateCustomCACerts(credentialSources v1beta1.CredentialSources) error {
if credentialSources.ClientCA.SecretName == "" ||
credentialSources.ServerCA.SecretName == "" ||
credentialSources.ETCDPeerCA.SecretName == "" ||

View File

@@ -12,21 +12,23 @@ import (
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
coordinationv1 "k8s.io/api/coordination/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
)
func (c *ClusterReconciler) finalizeCluster(ctx context.Context, cluster *v1alpha1.Cluster) (reconcile.Result, error) {
func (c *ClusterReconciler) finalizeCluster(ctx context.Context, cluster *v1beta1.Cluster) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx)
log.Info("finalizing Cluster")
log.V(1).Info("Deleting Cluster")
// Set the Terminating phase and condition
cluster.Status.Phase = v1alpha1.ClusterTerminating
cluster.Status.Phase = v1beta1.ClusterTerminating
meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
Type: ConditionReady,
Status: metav1.ConditionFalse,
@@ -39,8 +41,8 @@ func (c *ClusterReconciler) finalizeCluster(ctx context.Context, cluster *v1alph
}
// Deallocate ports for kubelet and webhook if used
if cluster.Spec.Mode == v1alpha1.SharedClusterMode && cluster.Spec.MirrorHostNodes {
log.Info("dellocating ports for kubelet and webhook")
if cluster.Spec.Mode == v1beta1.SharedClusterMode && cluster.Spec.MirrorHostNodes {
log.V(1).Info("dellocating ports for kubelet and webhook")
if err := c.PortAllocator.DeallocateKubeletPort(ctx, cluster.Name, cluster.Namespace, cluster.Status.KubeletPort); err != nil {
return reconcile.Result{}, err
@@ -51,8 +53,25 @@ func (c *ClusterReconciler) finalizeCluster(ctx context.Context, cluster *v1alph
}
}
// delete API server lease
lease := &coordinationv1.Lease{
TypeMeta: metav1.TypeMeta{
Kind: "Lease",
APIVersion: "coordination.k8s.io/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: cluster.Name,
Namespace: cluster.Namespace,
},
}
if err := c.Client.Delete(ctx, lease); err != nil && !apierrors.IsNotFound(err) {
return reconcile.Result{}, err
}
// Remove finalizer from the cluster and update it only when all resources are cleaned up
if controllerutil.RemoveFinalizer(cluster, clusterFinalizerName) {
log.Info("Deleting Cluster removing finalizer")
if err := c.Client.Update(ctx, cluster); err != nil {
return reconcile.Result{}, err
}
@@ -61,7 +80,10 @@ func (c *ClusterReconciler) finalizeCluster(ctx context.Context, cluster *v1alph
return reconcile.Result{}, nil
}
func (c *ClusterReconciler) unbindClusterRoles(ctx context.Context, cluster *v1alpha1.Cluster) error {
func (c *ClusterReconciler) unbindClusterRoles(ctx context.Context, cluster *v1beta1.Cluster) error {
log := ctrl.LoggerFrom(ctx)
log.V(1).Info("Unbinding ClusterRoles")
clusterRoles := []string{"k3k-kubelet-node", "k3k-priorityclass"}
var err error

View File

@@ -17,7 +17,7 @@ import (
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
@@ -99,7 +99,7 @@ func buildScheme() *runtime.Scheme {
err := clientgoscheme.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
err = v1alpha1.AddToScheme(scheme)
err = v1beta1.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
return scheme

View File

@@ -12,7 +12,7 @@ import (
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/cluster/server"
@@ -38,7 +38,7 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
When("creating a Cluster", func() {
It("will be created with some defaults", func() {
cluster := &v1alpha1.Cluster{
cluster := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
@@ -48,15 +48,28 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
err := k8sClient.Create(ctx, cluster)
Expect(err).To(Not(HaveOccurred()))
Expect(cluster.Spec.Mode).To(Equal(v1alpha1.SharedClusterMode))
Expect(cluster.Spec.Mode).To(Equal(v1beta1.SharedClusterMode))
Expect(cluster.Spec.Agents).To(Equal(ptr.To[int32](0)))
Expect(cluster.Spec.Servers).To(Equal(ptr.To[int32](1)))
Expect(cluster.Spec.Version).To(BeEmpty())
Expect(cluster.Spec.Persistence.Type).To(Equal(v1alpha1.DynamicPersistenceMode))
Expect(cluster.Spec.Persistence.StorageRequestSize).To(Equal("1G"))
Expect(cluster.Spec.CustomCAs).To(BeNil())
Expect(cluster.Status.Phase).To(Equal(v1alpha1.ClusterUnknown))
// sync
// enabled by default
Expect(cluster.Spec.Sync).To(Not(BeNil()))
Expect(cluster.Spec.Sync.ConfigMaps.Enabled).To(BeTrue())
Expect(cluster.Spec.Sync.PersistentVolumeClaims.Enabled).To(BeTrue())
Expect(cluster.Spec.Sync.Secrets.Enabled).To(BeTrue())
Expect(cluster.Spec.Sync.Services.Enabled).To(BeTrue())
// disabled by default
Expect(cluster.Spec.Sync.Ingresses.Enabled).To(BeFalse())
Expect(cluster.Spec.Sync.PriorityClasses.Enabled).To(BeFalse())
Expect(cluster.Spec.Persistence.Type).To(Equal(v1beta1.DynamicPersistenceMode))
Expect(cluster.Spec.Persistence.StorageRequestSize).To(Equal("2G"))
Expect(cluster.Status.Phase).To(Equal(v1beta1.ClusterUnknown))
serverVersion, err := k8s.ServerVersion()
Expect(err).To(Not(HaveOccurred()))
@@ -92,14 +105,14 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
When("exposing the cluster with nodePort", func() {
It("will have a NodePort service", func() {
cluster := &v1alpha1.Cluster{
cluster := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
Expose: &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{},
Spec: v1beta1.ClusterSpec{
Expose: &v1beta1.ExposeConfig{
NodePort: &v1beta1.NodePortConfig{},
},
},
}
@@ -124,14 +137,14 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
})
It("will have the specified ports exposed when specified", func() {
cluster := &v1alpha1.Cluster{
cluster := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
Expose: &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{
Spec: v1beta1.ClusterSpec{
Expose: &v1beta1.ExposeConfig{
NodePort: &v1beta1.NodePortConfig{
ServerPort: ptr.To[int32](30010),
ETCDPort: ptr.To[int32](30011),
},
@@ -173,14 +186,14 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
})
It("will not expose the port when out of range", func() {
cluster := &v1alpha1.Cluster{
cluster := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
Expose: &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{
Spec: v1beta1.ClusterSpec{
Expose: &v1beta1.ExposeConfig{
NodePort: &v1beta1.NodePortConfig{
ETCDPort: ptr.To[int32](2222),
},
},
@@ -218,14 +231,14 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
When("exposing the cluster with loadbalancer", func() {
It("will have a LoadBalancer service with the default ports exposed", func() {
cluster := &v1alpha1.Cluster{
cluster := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
Expose: &v1alpha1.ExposeConfig{
LoadBalancer: &v1alpha1.LoadBalancerConfig{},
Spec: v1beta1.ClusterSpec{
Expose: &v1beta1.ExposeConfig{
LoadBalancer: &v1beta1.LoadBalancerConfig{},
},
},
}
@@ -266,15 +279,15 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
When("exposing the cluster with nodePort and loadbalancer", func() {
It("will fail", func() {
cluster := &v1alpha1.Cluster{
cluster := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
Expose: &v1alpha1.ExposeConfig{
LoadBalancer: &v1alpha1.LoadBalancerConfig{},
NodePort: &v1alpha1.NodePortConfig{},
Spec: v1beta1.ClusterSpec{
Expose: &v1beta1.ExposeConfig{
LoadBalancer: &v1beta1.LoadBalancerConfig{},
NodePort: &v1beta1.NodePortConfig{},
},
},
}

View File

@@ -0,0 +1,38 @@
package cluster
import (
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/predicate"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
func newClusterPredicate() predicate.Predicate {
return predicate.NewPredicateFuncs(func(object client.Object) bool {
owner := metav1.GetControllerOf(object)
return owner != nil &&
owner.Kind == "Cluster" &&
owner.APIVersion == v1beta1.SchemeGroupVersion.String()
})
}
func clusterNamespacedName(object client.Object) types.NamespacedName {
var clusterName string
owner := metav1.GetControllerOf(object)
if owner != nil && owner.Kind == "Cluster" && owner.APIVersion == v1beta1.SchemeGroupVersion.String() {
clusterName = owner.Name
} else {
clusterName = object.GetLabels()[translate.ClusterNameLabel]
}
return types.NamespacedName{
Name: clusterName,
Namespace: object.GetNamespace(),
}
}
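As a rough illustration of how the predicate and the helper behave together, a sketch in the same package (it assumes corev1, the controller-runtime event package and k8s.io/utils/ptr are imported; the pod metadata is made up):

pod := &corev1.Pod{
    ObjectMeta: metav1.ObjectMeta{
        Name:      "k3k-mycluster-server-0",
        Namespace: "ns-1",
        OwnerReferences: []metav1.OwnerReference{{
            APIVersion: v1beta1.SchemeGroupVersion.String(),
            Kind:       "Cluster",
            Name:       "mycluster",
            Controller: ptr.To(true),
        }},
    },
}

pred := newClusterPredicate()
_ = pred.Create(event.CreateEvent{Object: pod}) // true: the pod is controlled by a v1beta1 Cluster
_ = clusterNamespacedName(pod)                  // {Name: "mycluster", Namespace: "ns-1"}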

View File

@@ -2,36 +2,18 @@ package cluster
import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"net/url"
"strings"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
certutil "github.com/rancher/dynamiclistener/cert"
clientv3 "go.etcd.io/etcd/client/v3"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/certs"
"github.com/rancher/k3k/pkg/controller/cluster/server"
"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
"github.com/rancher/k3k/k3k-kubelet/translate"
)
const (
@@ -43,235 +25,54 @@ type PodReconciler struct {
Scheme *runtime.Scheme
}
// Add adds a new controller to the manager
// AddPodController adds a new controller for Pods to the manager.
// It reconciles the Pods of the Host Cluster with those of the Virtual Cluster.
func AddPodController(ctx context.Context, mgr manager.Manager, maxConcurrentReconciles int) error {
// initialize a new Reconciler
reconciler := PodReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}
return ctrl.NewControllerManagedBy(mgr).
Watches(&v1.Pod{}, handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &apps.StatefulSet{}, handler.OnlyControllerOwner())).
For(&v1.Pod{}).
Named(podController).
WithEventFilter(newClusterPredicate()).
WithOptions(controller.Options{MaxConcurrentReconciles: maxConcurrentReconciles}).
Complete(&reconciler)
}
func (p *PodReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx).WithValues("statefulset", req.NamespacedName)
ctx = ctrl.LoggerInto(ctx, log) // enrich the current logger
func (r *PodReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx)
log.V(1).Info("Reconciling Pod")
s := strings.Split(req.Name, "-")
if len(s) < 1 {
return reconcile.Result{}, nil
}
if s[0] != "k3k" {
return reconcile.Result{}, nil
}
clusterName := s[1]
var cluster v1alpha1.Cluster
if err := p.Client.Get(ctx, types.NamespacedName{Name: clusterName, Namespace: req.Namespace}, &cluster); err != nil {
if !apierrors.IsNotFound(err) {
return reconcile.Result{}, err
}
}
matchingLabels := ctrlruntimeclient.MatchingLabels(map[string]string{"role": "server"})
listOpts := &ctrlruntimeclient.ListOptions{Namespace: req.Namespace}
matchingLabels.ApplyToList(listOpts)
var podList v1.PodList
if err := p.Client.List(ctx, &podList, listOpts); err != nil {
var pod v1.Pod
if err := r.Client.Get(ctx, req.NamespacedName, &pod); err != nil {
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
if len(podList.Items) == 1 {
return reconcile.Result{}, nil
// get cluster from the object
cluster := clusterNamespacedName(&pod)
virtualClient, err := newVirtualClient(ctx, r.Client, cluster.Name, cluster.Namespace)
if err != nil {
return reconcile.Result{}, err
}
for _, pod := range podList.Items {
if err := p.handleServerPod(ctx, cluster, &pod); err != nil {
return reconcile.Result{}, err
if !pod.DeletionTimestamp.IsZero() {
virtName := pod.GetAnnotations()[translate.ResourceNameAnnotation]
virtNamespace := pod.GetAnnotations()[translate.ResourceNamespaceAnnotation]
virtPod := v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: virtName,
Namespace: virtNamespace,
},
}
log.V(1).Info("Deleting Virtual Pod", "name", virtName, "namespace", virtNamespace)
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(virtualClient.Delete(ctx, &virtPod))
}
return reconcile.Result{}, nil
}
func (p *PodReconciler) handleServerPod(ctx context.Context, cluster v1alpha1.Cluster, pod *v1.Pod) error {
log := ctrl.LoggerFrom(ctx)
log.Info("handling server pod")
role, found := pod.Labels["role"]
if !found {
return fmt.Errorf("server pod has no role label")
}
if role != "server" {
log.V(1).Info("pod has a different role: " + role)
return nil
}
// if etcd pod is marked for deletion then we need to remove it from the etcd member list before deletion
if !pod.DeletionTimestamp.IsZero() {
// check if cluster is deleted then remove the finalizer from the pod
if cluster.Name == "" {
if controllerutil.ContainsFinalizer(pod, etcdPodFinalizerName) {
controllerutil.RemoveFinalizer(pod, etcdPodFinalizerName)
if err := p.Client.Update(ctx, pod); err != nil {
return err
}
}
return nil
}
tlsConfig, err := p.getETCDTLS(ctx, &cluster)
if err != nil {
return err
}
// remove server from etcd
client, err := clientv3.New(clientv3.Config{
Endpoints: []string{
fmt.Sprintf("https://%s.%s:2379", server.ServiceName(cluster.Name), pod.Namespace),
},
TLS: tlsConfig,
})
if err != nil {
return err
}
if err := removePeer(ctx, client, pod.Name, pod.Status.PodIP); err != nil {
return err
}
// remove our finalizer from the list and update it.
if controllerutil.RemoveFinalizer(pod, etcdPodFinalizerName) {
if err := p.Client.Update(ctx, pod); err != nil {
return err
}
}
}
if controllerutil.AddFinalizer(pod, etcdPodFinalizerName) {
return p.Client.Update(ctx, pod)
}
return nil
}
func (p *PodReconciler) getETCDTLS(ctx context.Context, cluster *v1alpha1.Cluster) (*tls.Config, error) {
log := ctrl.LoggerFrom(ctx)
log.Info("generating etcd TLS client certificate", "cluster", cluster)
token, err := p.clusterToken(ctx, cluster)
if err != nil {
return nil, err
}
endpoint := server.ServiceName(cluster.Name) + "." + cluster.Namespace
var b *bootstrap.ControlRuntimeBootstrap
if err := retry.OnError(k3kcontroller.Backoff, func(err error) bool {
return true
}, func() error {
var err error
b, err = bootstrap.DecodedBootstrap(token, endpoint)
return err
}); err != nil {
return nil, err
}
etcdCert, etcdKey, err := certs.CreateClientCertKey("etcd-client", nil, nil, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, 0, b.ETCDServerCA.Content, b.ETCDServerCAKey.Content)
if err != nil {
return nil, err
}
clientCert, err := tls.X509KeyPair(etcdCert, etcdKey)
if err != nil {
return nil, err
}
// create rootCA CertPool
cert, err := certutil.ParseCertsPEM([]byte(b.ETCDServerCA.Content))
if err != nil {
return nil, err
}
pool := x509.NewCertPool()
pool.AddCert(cert[0])
return &tls.Config{
RootCAs: pool,
Certificates: []tls.Certificate{clientCert},
}, nil
}
// removePeer removes a peer from the cluster. The peer name and IP address must both match.
func removePeer(ctx context.Context, client *clientv3.Client, name, address string) error {
log := ctrl.LoggerFrom(ctx)
log.Info("removing peer from cluster", "name", name, "address", address)
ctx, cancel := context.WithTimeout(ctx, memberRemovalTimeout)
defer cancel()
members, err := client.MemberList(ctx)
if err != nil {
return err
}
for _, member := range members.Members {
if !strings.Contains(member.Name, name) {
continue
}
for _, peerURL := range member.PeerURLs {
u, err := url.Parse(peerURL)
if err != nil {
return err
}
if u.Hostname() == address {
log.Info("removing member from etcd", "name", member.Name, "id", member.ID, "address", address)
_, err := client.MemberRemove(ctx, member.ID)
if errors.Is(err, rpctypes.ErrGRPCMemberNotFound) {
return nil
}
return err
}
}
}
return nil
}
func (p *PodReconciler) clusterToken(ctx context.Context, cluster *v1alpha1.Cluster) (string, error) {
var tokenSecret v1.Secret
nn := types.NamespacedName{
Name: TokenSecretName(cluster.Name),
Namespace: cluster.Namespace,
}
if cluster.Spec.TokenSecretRef != nil {
nn.Name = TokenSecretName(cluster.Name)
}
if err := p.Client.Get(ctx, nn, &tokenSecret); err != nil {
return "", err
}
if _, ok := tokenSecret.Data["token"]; !ok {
return "", fmt.Errorf("no token field in secret %s/%s", nn.Namespace, nn.Name)
}
return string(tokenSecret.Data["token"]), nil
}

View File

@@ -16,7 +16,7 @@ import (
v1 "k8s.io/api/core/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
)
@@ -39,7 +39,7 @@ type content struct {
// GenerateBootstrapData generates the bootstrap data for the cluster:
// 1- use the server token to get the bootstrap data from k3s
// 2- save the bootstrap data as a secret
func GenerateBootstrapData(ctx context.Context, cluster *v1alpha1.Cluster, ip, token string) ([]byte, error) {
func GenerateBootstrapData(ctx context.Context, cluster *v1beta1.Cluster, ip, token string) ([]byte, error) {
bootstrap, err := requestBootstrap(token, ip)
if err != nil {
return nil, fmt.Errorf("failed to request bootstrap secret: %w", err)
@@ -162,7 +162,7 @@ func DecodedBootstrap(token, ip string) (*ControlRuntimeBootstrap, error) {
return bootstrap, nil
}
func GetFromSecret(ctx context.Context, client client.Client, cluster *v1alpha1.Cluster) (*ControlRuntimeBootstrap, error) {
func GetFromSecret(ctx context.Context, client client.Client, cluster *v1beta1.Cluster) (*ControlRuntimeBootstrap, error) {
key := types.NamespacedName{
Name: controller.SafeConcatNameWithPrefix(cluster.Name, "bootstrap"),
Namespace: cluster.Namespace,

View File

@@ -8,7 +8,7 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
)
@@ -45,15 +45,15 @@ func (s *Server) Config(init bool, serviceIP string) (*v1.Secret, error) {
}, nil
}
func serverConfigData(serviceIP string, cluster *v1alpha1.Cluster, token string) string {
func serverConfigData(serviceIP string, cluster *v1beta1.Cluster, token string) string {
return "cluster-init: true\nserver: https://" + serviceIP + "\n" + serverOptions(cluster, token)
}
func initConfigData(cluster *v1alpha1.Cluster, token string) string {
func initConfigData(cluster *v1beta1.Cluster, token string) string {
return "cluster-init: true\n" + serverOptions(cluster, token)
}
func serverOptions(cluster *v1alpha1.Cluster, token string) string {
func serverOptions(cluster *v1beta1.Cluster, token string) string {
var opts string
// TODO: generate token if not found

View File

@@ -8,7 +8,7 @@ import (
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
)
@@ -22,7 +22,7 @@ func IngressName(clusterName string) string {
return controller.SafeConcatNameWithPrefix(clusterName, "ingress")
}
func Ingress(ctx context.Context, cluster *v1alpha1.Cluster) networkingv1.Ingress {
func Ingress(ctx context.Context, cluster *v1beta1.Cluster) networkingv1.Ingress {
ingress := networkingv1.Ingress{
TypeMeta: metav1.TypeMeta{
Kind: "Ingress",
@@ -52,7 +52,7 @@ func Ingress(ctx context.Context, cluster *v1alpha1.Cluster) networkingv1.Ingres
return ingress
}
func ingressRules(cluster *v1alpha1.Cluster) []networkingv1.IngressRule {
func ingressRules(cluster *v1beta1.Cluster) []networkingv1.IngressRule {
var ingressRules []networkingv1.IngressRule
if cluster.Spec.Expose == nil || cluster.Spec.Expose.Ingress == nil {

View File

@@ -13,12 +13,13 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
)
@@ -32,7 +33,7 @@ const (
// Server
type Server struct {
cluster *v1alpha1.Cluster
cluster *v1beta1.Cluster
client client.Client
mode string
token string
@@ -41,7 +42,7 @@ type Server struct {
imagePullSecrets []string
}
func New(cluster *v1alpha1.Cluster, client client.Client, token, image, imagePullPolicy string, imagePullSecrets []string) *Server {
func New(cluster *v1beta1.Cluster, client client.Client, token, image, imagePullPolicy string, imagePullSecrets []string) *Server {
return &Server{
cluster: cluster,
client: client,
@@ -265,9 +266,13 @@ func (s *Server) StatefulServer(ctx context.Context) (*apps.StatefulSet, error)
replicas = *s.cluster.Spec.Servers
if s.cluster.Spec.Persistence.Type == v1alpha1.DynamicPersistenceMode {
if s.cluster.Spec.Persistence.Type == v1beta1.DynamicPersistenceMode {
persistent = true
pvClaim = s.setupDynamicPersistence()
if err := controllerutil.SetControllerReference(s.cluster, &pvClaim, s.client.Scheme()); err != nil {
return nil, err
}
}
var (
@@ -330,7 +335,7 @@ func (s *Server) StatefulServer(ctx context.Context) (*apps.StatefulSet, error)
volumeMounts = append(volumeMounts, volumeMount)
}
if s.cluster.Spec.CustomCAs.Enabled {
if s.cluster.Spec.CustomCAs != nil && s.cluster.Spec.CustomCAs.Enabled {
vols, mounts, err := s.loadCACertBundle(ctx)
if err != nil {
return nil, err
@@ -379,7 +384,7 @@ func (s *Server) StatefulServer(ctx context.Context) (*apps.StatefulSet, error)
},
},
}
if s.cluster.Spec.Persistence.Type == v1alpha1.DynamicPersistenceMode {
if s.cluster.Spec.Persistence.Type == v1beta1.DynamicPersistenceMode {
ss.Spec.VolumeClaimTemplates = []v1.PersistentVolumeClaim{pvClaim}
}
@@ -434,6 +439,10 @@ func (s *Server) setupStartCommand() (string, error) {
}
func (s *Server) loadCACertBundle(ctx context.Context) ([]v1.Volume, []v1.VolumeMount, error) {
if s.cluster.Spec.CustomCAs == nil {
return nil, nil, fmt.Errorf("customCAs not found")
}
customCerts := s.cluster.Spec.CustomCAs.Sources
caCertMap := map[string]string{
"server-ca": customCerts.ServerCA.SecretName,

View File

@@ -6,11 +6,11 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
)
func Service(cluster *v1alpha1.Cluster) *v1.Service {
func Service(cluster *v1beta1.Cluster) *v1.Service {
service := &v1.Service{
TypeMeta: metav1.TypeMeta{
Kind: "Service",
@@ -69,7 +69,7 @@ func Service(cluster *v1alpha1.Cluster) *v1.Service {
}
// addLoadBalancerPorts adds the load balancer ports to the service
func addLoadBalancerPorts(service *v1.Service, loadbalancerConfig v1alpha1.LoadBalancerConfig, k3sServerPort, etcdPort v1.ServicePort) {
func addLoadBalancerPorts(service *v1.Service, loadbalancerConfig v1beta1.LoadBalancerConfig, k3sServerPort, etcdPort v1.ServicePort) {
// If the server port is not specified, use the default port
if loadbalancerConfig.ServerPort == nil {
service.Spec.Ports = append(service.Spec.Ports, k3sServerPort)
@@ -90,7 +90,7 @@ func addLoadBalancerPorts(service *v1.Service, loadbalancerConfig v1alpha1.LoadB
}
// addNodePortPorts adds the node port ports to the service
func addNodePortPorts(service *v1.Service, nodePortConfig v1alpha1.NodePortConfig, k3sServerPort, etcdPort v1.ServicePort) {
func addNodePortPorts(service *v1.Service, nodePortConfig v1beta1.NodePortConfig, k3sServerPort, etcdPort v1.ServicePort) {
// If the server port is not specified, Kubernetes will set the node port to a random port between 30000-32767
if nodePortConfig.ServerPort == nil {
service.Spec.Ports = append(service.Spec.Ports, k3sServerPort)

View File

@@ -0,0 +1,93 @@
package cluster
import (
"context"
"fmt"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
v1 "k8s.io/api/core/v1"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/k3k-kubelet/translate"
)
const (
serviceController = "k3k-service-controller"
)
type ServiceReconciler struct {
HostClient ctrlruntimeclient.Client
}
// AddServiceController adds a new controller for Services to the manager.
func AddServiceController(ctx context.Context, mgr manager.Manager, maxConcurrentReconciles int) error {
reconciler := ServiceReconciler{
HostClient: mgr.GetClient(),
}
return ctrl.NewControllerManagedBy(mgr).
Named(serviceController).
For(&v1.Service{}).
WithEventFilter(newClusterPredicate()).
Complete(&reconciler)
}
func (r *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx)
log.V(1).Info("Reconciling Service")
var hostService v1.Service
if err := r.HostClient.Get(ctx, req.NamespacedName, &hostService); err != nil {
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
// Some services are owned by the cluster but don't have the annotations set (e.g. the kubelet svc).
// They don't exist in the virtual cluster, so we can skip them.
virtualServiceName, virtualServiceNameFound := hostService.Annotations[translate.ResourceNameAnnotation]
virtualServiceNamespace, virtualServiceNamespaceFound := hostService.Annotations[translate.ResourceNamespaceAnnotation]
if !virtualServiceNameFound || !virtualServiceNamespaceFound {
log.V(1).Info(fmt.Sprintf("Service %s/%s does not have virtual service annotations, skipping", hostService.Namespace, hostService.Name))
return reconcile.Result{}, nil
}
// get cluster from the object
cluster := clusterNamespacedName(&hostService)
virtualClient, err := newVirtualClient(ctx, r.HostClient, cluster.Name, cluster.Namespace)
if err != nil {
return reconcile.Result{}, fmt.Errorf("failed to get cluster info: %v", err)
}
if !hostService.DeletionTimestamp.IsZero() {
return reconcile.Result{}, nil
}
virtualServiceKey := types.NamespacedName{
Name: virtualServiceName,
Namespace: virtualServiceNamespace,
}
var virtualService v1.Service
if err := virtualClient.Get(ctx, virtualServiceKey, &virtualService); err != nil {
return reconcile.Result{}, fmt.Errorf("failed to get virtual service: %v", err)
}
if !equality.Semantic.DeepEqual(virtualService.Status.LoadBalancer, hostService.Status.LoadBalancer) {
log.V(1).Info("Updating Virtual Service Status", "name", virtualServiceName, "namespace", virtualServiceNamespace)
virtualService.Status.LoadBalancer = hostService.Status.LoadBalancer
if err := virtualClient.Status().Update(ctx, &virtualService); err != nil {
return reconcile.Result{}, err
}
}
return reconcile.Result{}, nil
}

View File
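
The controller above relies on two annotations written by the kubelet translator and only writes back LoadBalancer status when it differs. A rough standalone sketch of that contract; the annotation keys come from the translate package already imported above, while the name, namespace and IP are made up:

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/equality"

    "github.com/rancher/k3k/k3k-kubelet/translate"
)

func main() {
    // A host Service created for a virtual Service carries these annotations.
    host := v1.Service{}
    host.Annotations = map[string]string{
        translate.ResourceNameAnnotation:      "web",     // name inside the virtual cluster (made up)
        translate.ResourceNamespaceAnnotation: "default", // namespace inside the virtual cluster (made up)
    }
    host.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{IP: "10.0.0.10"}}

    // The virtual Service, as it would be fetched from the virtual cluster.
    var virtual v1.Service

    // Mirror the reconciler: copy status only when it diverges.
    if !equality.Semantic.DeepEqual(virtual.Status.LoadBalancer, host.Status.LoadBalancer) {
        virtual.Status.LoadBalancer = host.Status.LoadBalancer
    }

    fmt.Println(virtual.Status.LoadBalancer.Ingress[0].IP) // 10.0.0.10
}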

@@ -0,0 +1,330 @@
package cluster
import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"net/url"
"strings"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
certutil "github.com/rancher/dynamiclistener/cert"
clientv3 "go.etcd.io/etcd/client/v3"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/certs"
"github.com/rancher/k3k/pkg/controller/cluster/server"
"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
)
const (
statefulsetController = "k3k-statefulset-controller"
etcdPodFinalizerName = "etcdpod.k3k.io/finalizer"
)
type StatefulSetReconciler struct {
Client ctrlruntimeclient.Client
Scheme *runtime.Scheme
}
// AddStatefulSetController adds a new StatefulSet controller to the manager
func AddStatefulSetController(ctx context.Context, mgr manager.Manager, maxConcurrentReconciles int) error {
// initialize a new Reconciler
reconciler := StatefulSetReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}
return ctrl.NewControllerManagedBy(mgr).
For(&apps.StatefulSet{}).
Owns(&v1.Pod{}).
Named(statefulsetController).
WithOptions(controller.Options{MaxConcurrentReconciles: maxConcurrentReconciles}).
Complete(&reconciler)
}
func (p *StatefulSetReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx)
log.Info("Reconciling StatefulSet")
var sts apps.StatefulSet
if err := p.Client.Get(ctx, req.NamespacedName, &sts); err != nil {
// we can ignore the IsNotFound error:
// if the StatefulSet was deleted, its pods have already been cleaned up
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
// If the StatefulSet is being deleted, we need to remove the finalizers from its pods
// and remove the finalizer from the StatefulSet itself.
if !sts.DeletionTimestamp.IsZero() {
return p.handleDeletion(ctx, &sts)
}
// get cluster name from the object
clusterKey := clusterNamespacedName(&sts)
var cluster v1beta1.Cluster
if err := p.Client.Get(ctx, clusterKey, &cluster); err != nil {
if !apierrors.IsNotFound(err) {
return reconcile.Result{}, err
}
}
podList, err := p.listPods(ctx, &sts)
if err != nil {
return reconcile.Result{}, err
}
if len(podList.Items) == 1 {
serverPod := podList.Items[0]
if !serverPod.DeletionTimestamp.IsZero() {
if controllerutil.RemoveFinalizer(&serverPod, etcdPodFinalizerName) {
if err := p.Client.Update(ctx, &serverPod); err != nil {
return reconcile.Result{}, err
}
}
return reconcile.Result{}, nil
}
}
for _, pod := range podList.Items {
if err := p.handleServerPod(ctx, cluster, &pod); err != nil {
return reconcile.Result{}, err
}
}
return reconcile.Result{}, nil
}
func (p *StatefulSetReconciler) handleServerPod(ctx context.Context, cluster v1beta1.Cluster, pod *v1.Pod) error {
log := ctrl.LoggerFrom(ctx)
log.V(1).Info("Handling Server Pod")
if pod.DeletionTimestamp.IsZero() {
if controllerutil.AddFinalizer(pod, etcdPodFinalizerName) {
log.V(1).Info("Server Pod is being deleted. Removing finalizer", "pod", pod.Name, "namespace", pod.Namespace)
return p.Client.Update(ctx, pod)
}
return nil
}
// If the etcd pod is marked for deletion, remove it from the etcd member list before it goes away.
// If the cluster itself was already deleted, just drop the finalizer from the pod.
if cluster.Name == "" {
if controllerutil.RemoveFinalizer(pod, etcdPodFinalizerName) {
log.V(1).Info("Cluster was deleted. Deleting Server Pod removing finalizer", "pod", pod.Name, "namespace", pod.Namespace)
if err := p.Client.Update(ctx, pod); err != nil {
return err
}
}
return nil
}
tlsConfig, err := p.getETCDTLS(ctx, &cluster)
if err != nil {
return err
}
// remove server from etcd
client, err := clientv3.New(clientv3.Config{
Endpoints: []string{
fmt.Sprintf("https://%s.%s:2379", server.ServiceName(cluster.Name), pod.Namespace),
},
TLS: tlsConfig,
})
if err != nil {
return err
}
if err := removePeer(ctx, client, pod.Name, pod.Status.PodIP); err != nil {
return err
}
// remove our finalizer from the list and update it.
if controllerutil.RemoveFinalizer(pod, etcdPodFinalizerName) {
log.V(1).Info("Deleting Server Pod removing finalizer", "pod", pod.Name, "namespace", pod.Namespace)
if err := p.Client.Update(ctx, pod); err != nil {
return err
}
}
return nil
}
func (p *StatefulSetReconciler) getETCDTLS(ctx context.Context, cluster *v1beta1.Cluster) (*tls.Config, error) {
log := ctrl.LoggerFrom(ctx)
log.V(1).Info("Generating ETCD TLS client certificate", "cluster", cluster)
token, err := p.clusterToken(ctx, cluster)
if err != nil {
return nil, err
}
endpoint := server.ServiceName(cluster.Name) + "." + cluster.Namespace
var b *bootstrap.ControlRuntimeBootstrap
if err := retry.OnError(k3kcontroller.Backoff, func(err error) bool {
return true
}, func() error {
var err error
b, err = bootstrap.DecodedBootstrap(token, endpoint)
return err
}); err != nil {
return nil, err
}
etcdCert, etcdKey, err := certs.CreateClientCertKey("etcd-client", nil, nil, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, 0, b.ETCDServerCA.Content, b.ETCDServerCAKey.Content)
if err != nil {
return nil, err
}
clientCert, err := tls.X509KeyPair(etcdCert, etcdKey)
if err != nil {
return nil, err
}
// create rootCA CertPool
cert, err := certutil.ParseCertsPEM([]byte(b.ETCDServerCA.Content))
if err != nil {
return nil, err
}
pool := x509.NewCertPool()
pool.AddCert(cert[0])
return &tls.Config{
RootCAs: pool,
Certificates: []tls.Certificate{clientCert},
}, nil
}
// removePeer removes a peer from the cluster. The peer name and IP address must both match.
func removePeer(ctx context.Context, client *clientv3.Client, name, address string) error {
log := ctrl.LoggerFrom(ctx)
log.V(1).Info("Removing peer from cluster", "name", name, "address", address)
ctx, cancel := context.WithTimeout(ctx, memberRemovalTimeout)
defer cancel()
members, err := client.MemberList(ctx)
if err != nil {
return err
}
for _, member := range members.Members {
if !strings.Contains(member.Name, name) {
continue
}
for _, peerURL := range member.PeerURLs {
u, err := url.Parse(peerURL)
if err != nil {
return err
}
if u.Hostname() == address {
log.V(1).Info("Removing member from ETCD", "name", member.Name, "id", member.ID, "address", address)
_, err := client.MemberRemove(ctx, member.ID)
if errors.Is(err, rpctypes.ErrGRPCMemberNotFound) {
return nil
}
return err
}
}
}
return nil
}
func (p *StatefulSetReconciler) clusterToken(ctx context.Context, cluster *v1beta1.Cluster) (string, error) {
var tokenSecret v1.Secret
nn := types.NamespacedName{
Name: TokenSecretName(cluster.Name),
Namespace: cluster.Namespace,
}
if cluster.Spec.TokenSecretRef != nil {
nn.Name = cluster.Spec.TokenSecretRef.Name
}
if err := p.Client.Get(ctx, nn, &tokenSecret); err != nil {
return "", err
}
if _, ok := tokenSecret.Data["token"]; !ok {
return "", fmt.Errorf("no token field in secret %s/%s", nn.Namespace, nn.Name)
}
return string(tokenSecret.Data["token"]), nil
}
func (p *StatefulSetReconciler) handleDeletion(ctx context.Context, sts *apps.StatefulSet) (ctrl.Result, error) {
log := ctrl.LoggerFrom(ctx)
podList, err := p.listPods(ctx, sts)
if err != nil {
return reconcile.Result{}, err
}
for _, pod := range podList.Items {
if controllerutil.RemoveFinalizer(&pod, etcdPodFinalizerName) {
log.V(1).Info("Updating Server Pod removing finalizer", "name", pod.Name, "namespace", pod.Namespace)
if err := p.Client.Update(ctx, &pod); err != nil {
return reconcile.Result{}, err
}
}
}
if controllerutil.RemoveFinalizer(sts, etcdPodFinalizerName) {
return reconcile.Result{}, p.Client.Update(ctx, sts)
}
return reconcile.Result{}, nil
}
func (p *StatefulSetReconciler) listPods(ctx context.Context, sts *apps.StatefulSet) (*v1.PodList, error) {
selector, err := metav1.LabelSelectorAsSelector(sts.Spec.Selector)
if err != nil {
return nil, fmt.Errorf("failed to create selector from statefulset: %w", err)
}
listOpts := &ctrlruntimeclient.ListOptions{
Namespace: sts.Namespace,
LabelSelector: selector,
}
var podList v1.PodList
if err := p.Client.List(ctx, &podList, listOpts); err != nil {
return nil, ctrlruntimeclient.IgnoreNotFound(err)
}
return &podList, nil
}

View File
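
The subtlest part of the file above is removePeer's matching rule: a member is removed only when its name contains the pod name and one of its peer URLs has the pod IP as hostname. A stdlib-only sketch that isolates that rule, with made-up member data:

package main

import (
    "fmt"
    "net/url"
    "strings"
)

type member struct {
    Name     string
    PeerURLs []string
}

// shouldRemove applies the same two-part match: name substring plus an exact
// hostname match against one of the peer URLs.
func shouldRemove(m member, podName, podIP string) bool {
    if !strings.Contains(m.Name, podName) {
        return false
    }
    for _, raw := range m.PeerURLs {
        u, err := url.Parse(raw)
        if err != nil {
            continue
        }
        if u.Hostname() == podIP {
            return true
        }
    }
    return false
}

func main() {
    m := member{Name: "k3k-mycluster-server-0", PeerURLs: []string{"https://10.42.0.15:2380"}}
    fmt.Println(shouldRemove(m, "k3k-mycluster-server-0", "10.42.0.15")) // true
    fmt.Println(shouldRemove(m, "k3k-mycluster-server-0", "10.42.0.99")) // false
}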

@@ -1,14 +1,16 @@
package cluster
import (
"context"
"errors"
"k8s.io/apimachinery/pkg/api/meta"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
)
@@ -24,9 +26,12 @@ const (
ReasonTerminating = "Terminating"
)
func (c *ClusterReconciler) updateStatus(cluster *v1alpha1.Cluster, reconcileErr error) {
func (c *ClusterReconciler) updateStatus(ctx context.Context, cluster *v1beta1.Cluster, reconcileErr error) {
log := ctrl.LoggerFrom(ctx)
log.V(1).Info("Updating Cluster Conditions")
if !cluster.DeletionTimestamp.IsZero() {
cluster.Status.Phase = v1alpha1.ClusterTerminating
cluster.Status.Phase = v1beta1.ClusterTerminating
meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
Type: ConditionReady,
Status: metav1.ConditionFalse,
@@ -39,7 +44,7 @@ func (c *ClusterReconciler) updateStatus(cluster *v1alpha1.Cluster, reconcileErr
// Handle validation errors specifically to set the Pending phase.
if errors.Is(reconcileErr, ErrClusterValidation) {
cluster.Status.Phase = v1alpha1.ClusterPending
cluster.Status.Phase = v1beta1.ClusterPending
meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
Type: ConditionReady,
Status: metav1.ConditionFalse,
@@ -53,7 +58,7 @@ func (c *ClusterReconciler) updateStatus(cluster *v1alpha1.Cluster, reconcileErr
}
if errors.Is(reconcileErr, bootstrap.ErrServerNotReady) {
cluster.Status.Phase = v1alpha1.ClusterProvisioning
cluster.Status.Phase = v1beta1.ClusterProvisioning
meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
Type: ConditionReady,
Status: metav1.ConditionFalse,
@@ -66,7 +71,7 @@ func (c *ClusterReconciler) updateStatus(cluster *v1alpha1.Cluster, reconcileErr
// If there's an error, but it's not a validation error, the cluster is in a failed state.
if reconcileErr != nil {
cluster.Status.Phase = v1alpha1.ClusterFailed
cluster.Status.Phase = v1beta1.ClusterFailed
meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
Type: ConditionReady,
Status: metav1.ConditionFalse,
@@ -80,7 +85,7 @@ func (c *ClusterReconciler) updateStatus(cluster *v1alpha1.Cluster, reconcileErr
}
// If we reach here, everything is successful.
cluster.Status.Phase = v1alpha1.ClusterReady
cluster.Status.Phase = v1beta1.ClusterReady
newCondition := metav1.Condition{
Type: ConditionReady,
Status: metav1.ConditionTrue,

View File
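
A compact sketch of the error-to-phase mapping that updateStatus implements above. The sentinel error, condition reasons and phase strings below are stand-ins, not the real v1beta1 constants or controller errors:

package main

import (
    "errors"
    "fmt"

    "k8s.io/apimachinery/pkg/api/meta"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// errValidation stands in for the controller's ErrClusterValidation sentinel.
var errValidation = errors.New("validation failed")

// classify mirrors the mapping above: validation error -> Pending,
// any other error -> Failed, no error -> Ready.
func classify(conds *[]metav1.Condition, reconcileErr error) string {
    switch {
    case errors.Is(reconcileErr, errValidation):
        meta.SetStatusCondition(conds, metav1.Condition{
            Type: "Ready", Status: metav1.ConditionFalse,
            Reason: "ValidationFailed", Message: reconcileErr.Error(),
        })
        return "Pending"
    case reconcileErr != nil:
        meta.SetStatusCondition(conds, metav1.Condition{
            Type: "Ready", Status: metav1.ConditionFalse,
            Reason: "ProvisioningFailed", Message: reconcileErr.Error(),
        })
        return "Failed"
    default:
        meta.SetStatusCondition(conds, metav1.Condition{
            Type: "Ready", Status: metav1.ConditionTrue, Reason: "Provisioned",
        })
        return "Ready"
    }
}

func main() {
    var conds []metav1.Condition
    fmt.Println(classify(&conds, errValidation)) // Pending
    fmt.Println(classify(&conds, nil))           // Ready
}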

@@ -15,11 +15,11 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
)
func (c *ClusterReconciler) token(ctx context.Context, cluster *v1alpha1.Cluster) (string, error) {
func (c *ClusterReconciler) token(ctx context.Context, cluster *v1beta1.Cluster) (string, error) {
if cluster.Spec.TokenSecretRef == nil {
return c.ensureTokenSecret(ctx, cluster)
}
@@ -42,7 +42,7 @@ func (c *ClusterReconciler) token(ctx context.Context, cluster *v1alpha1.Cluster
return string(tokenSecret.Data["token"]), nil
}
func (c *ClusterReconciler) ensureTokenSecret(ctx context.Context, cluster *v1alpha1.Cluster) (string, error) {
func (c *ClusterReconciler) ensureTokenSecret(ctx context.Context, cluster *v1beta1.Cluster) (string, error) {
log := ctrl.LoggerFrom(ctx)
// check if the secret is already created
@@ -62,7 +62,7 @@ func (c *ClusterReconciler) ensureTokenSecret(ctx context.Context, cluster *v1al
return string(tokenSecret.Data["token"]), nil
}
log.Info("Token secret is not specified, creating a random token")
log.V(1).Info("Token secret is not specified, creating a random token")
token, err := random(16)
if err != nil {
@@ -77,7 +77,7 @@ func (c *ClusterReconciler) ensureTokenSecret(ctx context.Context, cluster *v1al
})
if result != controllerutil.OperationResultNone {
log.Info("ensuring tokenSecret", "key", key, "result", result)
log.V(1).Info("Ensuring tokenSecret", "key", key, "result", result)
}
return token, err

View File
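
ensureTokenSecret calls a random(16) helper that the diff does not show. The snippet below is only a plausible stand-alone equivalent, an assumption about what such a helper does, not the repository's implementation:

package main

import (
    "crypto/rand"
    "encoding/hex"
    "fmt"
)

// randomToken returns a hex-encoded string built from size bytes of entropy.
func randomToken(size int) (string, error) {
    b := make([]byte, size)
    if _, err := rand.Read(b); err != nil {
        return "", err
    }
    return hex.EncodeToString(b), nil
}

func main() {
    token, err := randomToken(16)
    if err != nil {
        panic(err)
    }
    fmt.Println(token) // 32 hex characters
}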

@@ -9,7 +9,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
const (
@@ -28,7 +28,7 @@ var Backoff = wait.Backoff{
// K3SImage returns the rancher/k3s image tagged with the specified Version.
// If Version is empty it uses the same Kubernetes version as the host cluster,
// stored in the Status object. It falls back to the latest tag as a last resort.
func K3SImage(cluster *v1alpha1.Cluster, k3SImage string) string {
func K3SImage(cluster *v1beta1.Cluster, k3SImage string) string {
image := k3SImage
imageVersion := "latest"

View File
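
A tiny sketch of the tag-selection fallback described in the K3SImage comment above: spec version first, then the host version from status, then latest. The exact tag formatting k3k applies may differ; this only illustrates the precedence order:

package main

import "fmt"

// k3sImage picks the image tag with the same precedence as the comment above.
func k3sImage(repo, specVersion, hostVersion string) string {
    tag := "latest"

    switch {
    case specVersion != "":
        tag = specVersion
    case hostVersion != "":
        tag = hostVersion
    }

    return repo + ":" + tag
}

func main() {
    fmt.Println(k3sImage("rancher/k3s", "v1.2.3", "")) // rancher/k3s:v1.2.3
    fmt.Println(k3sImage("rancher/k3s", "", "v4.5.6")) // rancher/k3s:v4.5.6
    fmt.Println(k3sImage("rancher/k3s", "", ""))       // rancher/k3s:latest
}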

@@ -7,12 +7,12 @@ import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
func Test_K3S_Image(t *testing.T) {
type args struct {
cluster *v1alpha1.Cluster
cluster *v1beta1.Cluster
k3sImage string
}
@@ -25,12 +25,12 @@ func Test_K3S_Image(t *testing.T) {
name: "cluster with assigned version spec",
args: args{
k3sImage: "rancher/k3s",
cluster: &v1alpha1.Cluster{
cluster: &v1beta1.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: "mycluster",
Namespace: "ns-1",
},
Spec: v1alpha1.ClusterSpec{
Spec: v1beta1.ClusterSpec{
Version: "v1.2.3",
},
},
@@ -41,12 +41,12 @@ func Test_K3S_Image(t *testing.T) {
name: "cluster with empty version spec and assigned hostVersion status",
args: args{
k3sImage: "rancher/k3s",
cluster: &v1alpha1.Cluster{
cluster: &v1beta1.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: "mycluster",
Namespace: "ns-1",
},
Status: v1alpha1.ClusterStatus{
Status: v1beta1.ClusterStatus{
HostVersion: "v4.5.6",
},
},
@@ -57,7 +57,7 @@ func Test_K3S_Image(t *testing.T) {
name: "cluster with empty version spec and empty hostVersion status",
args: args{
k3sImage: "rancher/k3s",
cluster: &v1alpha1.Cluster{
cluster: &v1beta1.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: "mycluster",
Namespace: "ns-1",

View File

@@ -17,7 +17,7 @@ import (
networkingv1 "k8s.io/api/networking/v1"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/certs"
"github.com/rancher/k3k/pkg/controller/cluster/server"
@@ -39,7 +39,7 @@ func New() *KubeConfig {
}
}
func (k *KubeConfig) Generate(ctx context.Context, client client.Client, cluster *v1alpha1.Cluster, hostServerIP string, port int) (*clientcmdapi.Config, error) {
func (k *KubeConfig) Generate(ctx context.Context, client client.Client, cluster *v1beta1.Cluster, hostServerIP string, port int) (*clientcmdapi.Config, error) {
bootstrapData, err := bootstrap.GetFromSecret(ctx, client, cluster)
if err != nil {
return nil, err
@@ -93,7 +93,7 @@ func NewConfig(url string, serverCA, clientCert, clientKey []byte) *clientcmdapi
return config
}
func getURLFromService(ctx context.Context, client client.Client, cluster *v1alpha1.Cluster, hostServerIP string, serverPort int) (string, error) {
func getURLFromService(ctx context.Context, client client.Client, cluster *v1beta1.Cluster, hostServerIP string, serverPort int) (string, error) {
// get the server service to extract the right IP
key := types.NamespacedName{
Name: server.ServiceName(cluster.Name),

View File
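
For readers less familiar with clientcmdapi, this is a minimal sketch of assembling an in-memory kubeconfig from raw PEM material, roughly the job NewConfig performs above. The entry names, URL and empty credentials are placeholders:

package main

import (
    "fmt"

    "k8s.io/client-go/tools/clientcmd"
    clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

// newKubeconfig builds a kubeconfig with a single cluster/user/context triple.
func newKubeconfig(url string, serverCA, clientCert, clientKey []byte) *clientcmdapi.Config {
    cfg := clientcmdapi.NewConfig()
    cfg.Clusters["default"] = &clientcmdapi.Cluster{
        Server:                   url,
        CertificateAuthorityData: serverCA,
    }
    cfg.AuthInfos["default"] = &clientcmdapi.AuthInfo{
        ClientCertificateData: clientCert,
        ClientKeyData:         clientKey,
    }
    cfg.Contexts["default"] = &clientcmdapi.Context{Cluster: "default", AuthInfo: "default"}
    cfg.CurrentContext = "default"

    return cfg
}

func main() {
    cfg := newKubeconfig("https://127.0.0.1:6443", nil, nil, nil)

    out, err := clientcmd.Write(*cfg)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(out))
}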

@@ -11,13 +11,13 @@ import (
networkingv1 "k8s.io/api/networking/v1"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
// reconcileNamespacePodSecurityLabels will update the labels of the namespace to reconcile the PSA level specified in the VirtualClusterPolicy
func (c *VirtualClusterPolicyReconciler) reconcileNamespacePodSecurityLabels(ctx context.Context, namespace *v1.Namespace, policy *v1alpha1.VirtualClusterPolicy) {
func (c *VirtualClusterPolicyReconciler) reconcileNamespacePodSecurityLabels(ctx context.Context, namespace *v1.Namespace, policy *v1beta1.VirtualClusterPolicy) {
log := ctrl.LoggerFrom(ctx)
log.Info("reconciling PSA labels")
log.V(1).Info("Reconciling PSA labels")
// cleanup of old labels
delete(namespace.Labels, "pod-security.kubernetes.io/enforce")
@@ -33,7 +33,7 @@ func (c *VirtualClusterPolicyReconciler) reconcileNamespacePodSecurityLabels(ctx
namespace.Labels["pod-security.kubernetes.io/enforce-version"] = "latest"
// set the 'warn' labels for every PSA level except privileged
if psaLevel != v1alpha1.PrivilegedPodSecurityAdmissionLevel {
if psaLevel != v1beta1.PrivilegedPodSecurityAdmissionLevel {
namespace.Labels["pod-security.kubernetes.io/warn"] = string(psaLevel)
namespace.Labels["pod-security.kubernetes.io/warn-version"] = "latest"
}
@@ -44,7 +44,7 @@ func (c *VirtualClusterPolicyReconciler) reconcileNamespacePodSecurityLabels(ctx
// deleting the resources in them with the "app.kubernetes.io/managed-by=k3k-policy-controller" label
func (c *VirtualClusterPolicyReconciler) cleanupNamespaces(ctx context.Context) error {
log := ctrl.LoggerFrom(ctx)
log.Info("deleting resources")
log.V(1).Info("Cleanup Namespace resources")
var namespaces v1.NamespaceList
if err := c.Client.List(ctx, &namespaces); err != nil {

View File
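
A stdlib-only sketch of the label set produced by reconcileNamespacePodSecurityLabels above: enforce labels are always set, warn labels are added for every PSA level except privileged:

package main

import "fmt"

// psaLabels returns the pod-security labels for the given PSA level.
func psaLabels(level string) map[string]string {
    labels := map[string]string{
        "pod-security.kubernetes.io/enforce":         level,
        "pod-security.kubernetes.io/enforce-version": "latest",
    }

    if level != "privileged" {
        labels["pod-security.kubernetes.io/warn"] = level
        labels["pod-security.kubernetes.io/warn-version"] = "latest"
    }

    return labels
}

func main() {
    fmt.Println(psaLabels("privileged")) // enforce labels only
    fmt.Println(psaLabels("baseline"))   // enforce + warn labels
}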

@@ -11,13 +11,13 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
)
func (c *VirtualClusterPolicyReconciler) reconcileNetworkPolicy(ctx context.Context, namespace string, policy *v1alpha1.VirtualClusterPolicy) error {
func (c *VirtualClusterPolicyReconciler) reconcileNetworkPolicy(ctx context.Context, namespace string, policy *v1beta1.VirtualClusterPolicy) error {
log := ctrl.LoggerFrom(ctx)
log.Info("reconciling NetworkPolicy")
log.V(1).Info("Reconciling NetworkPolicy")
var cidrList []string
@@ -46,20 +46,25 @@ func (c *VirtualClusterPolicyReconciler) reconcileNetworkPolicy(ctx context.Cont
// if disabled then delete the existing network policy
if policy.Spec.DisableNetworkPolicy {
err := c.Client.Delete(ctx, networkPolicy)
return client.IgnoreNotFound(err)
log.V(1).Info("Deleting NetworkPolicy")
return client.IgnoreNotFound(c.Client.Delete(ctx, networkPolicy))
}
log.V(1).Info("Creating NetworkPolicy")
// otherwise try to create/update
err := c.Client.Create(ctx, networkPolicy)
if apierrors.IsAlreadyExists(err) {
log.V(1).Info("NetworkPolicy already exists, updating.")
return c.Client.Update(ctx, networkPolicy)
}
return err
}
func networkPolicy(namespaceName string, policy *v1alpha1.VirtualClusterPolicy, cidrList []string) *networkingv1.NetworkPolicy {
func networkPolicy(namespaceName string, policy *v1beta1.VirtualClusterPolicy, cidrList []string) *networkingv1.NetworkPolicy {
return &networkingv1.NetworkPolicy{
TypeMeta: metav1.TypeMeta{
Kind: "NetworkPolicy",

View File
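
reconcileNetworkPolicy above (and reconcileQuota and reconcileLimit later in this changeset) use the same create-then-update fallback. A self-contained helper capturing that pattern with a generic controller-runtime client would look roughly like this:

package sketch

import (
    "context"

    apierrors "k8s.io/apimachinery/pkg/api/errors"
    "sigs.k8s.io/controller-runtime/pkg/client"
)

// createOrUpdate tries to create the object and, if it already exists,
// falls back to updating it in place.
func createOrUpdate(ctx context.Context, c client.Client, obj client.Object) error {
    err := c.Create(ctx, obj)
    if apierrors.IsAlreadyExists(err) {
        return c.Update(ctx, obj)
    }

    return err
}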

@@ -21,7 +21,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
)
@@ -46,10 +46,10 @@ func Add(mgr manager.Manager, clusterCIDR string, maxConcurrentReconciles int) e
}
return ctrl.NewControllerManagedBy(mgr).
For(&v1alpha1.VirtualClusterPolicy{}).
For(&v1beta1.VirtualClusterPolicy{}).
Watches(&v1.Namespace{}, namespaceEventHandler()).
Watches(&v1.Node{}, nodeEventHandler(&reconciler)).
Watches(&v1alpha1.Cluster{}, clusterEventHandler(&reconciler)).
Watches(&v1beta1.Cluster{}, clusterEventHandler(&reconciler)).
Owns(&networkingv1.NetworkPolicy{}).
Owns(&v1.ResourceQuota{}).
Owns(&v1.LimitRange{}).
@@ -129,7 +129,7 @@ func namespaceEventHandler() handler.Funcs {
func nodeEventHandler(r *VirtualClusterPolicyReconciler) handler.Funcs {
// enqueue all the available VirtualClusterPolicies
enqueueAllVCPs := func(ctx context.Context, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
vcpList := &v1alpha1.VirtualClusterPolicyList{}
vcpList := &v1beta1.VirtualClusterPolicyList{}
if err := r.Client.List(ctx, vcpList); err != nil {
return
}
@@ -193,7 +193,7 @@ func clusterEventHandler(r *VirtualClusterPolicyReconciler) handler.Funcs {
return handler.Funcs{
// When a Cluster is created, if its Namespace has the "policy.k3k.io/policy-name" label
CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
cluster, ok := e.Object.(*v1alpha1.Cluster)
cluster, ok := e.Object.(*v1beta1.Cluster)
if !ok {
return
}
@@ -210,8 +210,8 @@ func clusterEventHandler(r *VirtualClusterPolicyReconciler) handler.Funcs {
// When a Cluster is updated, if its Namespace has the "policy.k3k.io/policy-name" label
// and if some of its spec influenced by the policy changed
UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
oldCluster, okOld := e.ObjectOld.(*v1alpha1.Cluster)
newCluster, okNew := e.ObjectNew.(*v1alpha1.Cluster)
oldCluster, okOld := e.ObjectOld.(*v1beta1.Cluster)
newCluster, okNew := e.ObjectNew.(*v1beta1.Cluster)
if !okOld || !okNew {
return
@@ -248,9 +248,9 @@ func clusterEventHandler(r *VirtualClusterPolicyReconciler) handler.Funcs {
func (c *VirtualClusterPolicyReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx)
log.Info("reconciling VirtualClusterPolicy")
log.Info("Reconciling VirtualClusterPolicy")
var policy v1alpha1.VirtualClusterPolicy
var policy v1beta1.VirtualClusterPolicy
if err := c.Client.Get(ctx, req.NamespacedName, &policy); err != nil {
return reconcile.Result{}, client.IgnoreNotFound(err)
}
@@ -261,6 +261,8 @@ func (c *VirtualClusterPolicyReconciler) Reconcile(ctx context.Context, req reco
// update Status if needed
if !reflect.DeepEqual(orig.Status, policy.Status) {
log.Info("Updating VirtualClusterPolicy Status")
if err := c.Client.Status().Update(ctx, &policy); err != nil {
return reconcile.Result{}, err
}
@@ -273,6 +275,8 @@ func (c *VirtualClusterPolicyReconciler) Reconcile(ctx context.Context, req reco
// update VirtualClusterPolicy if needed
if !reflect.DeepEqual(orig, policy) {
log.Info("Updating VirtualClusterPolicy")
if err := c.Client.Update(ctx, &policy); err != nil {
return reconcile.Result{}, err
}
@@ -281,7 +285,7 @@ func (c *VirtualClusterPolicyReconciler) Reconcile(ctx context.Context, req reco
return reconcile.Result{}, nil
}
func (c *VirtualClusterPolicyReconciler) reconcileVirtualClusterPolicy(ctx context.Context, policy *v1alpha1.VirtualClusterPolicy) error {
func (c *VirtualClusterPolicyReconciler) reconcileVirtualClusterPolicy(ctx context.Context, policy *v1beta1.VirtualClusterPolicy) error {
if err := c.reconcileMatchingNamespaces(ctx, policy); err != nil {
return err
}
@@ -293,9 +297,9 @@ func (c *VirtualClusterPolicyReconciler) reconcileVirtualClusterPolicy(ctx conte
return nil
}
func (c *VirtualClusterPolicyReconciler) reconcileMatchingNamespaces(ctx context.Context, policy *v1alpha1.VirtualClusterPolicy) error {
func (c *VirtualClusterPolicyReconciler) reconcileMatchingNamespaces(ctx context.Context, policy *v1beta1.VirtualClusterPolicy) error {
log := ctrl.LoggerFrom(ctx)
log.Info("reconciling matching Namespaces")
log.V(1).Info("Reconciling matching Namespaces")
listOpts := client.MatchingLabels{
PolicyNameLabelKey: policy.Name,
@@ -307,8 +311,10 @@ func (c *VirtualClusterPolicyReconciler) reconcileMatchingNamespaces(ctx context
}
for _, ns := range namespaces.Items {
ctx = ctrl.LoggerInto(ctx, log.WithValues("namespace", ns.Name))
log.Info("reconciling Namespace")
log = log.WithValues("namespace", ns.Name)
ctx = ctrl.LoggerInto(ctx, log)
log.V(1).Info("Reconciling Namespace")
orig := ns.DeepCopy()
@@ -331,6 +337,8 @@ func (c *VirtualClusterPolicyReconciler) reconcileMatchingNamespaces(ctx context
c.reconcileNamespacePodSecurityLabels(ctx, &ns, policy)
if !reflect.DeepEqual(orig, &ns) {
log.Info("Updating Namespace")
if err := c.Client.Update(ctx, &ns); err != nil {
return err
}
@@ -340,9 +348,9 @@ func (c *VirtualClusterPolicyReconciler) reconcileMatchingNamespaces(ctx context
return nil
}
func (c *VirtualClusterPolicyReconciler) reconcileQuota(ctx context.Context, namespace string, policy *v1alpha1.VirtualClusterPolicy) error {
func (c *VirtualClusterPolicyReconciler) reconcileQuota(ctx context.Context, namespace string, policy *v1beta1.VirtualClusterPolicy) error {
log := ctrl.LoggerFrom(ctx)
log.Info("reconciling ResourceQuota")
log.V(1).Info("Reconciling ResourceQuota")
if policy.Spec.Quota == nil {
// check if the ResourceQuota object exists and delete it.
@@ -357,6 +365,8 @@ func (c *VirtualClusterPolicyReconciler) reconcileQuota(ctx context.Context, nam
return client.IgnoreNotFound(err)
}
log.V(1).Info("Deleting ResourceQuota")
return c.Client.Delete(ctx, &toDeleteResourceQuota)
}
@@ -381,17 +391,21 @@ func (c *VirtualClusterPolicyReconciler) reconcileQuota(ctx context.Context, nam
return err
}
log.V(1).Info("Creating ResourceQuota")
err := c.Client.Create(ctx, resourceQuota)
if apierrors.IsAlreadyExists(err) {
log.V(1).Info("ResourceQuota already exists, updating.")
return c.Client.Update(ctx, resourceQuota)
}
return err
}
func (c *VirtualClusterPolicyReconciler) reconcileLimit(ctx context.Context, namespace string, policy *v1alpha1.VirtualClusterPolicy) error {
func (c *VirtualClusterPolicyReconciler) reconcileLimit(ctx context.Context, namespace string, policy *v1beta1.VirtualClusterPolicy) error {
log := ctrl.LoggerFrom(ctx)
log.Info("reconciling LimitRange")
log.V(1).Info("Reconciling LimitRange")
// delete the LimitRange if spec.limit isn't specified.
if policy.Spec.Limit == nil {
@@ -406,6 +420,8 @@ func (c *VirtualClusterPolicyReconciler) reconcileLimit(ctx context.Context, nam
return client.IgnoreNotFound(err)
}
log.V(1).Info("Deleting LimitRange")
return c.Client.Delete(ctx, &toDeleteLimitRange)
}
@@ -429,19 +445,23 @@ func (c *VirtualClusterPolicyReconciler) reconcileLimit(ctx context.Context, nam
return err
}
log.V(1).Info("Creating LimitRange")
err := c.Client.Create(ctx, limitRange)
if apierrors.IsAlreadyExists(err) {
log.V(1).Info("LimitRange already exists, updating.")
return c.Client.Update(ctx, limitRange)
}
return err
}
func (c *VirtualClusterPolicyReconciler) reconcileClusters(ctx context.Context, namespace *v1.Namespace, policy *v1alpha1.VirtualClusterPolicy) error {
func (c *VirtualClusterPolicyReconciler) reconcileClusters(ctx context.Context, namespace *v1.Namespace, policy *v1beta1.VirtualClusterPolicy) error {
log := ctrl.LoggerFrom(ctx)
log.Info("reconciling Clusters")
log.V(1).Info("Reconciling Clusters")
var clusters v1alpha1.ClusterList
var clusters v1beta1.ClusterList
if err := c.Client.List(ctx, &clusters, client.InNamespace(namespace.Name)); err != nil {
return err
}
@@ -455,6 +475,8 @@ func (c *VirtualClusterPolicyReconciler) reconcileClusters(ctx context.Context,
cluster.Spec.NodeSelector = policy.Spec.DefaultNodeSelector
if !reflect.DeepEqual(orig, cluster) {
log.V(1).Info("Updating Cluster", "cluster", cluster.Name, "namespace", namespace.Name)
// continue updating also the other clusters even if an error occurred
clusterUpdateErrs = append(clusterUpdateErrs, c.Client.Update(ctx, &cluster))
}

View File

@@ -16,7 +16,7 @@ import (
networkingv1 "k8s.io/api/networking/v1"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller/policy"
. "github.com/onsi/ginkgo/v2"
@@ -81,7 +81,7 @@ func buildScheme() *runtime.Scheme {
Expect(err).NotTo(HaveOccurred())
err = networkingv1.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
err = v1alpha1.AddToScheme(scheme)
err = v1beta1.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
return scheme

View File

@@ -15,7 +15,7 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/policy"
@@ -26,25 +26,25 @@ import (
var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("VirtualClusterPolicy"), func() {
Context("creating a VirtualClusterPolicy", func() {
It("should have the 'shared' allowedMode", func() {
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{})
Expect(policy.Spec.AllowedMode).To(Equal(v1alpha1.SharedClusterMode))
policy := newPolicy(v1beta1.VirtualClusterPolicySpec{})
Expect(policy.Spec.AllowedMode).To(Equal(v1beta1.SharedClusterMode))
})
It("should have the 'virtual' mode if specified", func() {
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
AllowedMode: v1alpha1.VirtualClusterMode,
policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
AllowedMode: v1beta1.VirtualClusterMode,
})
Expect(policy.Spec.AllowedMode).To(Equal(v1alpha1.VirtualClusterMode))
Expect(policy.Spec.AllowedMode).To(Equal(v1beta1.VirtualClusterMode))
})
It("should fail for a non-existing mode", func() {
policy := &v1alpha1.VirtualClusterPolicy{
policy := &v1beta1.VirtualClusterPolicy{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "policy-",
},
Spec: v1alpha1.VirtualClusterPolicySpec{
AllowedMode: v1alpha1.ClusterMode("non-existing"),
Spec: v1beta1.VirtualClusterPolicySpec{
AllowedMode: v1beta1.ClusterMode("non-existing"),
},
}
@@ -67,7 +67,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
})
It("should create a NetworkPolicy", func() {
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{})
policy := newPolicy(v1beta1.VirtualClusterPolicySpec{})
bindPolicyToNamespace(namespace, policy)
// look for network policies etc
@@ -122,7 +122,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
})
It("should recreate the NetworkPolicy if deleted", func() {
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{})
policy := newPolicy(v1beta1.VirtualClusterPolicySpec{})
bindPolicyToNamespace(namespace, policy)
// look for network policy
@@ -164,12 +164,12 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
It("should add and update the proper pod-security labels to the namespace", func() {
var (
privileged = v1alpha1.PrivilegedPodSecurityAdmissionLevel
baseline = v1alpha1.BaselinePodSecurityAdmissionLevel
restricted = v1alpha1.RestrictedPodSecurityAdmissionLevel
privileged = v1beta1.PrivilegedPodSecurityAdmissionLevel
baseline = v1beta1.BaselinePodSecurityAdmissionLevel
restricted = v1beta1.RestrictedPodSecurityAdmissionLevel
)
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
PodSecurityAdmissionLevel: &privileged,
})
@@ -264,9 +264,9 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
})
It("should restore the labels if Namespace is updated", func() {
privileged := v1alpha1.PrivilegedPodSecurityAdmissionLevel
privileged := v1beta1.PrivilegedPodSecurityAdmissionLevel
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
PodSecurityAdmissionLevel: &privileged,
})
@@ -308,19 +308,19 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
})
It("should update Cluster's PriorityClass", func() {
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
DefaultPriorityClass: "foobar",
})
bindPolicyToNamespace(namespace, policy)
cluster := &v1alpha1.Cluster{
cluster := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace.Name,
},
Spec: v1alpha1.ClusterSpec{
Mode: v1alpha1.SharedClusterMode,
Spec: v1beta1.ClusterSpec{
Mode: v1beta1.SharedClusterMode,
Servers: ptr.To[int32](1),
Agents: ptr.To[int32](0),
},
@@ -342,7 +342,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
})
It("should update Cluster's NodeSelector", func() {
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
DefaultNodeSelector: map[string]string{"label-1": "value-1"},
})
bindPolicyToNamespace(namespace, policy)
@@ -350,13 +350,13 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
err := k8sClient.Update(ctx, policy)
Expect(err).To(Not(HaveOccurred()))
cluster := &v1alpha1.Cluster{
cluster := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace.Name,
},
Spec: v1alpha1.ClusterSpec{
Mode: v1alpha1.SharedClusterMode,
Spec: v1beta1.ClusterSpec{
Mode: v1beta1.SharedClusterMode,
Servers: ptr.To[int32](1),
Agents: ptr.To[int32](0),
},
@@ -378,18 +378,18 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
})
It("should update the nodeSelector if changed", func() {
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
DefaultNodeSelector: map[string]string{"label-1": "value-1"},
})
bindPolicyToNamespace(namespace, policy)
cluster := &v1alpha1.Cluster{
cluster := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace.Name,
},
Spec: v1alpha1.ClusterSpec{
Mode: v1alpha1.SharedClusterMode,
Spec: v1beta1.ClusterSpec{
Mode: v1beta1.SharedClusterMode,
Servers: ptr.To[int32](1),
Agents: ptr.To[int32](0),
NodeSelector: map[string]string{"label-1": "value-1"},
@@ -426,7 +426,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
// wait a bit and check it's restored
Eventually(func() bool {
var updatedCluster v1alpha1.Cluster
var updatedCluster v1beta1.Cluster
key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}
err = k8sClient.Get(ctx, key, &updatedCluster)
@@ -439,7 +439,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
})
It("should create a ResourceQuota if Quota is enabled", func() {
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
Quota: &v1.ResourceQuotaSpec{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("800m"),
@@ -467,7 +467,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
})
It("should delete the ResourceQuota if Quota is deleted", func() {
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
Quota: &v1.ResourceQuotaSpec{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("800m"),
@@ -513,7 +513,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
})
It("should delete the ResourceQuota if unbound", func() {
clusterPolicy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
clusterPolicy := newPolicy(v1beta1.VirtualClusterPolicySpec{
Quota: &v1.ResourceQuotaSpec{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("800m"),
@@ -558,10 +558,10 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
})
})
func newPolicy(spec v1alpha1.VirtualClusterPolicySpec) *v1alpha1.VirtualClusterPolicy {
func newPolicy(spec v1beta1.VirtualClusterPolicySpec) *v1beta1.VirtualClusterPolicy {
GinkgoHelper()
policy := &v1alpha1.VirtualClusterPolicy{
policy := &v1beta1.VirtualClusterPolicy{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "policy-",
},
@@ -574,7 +574,7 @@ func newPolicy(spec v1alpha1.VirtualClusterPolicySpec) *v1alpha1.VirtualClusterP
return policy
}
func bindPolicyToNamespace(namespace *v1.Namespace, pol *v1alpha1.VirtualClusterPolicy) {
func bindPolicyToNamespace(namespace *v1.Namespace, pol *v1beta1.VirtualClusterPolicy) {
GinkgoHelper()
if len(namespace.Labels) == 0 {

View File

@@ -3,50 +3,36 @@ package log
import (
"os"
"github.com/virtual-kubelet/virtual-kubelet/log"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
ctrlruntimezap "sigs.k8s.io/controller-runtime/pkg/log/zap"
)
type Logger struct {
*zap.SugaredLogger
}
func New(debug bool) *Logger {
return &Logger{newZappLogger(debug).Sugar()}
}
func (l *Logger) WithError(err error) log.Logger {
return l
}
func (l *Logger) WithField(string, any) log.Logger {
return l
}
func (l *Logger) WithFields(field log.Fields) log.Logger {
return l
}
func (l *Logger) Named(name string) *Logger {
l.SugaredLogger = l.SugaredLogger.Named(name)
return l
}
func newZappLogger(debug bool) *zap.Logger {
encCfg := zap.NewProductionEncoderConfig()
encCfg.TimeKey = "timestamp"
encCfg.EncodeTime = zapcore.ISO8601TimeEncoder
func New(debug bool, format string) *zap.Logger {
lvl := zap.NewAtomicLevelAt(zap.InfoLevel)
if debug {
lvl = zap.NewAtomicLevelAt(zap.DebugLevel)
}
encoder := zapcore.NewJSONEncoder(encCfg)
core := zapcore.NewCore(&ctrlruntimezap.KubeAwareEncoder{Encoder: encoder}, zapcore.AddSync(os.Stderr), lvl)
encoder := newEncoder(format)
core := zapcore.NewCore(encoder, zapcore.AddSync(os.Stderr), lvl)
return zap.New(core)
}
func newEncoder(format string) zapcore.Encoder {
encCfg := zap.NewProductionEncoderConfig()
encCfg.TimeKey = "timestamp"
encCfg.EncodeTime = zapcore.ISO8601TimeEncoder
var encoder zapcore.Encoder
if format == "text" {
encCfg.EncodeLevel = zapcore.CapitalColorLevelEncoder
encoder = zapcore.NewConsoleEncoder(encCfg)
} else {
encoder = zapcore.NewJSONEncoder(encCfg)
}
return &ctrlruntimezap.KubeAwareEncoder{Encoder: encoder}
}

View File
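
A short usage sketch of the reworked logger constructor above. The import path is an assumption (the diff only shows package log), and the fields logged are made up:

package main

import (
    "go.uber.org/zap"

    k3klog "github.com/rancher/k3k/pkg/log" // assumed import path for the package above
)

func main() {
    // debug=true lowers the level to Debug; "text" selects the colored
    // console encoder, any other value falls back to JSON.
    logger := k3klog.New(true, "text")
    defer func() { _ = logger.Sync() }()

    logger.Info("starting up", zap.String("component", "k3k"))
    logger.Debug("debug details", zap.Bool("flag", true))
}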

@@ -7,13 +7,13 @@ import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = When("a cluster with custom certificates is installed with individual cert secrets", Label("e2e"), func() {
var _ = When("a cluster with custom certificates is installed with individual cert secrets", Label("e2e"), Label(certificatesTestsLabel), func() {
var virtualCluster *VirtualCluster
BeforeEach(func() {
@@ -21,6 +21,10 @@ var _ = When("a cluster with custom certificates is installed with individual ce
namespace := NewNamespace()
DeferCleanup(func() {
DeleteNamespaces(namespace.Name)
})
// create custom cert secret
customCertDir := "testdata/customcerts/"
@@ -56,25 +60,25 @@ var _ = When("a cluster with custom certificates is installed with individual ce
cluster := NewCluster(namespace.Name)
cluster.Spec.CustomCAs = v1alpha1.CustomCAs{
cluster.Spec.CustomCAs = &v1beta1.CustomCAs{
Enabled: true,
Sources: v1alpha1.CredentialSources{
ServerCA: v1alpha1.CredentialSource{
Sources: v1beta1.CredentialSources{
ServerCA: v1beta1.CredentialSource{
SecretName: "server-ca",
},
ClientCA: v1alpha1.CredentialSource{
ClientCA: v1beta1.CredentialSource{
SecretName: "client-ca",
},
ETCDServerCA: v1alpha1.CredentialSource{
ETCDServerCA: v1beta1.CredentialSource{
SecretName: "etcd-server-ca",
},
ETCDPeerCA: v1alpha1.CredentialSource{
ETCDPeerCA: v1beta1.CredentialSource{
SecretName: "etcd-peer-ca",
},
RequestHeaderCA: v1alpha1.CredentialSource{
RequestHeaderCA: v1beta1.CredentialSource{
SecretName: "request-header-ca",
},
ServiceAccountToken: v1alpha1.CredentialSource{
ServiceAccountToken: v1beta1.CredentialSource{
SecretName: "service",
},
},

View File

@@ -5,7 +5,7 @@ import (
. "github.com/onsi/gomega"
)
var _ = When("two virtual clusters are installed", Label("e2e"), func() {
var _ = When("two virtual clusters are installed", Label("e2e"), Label(networkingTestsLabel), func() {
var (
cluster1 *VirtualCluster
cluster2 *VirtualCluster
@@ -28,7 +28,6 @@ var _ = When("two virtual clusters are installed", Label("e2e"), func() {
var (
stdout string
stderr string
curlCmd string
err error
)
@@ -70,25 +69,25 @@ var _ = When("two virtual clusters are installed", Label("e2e"), func() {
// Pods in Cluster 1 should not be able to reach the Pod in Cluster 2
curlCmd = "curl --no-progress-meter " + pod1Cluster2IP
_, stderr, err = cluster1.ExecCmd(pod1Cluster1, curlCmd)
stdout, _, err = cluster1.ExecCmd(pod1Cluster1, curlCmd)
Expect(err).Should(HaveOccurred())
Expect(stderr).To(ContainSubstring("Failed to connect"))
Expect(stdout).To(Not(ContainSubstring("Welcome to nginx!")))
curlCmd = "curl --no-progress-meter " + pod1Cluster2IP
_, stderr, err = cluster1.ExecCmd(pod2Cluster1, curlCmd)
stdout, _, err = cluster1.ExecCmd(pod2Cluster1, curlCmd)
Expect(err).To(HaveOccurred())
Expect(stderr).To(ContainSubstring("Failed to connect"))
Expect(stdout).To(Not(ContainSubstring("Welcome to nginx!")))
// Pod in Cluster 2 should not be able to reach Pods in Cluster 1
curlCmd = "curl --no-progress-meter " + pod1Cluster1IP
_, stderr, err = cluster2.ExecCmd(pod1Cluster2, curlCmd)
stdout, _, err = cluster2.ExecCmd(pod1Cluster2, curlCmd)
Expect(err).To(HaveOccurred())
Expect(stderr).To(ContainSubstring("Failed to connect"))
Expect(stdout).To(Not(ContainSubstring("Welcome to nginx!")))
curlCmd = "curl --no-progress-meter " + pod2Cluster1IP
_, stderr, err = cluster2.ExecCmd(pod1Cluster2, curlCmd)
stdout, _, err = cluster2.ExecCmd(pod1Cluster2, curlCmd)
Expect(err).To(HaveOccurred())
Expect(stderr).To(ContainSubstring("Failed to connect"))
Expect(stdout).To(Not(ContainSubstring("Welcome to nginx!")))
})
})

View File

@@ -6,29 +6,53 @@ import (
"errors"
"time"
"k8s.io/utils/ptr"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = When("an ephemeral cluster is installed", Label("e2e"), func() {
var _ = When("an ephemeral cluster is installed", Label("e2e"), Label(persistenceTestsLabel), func() {
var virtualCluster *VirtualCluster
BeforeEach(func() {
virtualCluster = NewVirtualCluster()
})
AfterEach(func() {
DeleteNamespaces(virtualCluster.Cluster.Namespace)
DeferCleanup(func() {
DeleteNamespaces(virtualCluster.Cluster.Namespace)
})
})
It("can create a nginx pod", func() {
_, _ = virtualCluster.NewNginxPod("")
})
It("deletes the pod in the virtual cluster when deleted from the host", func() {
ctx := context.Background()
pod, _ := virtualCluster.NewNginxPod("")
hostTranslator := translate.NewHostTranslator(virtualCluster.Cluster)
namespacedName := hostTranslator.NamespacedName(pod)
err := k8s.CoreV1().Pods(namespacedName.Namespace).Delete(ctx, namespacedName.Name, v1.DeleteOptions{})
Expect(err).To(Not(HaveOccurred()))
Eventually(func() bool {
_, err := virtualCluster.Client.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, v1.GetOptions{})
return apierrors.IsNotFound(err)
}).
WithPolling(time.Second * 5).
WithTimeout(time.Minute).
Should(BeTrue())
})
It("regenerates the bootstrap secret after a restart", func() {
ctx := context.Background()
@@ -86,11 +110,11 @@ var _ = When("an ephemeral cluster is installed", Label("e2e"), func() {
})
})
var _ = When("a dynamic cluster is installed", Label("e2e"), func() {
var _ = When("a dynamic cluster is installed", Label("e2e"), Label(persistenceTestsLabel), func() {
var virtualCluster *VirtualCluster
BeforeEach(func() {
virtualCluster = NewVirtualClusterWithType(v1alpha1.DynamicPersistenceMode)
virtualCluster = NewVirtualClusterWithType(v1beta1.DynamicPersistenceMode)
})
AfterEach(func() {
@@ -101,6 +125,72 @@ var _ = When("a dynamic cluster is installed", Label("e2e"), func() {
_, _ = virtualCluster.NewNginxPod("")
})
It("can delete the cluster", func() {
ctx := context.Background()
By("Deleting cluster")
err := k8sClient.Delete(ctx, virtualCluster.Cluster)
Expect(err).To(Not(HaveOccurred()))
Eventually(func() []corev1.Pod {
By("listing the pods in the namespace")
podList, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{})
Expect(err).To(Not(HaveOccurred()))
GinkgoLogr.Info("podlist", "len", len(podList.Items))
return podList.Items
}).
WithTimeout(2 * time.Minute).
WithPolling(time.Second).
Should(BeEmpty())
})
It("can delete a HA cluster", func() {
ctx := context.Background()
namespace := NewNamespace()
DeferCleanup(func() {
DeleteNamespaces(namespace.Name)
})
cluster := NewCluster(namespace.Name)
cluster.Spec.Persistence.Type = v1beta1.DynamicPersistenceMode
cluster.Spec.Servers = ptr.To[int32](2)
CreateCluster(cluster)
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
virtualCluster := &VirtualCluster{
Cluster: cluster,
RestConfig: restConfig,
Client: client,
}
By("Deleting cluster")
err := k8sClient.Delete(ctx, virtualCluster.Cluster)
Expect(err).To(Not(HaveOccurred()))
Eventually(func() []corev1.Pod {
By("listing the pods in the namespace")
podList, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{})
Expect(err).To(Not(HaveOccurred()))
GinkgoLogr.Info("podlist", "len", len(podList.Items))
return podList.Items
}).
WithTimeout(time.Minute * 3).
WithPolling(time.Second).
Should(BeEmpty())
})
It("uses the same bootstrap secret after a restart", func() {
ctx := context.Background()

View File

@@ -10,7 +10,7 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/controller/policy"
@@ -18,24 +18,28 @@ import (
. "github.com/onsi/gomega"
)
var _ = When("a cluster's status is tracked", Label("e2e"), func() {
var _ = When("a cluster's status is tracked", Label("e2e"), Label(statusTestsLabel), func() {
var (
namespace *corev1.Namespace
vcp *v1alpha1.VirtualClusterPolicy
vcp *v1beta1.VirtualClusterPolicy
)
// This BeforeEach/AfterEach will create a new namespace and a default policy for each test.
BeforeEach(func() {
ctx := context.Background()
namespace = NewNamespace()
vcp = &v1alpha1.VirtualClusterPolicy{
vcp = &v1beta1.VirtualClusterPolicy{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "policy-",
},
}
Expect(k8sClient.Create(ctx, vcp)).To(Succeed())
namespace = NewNamespace()
err := k8sClient.Get(ctx, client.ObjectKeyFromObject(namespace), namespace)
Expect(err).To(Not(HaveOccurred()))
namespace.Labels = map[string]string{
policy.PolicyNameLabelKey: vcp.Name,
}
@@ -53,7 +57,7 @@ var _ = When("a cluster's status is tracked", Label("e2e"), func() {
It("should start with Provisioning status and transition to Ready", func() {
ctx := context.Background()
clusterObj := &v1alpha1.Cluster{
clusterObj := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "status-cluster-",
Namespace: namespace.Name,
@@ -68,7 +72,7 @@ var _ = When("a cluster's status is tracked", Label("e2e"), func() {
err := k8sClient.Get(ctx, clusterKey, clusterObj)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(clusterObj.Status.Phase).To(Equal(v1alpha1.ClusterProvisioning))
g.Expect(clusterObj.Status.Phase).To(Equal(v1beta1.ClusterProvisioning))
cond := meta.FindStatusCondition(clusterObj.Status.Conditions, cluster.ConditionReady)
g.Expect(cond).NotTo(BeNil())
@@ -84,7 +88,7 @@ var _ = When("a cluster's status is tracked", Label("e2e"), func() {
err := k8sClient.Get(ctx, clusterKey, clusterObj)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(clusterObj.Status.Phase).To(Equal(v1alpha1.ClusterReady))
g.Expect(clusterObj.Status.Phase).To(Equal(v1beta1.ClusterReady))
cond := meta.FindStatusCondition(clusterObj.Status.Conditions, cluster.ConditionReady)
g.Expect(cond).NotTo(BeNil())
@@ -101,13 +105,13 @@ var _ = When("a cluster's status is tracked", Label("e2e"), func() {
It("should be in Pending status with ValidationFailed reason", func() {
ctx := context.Background()
clusterObj := &v1alpha1.Cluster{
clusterObj := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace.Name,
},
Spec: v1alpha1.ClusterSpec{
Mode: v1alpha1.VirtualClusterMode,
Spec: v1beta1.ClusterSpec{
Mode: v1beta1.VirtualClusterMode,
},
}
Expect(k8sClient.Create(ctx, clusterObj)).To(Succeed())
@@ -119,7 +123,7 @@ var _ = When("a cluster's status is tracked", Label("e2e"), func() {
err := k8sClient.Get(ctx, clusterKey, clusterObj)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(clusterObj.Status.Phase).To(Equal(v1alpha1.ClusterPending))
g.Expect(clusterObj.Status.Phase).To(Equal(v1beta1.ClusterPending))
cond := meta.FindStatusCondition(clusterObj.Status.Conditions, cluster.ConditionReady)
g.Expect(cond).NotTo(BeNil())

tests/cluster_sync_test.go (new file, 145 lines)
View File

@@ -0,0 +1,145 @@
package k3k_test
import (
"context"
"time"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/k3k-kubelet/translate"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = When("a shared mode cluster is created", Ordered, Label("e2e"), func() {
var (
virtualCluster *VirtualCluster
virtualConfigMap *corev1.ConfigMap
virtualService *corev1.Service
)
BeforeAll(func() {
virtualCluster = NewVirtualCluster()
DeferCleanup(func() {
DeleteNamespaces(virtualCluster.Cluster.Namespace)
})
})
When("a ConfigMap is created in the virtual cluster", func() {
BeforeAll(func() {
ctx := context.Background()
virtualConfigMap = &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "test-cm",
Namespace: "default",
},
}
var err error
virtualConfigMap, err = virtualCluster.Client.CoreV1().ConfigMaps("default").Create(ctx, virtualConfigMap, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
})
It("is replicated in the host cluster", func() {
ctx := context.Background()
hostTranslator := translate.NewHostTranslator(virtualCluster.Cluster)
namespacedName := hostTranslator.NamespacedName(virtualConfigMap)
// check that the ConfigMap is synced in the host cluster
Eventually(func(g Gomega) {
_, err := k8s.CoreV1().ConfigMaps(namespacedName.Namespace).Get(ctx, namespacedName.Name, metav1.GetOptions{})
g.Expect(err).To(Not(HaveOccurred()))
}).
WithTimeout(time.Minute).
WithPolling(time.Second).
Should(Succeed())
})
})
When("a Service is created in the virtual cluster", func() {
BeforeAll(func() {
ctx := context.Background()
virtualService = &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "test-svc",
Namespace: "default",
},
Spec: corev1.ServiceSpec{
Type: corev1.ServiceTypeClusterIP,
Ports: []corev1.ServicePort{{Port: 8888}},
},
}
var err error
virtualService, err = virtualCluster.Client.CoreV1().Services("default").Create(ctx, virtualService, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
})
It("is replicated in the host cluster", func() {
ctx := context.Background()
hostTranslator := translate.NewHostTranslator(virtualCluster.Cluster)
namespacedName := hostTranslator.NamespacedName(virtualService)
// check that the Service is synced in the host cluster
Eventually(func(g Gomega) {
_, err := k8s.CoreV1().Services(namespacedName.Namespace).Get(ctx, namespacedName.Name, metav1.GetOptions{})
g.Expect(err).To(Not(HaveOccurred()))
}).
WithTimeout(time.Minute).
WithPolling(time.Second).
Should(Succeed())
})
})
When("the cluster is deleted", func() {
BeforeAll(func() {
ctx := context.Background()
By("Deleting cluster")
err := k8sClient.Delete(ctx, virtualCluster.Cluster)
Expect(err).To(Not(HaveOccurred()))
})
It("will delete the ConfigMap from the host cluster", func() {
ctx := context.Background()
hostTranslator := translate.NewHostTranslator(virtualCluster.Cluster)
namespacedName := hostTranslator.NamespacedName(virtualConfigMap)
// check that the ConfigMap is deleted from the host cluster
Eventually(func(g Gomega) {
_, err := k8s.CoreV1().ConfigMaps(namespacedName.Namespace).Get(ctx, namespacedName.Name, metav1.GetOptions{})
g.Expect(apierrors.IsNotFound(err)).To(BeTrue())
}).
WithTimeout(time.Minute).
WithPolling(time.Second).
Should(Succeed())
})
It("will delete the Service from the host cluster", func() {
ctx := context.Background()
hostTranslator := translate.NewHostTranslator(virtualCluster.Cluster)
namespacedName := hostTranslator.NamespacedName(virtualService)
// check that the Service is deleted from the host cluster
Eventually(func(g Gomega) {
_, err := k8s.CoreV1().Services(namespacedName.Namespace).Get(ctx, namespacedName.Name, metav1.GetOptions{})
g.Expect(apierrors.IsNotFound(err)).To(BeTrue())
}).
WithTimeout(time.Minute).
WithPolling(time.Second).
Should(Succeed())
})
})
})

View File

@@ -0,0 +1,990 @@
package k3k_test
import (
"context"
"strings"
"time"
"k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/utils/ptr"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = When("a shared mode cluster update its envs", Label("e2e"), Label(updateTestsLabel), Label(slowTestsLabel), func() {
var virtualCluster *VirtualCluster
ctx := context.Background()
BeforeEach(func() {
namespace := NewNamespace()
DeferCleanup(func() {
DeleteNamespaces(namespace.Name)
})
cluster := NewCluster(namespace.Name)
// Add initial environment variables for server
cluster.Spec.ServerEnvs = []v1.EnvVar{
{
Name: "TEST_SERVER_ENV_1",
Value: "not_upgraded",
},
{
Name: "TEST_SERVER_ENV_2",
Value: "toBeRemoved",
},
}
// Add initial environment variables for agent
cluster.Spec.AgentEnvs = []v1.EnvVar{
{
Name: "TEST_AGENT_ENV_1",
Value: "not_upgraded",
},
{
Name: "TEST_AGENT_ENV_2",
Value: "toBeRemoved",
},
}
CreateCluster(cluster)
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
virtualCluster = &VirtualCluster{
Cluster: cluster,
RestConfig: restConfig,
Client: client,
}
sPods := listServerPods(ctx, virtualCluster)
Expect(len(sPods)).To(Equal(1))
serverPod := sPods[0]
serverEnv1, ok := getEnv(&serverPod, "TEST_SERVER_ENV_1")
Expect(ok).To(BeTrue())
Expect(serverEnv1).To(Equal("not_upgraded"))
serverEnv2, ok := getEnv(&serverPod, "TEST_SERVER_ENV_2")
Expect(ok).To(BeTrue())
Expect(serverEnv2).To(Equal("toBeRemoved"))
var nodes v1.NodeList
Expect(k8sClient.List(ctx, &nodes)).To(Succeed())
aPods := listAgentPods(ctx, virtualCluster)
Expect(aPods).To(HaveLen(len(nodes.Items)))
agentPod := aPods[0]
agentEnv1, ok := getEnv(&agentPod, "TEST_AGENT_ENV_1")
Expect(ok).To(BeTrue())
Expect(agentEnv1).To(Equal("not_upgraded"))
agentEnv2, ok := getEnv(&agentPod, "TEST_AGENT_ENV_2")
Expect(ok).To(BeTrue())
Expect(agentEnv2).To(Equal("toBeRemoved"))
})
It("will update server and agent envs when cluster is updated", func() {
Eventually(func(g Gomega) {
var cluster v1beta1.Cluster
err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
g.Expect(err).NotTo(HaveOccurred())
// update both agent and server envs
cluster.Spec.ServerEnvs = []v1.EnvVar{
{
Name: "TEST_SERVER_ENV_1",
Value: "upgraded",
},
{
Name: "TEST_SERVER_ENV_3",
Value: "new",
},
}
cluster.Spec.AgentEnvs = []v1.EnvVar{
{
Name: "TEST_AGENT_ENV_1",
Value: "upgraded",
},
{
Name: "TEST_AGENT_ENV_3",
Value: "new",
},
}
err = k8sClient.Update(ctx, &cluster)
g.Expect(err).NotTo(HaveOccurred())
// server pods
serverPods := listServerPods(ctx, virtualCluster)
g.Expect(len(serverPods)).To(Equal(1))
serverEnv1, ok := getEnv(&serverPods[0], "TEST_SERVER_ENV_1")
g.Expect(ok).To(BeTrue())
g.Expect(serverEnv1).To(Equal("upgraded"))
_, ok = getEnv(&serverPods[0], "TEST_SERVER_ENV_2")
g.Expect(ok).To(BeFalse())
serverEnv3, ok := getEnv(&serverPods[0], "TEST_SERVER_ENV_3")
g.Expect(ok).To(BeTrue())
g.Expect(serverEnv3).To(Equal("new"))
// agent pods
var nodes v1.NodeList
g.Expect(k8sClient.List(ctx, &nodes)).To(Succeed())
aPods := listAgentPods(ctx, virtualCluster)
g.Expect(aPods).To(HaveLen(len(nodes.Items)))
agentEnv1, ok := getEnv(&aPods[0], "TEST_AGENT_ENV_1")
g.Expect(ok).To(BeTrue())
g.Expect(agentEnv1).To(Equal("upgraded"))
_, ok = getEnv(&aPods[0], "TEST_AGENT_ENV_2")
g.Expect(ok).To(BeFalse())
agentEnv3, ok := getEnv(&aPods[0], "TEST_AGENT_ENV_3")
g.Expect(ok).To(BeTrue())
g.Expect(agentEnv3).To(Equal("new"))
}).
WithPolling(time.Second * 2).
WithTimeout(time.Minute * 2).
Should(Succeed())
})
})
var _ = When("a shared mode cluster update its server args", Label("e2e"), Label(updateTestsLabel), Label(slowTestsLabel), func() {
var virtualCluster *VirtualCluster
ctx := context.Background()
BeforeEach(func() {
namespace := NewNamespace()
DeferCleanup(func() {
DeleteNamespaces(namespace.Name)
})
cluster := NewCluster(namespace.Name)
// Add initial args for server
cluster.Spec.ServerArgs = []string{
"--node-label=test_server=not_upgraded",
}
CreateCluster(cluster)
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
virtualCluster = &VirtualCluster{
Cluster: cluster,
RestConfig: restConfig,
Client: client,
}
sPods := listServerPods(ctx, virtualCluster)
Expect(len(sPods)).To(Equal(1))
serverPod := sPods[0]
Expect(isArgFound(&serverPod, "--node-label=test_server=not_upgraded")).To(BeTrue())
})
It("will update server args", func() {
Eventually(func(g Gomega) {
var cluster v1beta1.Cluster
err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
g.Expect(err).NotTo(HaveOccurred())
cluster.Spec.ServerArgs = []string{
"--node-label=test_server=upgraded",
}
err = k8sClient.Update(ctx, &cluster)
g.Expect(err).NotTo(HaveOccurred())
// server pods
sPods := listServerPods(ctx, virtualCluster)
g.Expect(len(sPods)).To(Equal(1))
g.Expect(isArgFound(&sPods[0], "--node-label=test_server=upgraded")).To(BeTrue())
}).
WithPolling(time.Second * 2).
WithTimeout(time.Minute * 2).
Should(Succeed())
})
})
var _ = When("a virtual mode cluster update its envs", Label("e2e"), Label(updateTestsLabel), Label(slowTestsLabel), func() {
var virtualCluster *VirtualCluster
ctx := context.Background()
BeforeEach(func() {
namespace := NewNamespace()
DeferCleanup(func() {
DeleteNamespaces(namespace.Name)
})
cluster := NewCluster(namespace.Name)
// Add initial environment variables for server
cluster.Spec.ServerEnvs = []v1.EnvVar{
{
Name: "TEST_SERVER_ENV_1",
Value: "not_upgraded",
},
{
Name: "TEST_SERVER_ENV_2",
Value: "toBeRemoved",
},
}
// Add initial environment variables for agent
cluster.Spec.AgentEnvs = []v1.EnvVar{
{
Name: "TEST_AGENT_ENV_1",
Value: "not_upgraded",
},
{
Name: "TEST_AGENT_ENV_2",
Value: "toBeRemoved",
},
}
cluster.Spec.Mode = v1beta1.VirtualClusterMode
cluster.Spec.Agents = ptr.To[int32](1)
CreateCluster(cluster)
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
virtualCluster = &VirtualCluster{
Cluster: cluster,
RestConfig: restConfig,
Client: client,
}
sPods := listServerPods(ctx, virtualCluster)
Expect(len(sPods)).To(Equal(1))
serverPod := sPods[0]
serverEnv1, ok := getEnv(&serverPod, "TEST_SERVER_ENV_1")
Expect(ok).To(BeTrue())
Expect(serverEnv1).To(Equal("not_upgraded"))
serverEnv2, ok := getEnv(&serverPod, "TEST_SERVER_ENV_2")
Expect(ok).To(BeTrue())
Expect(serverEnv2).To(Equal("toBeRemoved"))
aPods := listAgentPods(ctx, virtualCluster)
Expect(len(aPods)).To(Equal(1))
agentPod := aPods[0]
agentEnv1, ok := getEnv(&agentPod, "TEST_AGENT_ENV_1")
Expect(ok).To(BeTrue())
Expect(agentEnv1).To(Equal("not_upgraded"))
agentEnv2, ok := getEnv(&agentPod, "TEST_AGENT_ENV_2")
Expect(ok).To(BeTrue())
Expect(agentEnv2).To(Equal("toBeRemoved"))
})
It("will update server and agent envs when cluster is updated", func() {
Eventually(func(g Gomega) {
var cluster v1beta1.Cluster
err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
g.Expect(err).NotTo(HaveOccurred())
// update both agent and server envs
cluster.Spec.ServerEnvs = []v1.EnvVar{
{
Name: "TEST_SERVER_ENV_1",
Value: "upgraded",
},
{
Name: "TEST_SERVER_ENV_3",
Value: "new",
},
}
cluster.Spec.AgentEnvs = []v1.EnvVar{
{
Name: "TEST_AGENT_ENV_1",
Value: "upgraded",
},
{
Name: "TEST_AGENT_ENV_3",
Value: "new",
},
}
err = k8sClient.Update(ctx, &cluster)
g.Expect(err).NotTo(HaveOccurred())
// server pods
serverPods := listServerPods(ctx, virtualCluster)
g.Expect(len(serverPods)).To(Equal(1))
serverEnv1, ok := getEnv(&serverPods[0], "TEST_SERVER_ENV_1")
g.Expect(ok).To(BeTrue())
g.Expect(serverEnv1).To(Equal("upgraded"))
_, ok = getEnv(&serverPods[0], "TEST_SERVER_ENV_2")
g.Expect(ok).To(BeFalse())
serverEnv3, ok := getEnv(&serverPods[0], "TEST_SERVER_ENV_3")
g.Expect(ok).To(BeTrue())
g.Expect(serverEnv3).To(Equal("new"))
// agent pods
aPods := listAgentPods(ctx, virtualCluster)
g.Expect(len(aPods)).To(Equal(1))
agentEnv1, ok := getEnv(&aPods[0], "TEST_AGENT_ENV_1")
g.Expect(ok).To(BeTrue())
g.Expect(agentEnv1).To(Equal("upgraded"))
_, ok = getEnv(&aPods[0], "TEST_AGENT_ENV_2")
g.Expect(ok).To(BeFalse())
agentEnv3, ok := getEnv(&aPods[0], "TEST_AGENT_ENV_3")
g.Expect(ok).To(BeTrue())
g.Expect(agentEnv3).To(Equal("new"))
}).
WithPolling(time.Second * 2).
WithTimeout(time.Minute * 2).
Should(Succeed())
})
})
var _ = When("a virtual mode cluster update its server args", Label("e2e"), Label(updateTestsLabel), Label(slowTestsLabel), func() {
var virtualCluster *VirtualCluster
ctx := context.Background()
BeforeEach(func() {
namespace := NewNamespace()
DeferCleanup(func() {
DeleteNamespaces(namespace.Name)
})
cluster := NewCluster(namespace.Name)
// Add initial args for server
cluster.Spec.ServerArgs = []string{
"--node-label=test_server=not_upgraded",
}
cluster.Spec.Mode = v1beta1.VirtualClusterMode
cluster.Spec.Agents = ptr.To[int32](1)
CreateCluster(cluster)
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
virtualCluster = &VirtualCluster{
Cluster: cluster,
RestConfig: restConfig,
Client: client,
}
sPods := listServerPods(ctx, virtualCluster)
Expect(len(sPods)).To(Equal(1))
serverPod := sPods[0]
Expect(isArgFound(&serverPod, "--node-label=test_server=not_upgraded")).To(BeTrue())
})
It("will update server args", func() {
Eventually(func(g Gomega) {
var cluster v1beta1.Cluster
err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
g.Expect(err).NotTo(HaveOccurred())
cluster.Spec.ServerArgs = []string{
"--node-label=test_server=upgraded",
}
err = k8sClient.Update(ctx, &cluster)
g.Expect(err).NotTo(HaveOccurred())
// server pods
sPods := listServerPods(ctx, virtualCluster)
g.Expect(len(sPods)).To(Equal(1))
g.Expect(isArgFound(&sPods[0], "--node-label=test_server=upgraded")).To(BeTrue())
}).
WithPolling(time.Second * 2).
WithTimeout(time.Minute * 2).
Should(Succeed())
})
})
var _ = When("a shared mode cluster update its version", Label("e2e"), Label(updateTestsLabel), Label(slowTestsLabel), func() {
var (
virtualCluster *VirtualCluster
nginxPod *v1.Pod
)
BeforeEach(func() {
ctx := context.Background()
namespace := NewNamespace()
DeferCleanup(func() {
DeleteNamespaces(namespace.Name)
})
cluster := NewCluster(namespace.Name)
// Add initial version
cluster.Spec.Version = "v1.31.13-k3s1"
// need to enable persistence for this
cluster.Spec.Persistence = v1beta1.PersistenceConfig{
Type: v1beta1.DynamicPersistenceMode,
}
CreateCluster(cluster)
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
virtualCluster = &VirtualCluster{
Cluster: cluster,
RestConfig: restConfig,
Client: client,
}
sPods := listServerPods(ctx, virtualCluster)
Expect(len(sPods)).To(Equal(1))
serverPod := sPods[0]
Expect(serverPod.Spec.Containers[0].Image).To(Equal("rancher/k3s:" + cluster.Spec.Version))
nginxPod, _ = virtualCluster.NewNginxPod("")
DeferCleanup(func() {
DeleteNamespaces(virtualCluster.Cluster.Namespace)
})
})
It("will update server version when version spec is updated", func() {
var cluster v1beta1.Cluster
ctx := context.Background()
err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
Expect(err).NotTo(HaveOccurred())
// update cluster version
cluster.Spec.Version = "v1.32.8-k3s1"
err = k8sClient.Update(ctx, &cluster)
Expect(err).NotTo(HaveOccurred())
Eventually(func(g Gomega) {
// server pods
serverPods := listServerPods(ctx, virtualCluster)
g.Expect(len(serverPods)).To(Equal(1))
serverPod := serverPods[0]
_, cond := pod.GetPodCondition(&serverPod.Status, v1.PodReady)
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
g.Expect(serverPod.Spec.Containers[0].Image).To(Equal("rancher/k3s:" + cluster.Spec.Version))
clusterVersion, err := virtualCluster.Client.Discovery().ServerVersion()
g.Expect(err).To(BeNil())
g.Expect(clusterVersion.String()).To(Equal(strings.ReplaceAll(cluster.Spec.Version, "-", "+")))
nginxPod, err = virtualCluster.Client.CoreV1().Pods(nginxPod.Namespace).Get(ctx, nginxPod.Name, metav1.GetOptions{})
g.Expect(err).To(BeNil())
_, cond = pod.GetPodCondition(&nginxPod.Status, v1.PodReady)
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
}).
WithPolling(time.Second * 5).
WithTimeout(time.Minute * 3).
Should(Succeed())
})
})
var _ = When("a virtual mode cluster update its version", Label("e2e"), Label(updateTestsLabel), Label(slowTestsLabel), func() {
var (
virtualCluster *VirtualCluster
nginxPod *v1.Pod
)
BeforeEach(func() {
ctx := context.Background()
namespace := NewNamespace()
DeferCleanup(func() {
DeleteNamespaces(namespace.Name)
})
cluster := NewCluster(namespace.Name)
// Add initial version
cluster.Spec.Version = "v1.31.13-k3s1"
cluster.Spec.Mode = v1beta1.VirtualClusterMode
cluster.Spec.Agents = ptr.To[int32](1)
// need to enable persistence for this
cluster.Spec.Persistence = v1beta1.PersistenceConfig{
Type: v1beta1.DynamicPersistenceMode,
}
CreateCluster(cluster)
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
virtualCluster = &VirtualCluster{
Cluster: cluster,
RestConfig: restConfig,
Client: client,
}
sPods := listServerPods(ctx, virtualCluster)
Expect(len(sPods)).To(Equal(1))
serverPod := sPods[0]
Expect(serverPod.Spec.Containers[0].Image).To(Equal("rancher/k3s:" + cluster.Spec.Version))
aPods := listAgentPods(ctx, virtualCluster)
Expect(len(aPods)).To(Equal(1))
agentPod := aPods[0]
Expect(agentPod.Spec.Containers[0].Image).To(Equal("rancher/k3s:" + cluster.Spec.Version))
nginxPod, _ = virtualCluster.NewNginxPod("")
})
It("will update server version when version spec is updated", func() {
var cluster v1beta1.Cluster
ctx := context.Background()
err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
Expect(err).NotTo(HaveOccurred())
// update cluster version
cluster.Spec.Version = "v1.32.8-k3s1"
err = k8sClient.Update(ctx, &cluster)
Expect(err).NotTo(HaveOccurred())
Eventually(func(g Gomega) {
// server pods
serverPods := listServerPods(ctx, virtualCluster)
g.Expect(len(serverPods)).To(Equal(1))
serverPod := serverPods[0]
_, cond := pod.GetPodCondition(&serverPod.Status, v1.PodReady)
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
g.Expect(serverPod.Spec.Containers[0].Image).To(Equal("rancher/k3s:" + cluster.Spec.Version))
// agent pods
agentPods := listAgentPods(ctx, virtualCluster)
g.Expect(len(agentPods)).To(Equal(1))
agentPod := agentPods[0]
_, cond = pod.GetPodCondition(&agentPod.Status, v1.PodReady)
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
g.Expect(agentPod.Spec.Containers[0].Image).To(Equal("rancher/k3s:" + cluster.Spec.Version))
clusterVersion, err := virtualCluster.Client.Discovery().ServerVersion()
g.Expect(err).To(BeNil())
g.Expect(clusterVersion.String()).To(Equal(strings.ReplaceAll(cluster.Spec.Version, "-", "+")))
nginxPod, err = virtualCluster.Client.CoreV1().Pods(nginxPod.Namespace).Get(ctx, nginxPod.Name, metav1.GetOptions{})
g.Expect(err).To(BeNil())
_, cond = pod.GetPodCondition(&nginxPod.Status, v1.PodReady)
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
}).
WithPolling(time.Second * 2).
WithTimeout(time.Minute * 3).
Should(Succeed())
})
})
var _ = When("a shared mode cluster scales up servers", Label("e2e"), Label(updateTestsLabel), Label(slowTestsLabel), func() {
var (
virtualCluster *VirtualCluster
nginxPod *v1.Pod
)
BeforeEach(func() {
ctx := context.Background()
namespace := NewNamespace()
DeferCleanup(func() {
DeleteNamespaces(namespace.Name)
})
cluster := NewCluster(namespace.Name)
// need to enable persistence for this
cluster.Spec.Persistence = v1beta1.PersistenceConfig{
Type: v1beta1.DynamicPersistenceMode,
}
CreateCluster(cluster)
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
virtualCluster = &VirtualCluster{
Cluster: cluster,
RestConfig: restConfig,
Client: client,
}
sPods := listServerPods(ctx, virtualCluster)
Expect(len(sPods)).To(Equal(1))
Eventually(func(g Gomega) {
// there is no way to check nodes in shared mode,
// so we check that the kubernetes endpoints are registered for the expected number of servers
k8sEndpointSlices, err := virtualCluster.Client.DiscoveryV1().EndpointSlices("default").Get(ctx, "kubernetes", metav1.GetOptions{})
g.Expect(err).ToNot(HaveOccurred())
g.Expect(len(k8sEndpointSlices.Endpoints)).To(Equal(1))
}).
WithPolling(time.Second * 2).
WithTimeout(time.Minute * 3).
Should(Succeed())
nginxPod, _ = virtualCluster.NewNginxPod("")
})
It("will scale up server pods", func() {
var cluster v1beta1.Cluster
ctx := context.Background()
err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
Expect(err).NotTo(HaveOccurred())
// scale cluster servers to 3 nodes
cluster.Spec.Servers = ptr.To[int32](3)
err = k8sClient.Update(ctx, &cluster)
Expect(err).NotTo(HaveOccurred())
Eventually(func(g Gomega) {
// server pods
serverPods := listServerPods(ctx, virtualCluster)
g.Expect(len(serverPods)).To(Equal(3))
for _, serverPod := range serverPods {
_, cond := pod.GetPodCondition(&serverPod.Status, v1.PodReady)
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
}
k8sEndpointSlices, err := virtualCluster.Client.DiscoveryV1().EndpointSlices("default").Get(ctx, "kubernetes", metav1.GetOptions{})
g.Expect(err).ToNot(HaveOccurred())
g.Expect(len(k8sEndpointSlices.Endpoints)).To(Equal(3))
nginxPod, err = virtualCluster.Client.CoreV1().Pods(nginxPod.Namespace).Get(ctx, nginxPod.Name, metav1.GetOptions{})
g.Expect(err).To(BeNil())
_, cond := pod.GetPodCondition(&nginxPod.Status, v1.PodReady)
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
}).
WithPolling(time.Second * 2).
WithTimeout(time.Minute * 3).
Should(Succeed())
})
})
var _ = When("a shared mode cluster scales down servers", Label("e2e"), Label(updateTestsLabel), Label(slowTestsLabel), func() {
var (
virtualCluster *VirtualCluster
nginxPod *v1.Pod
)
BeforeEach(func() {
ctx := context.Background()
namespace := NewNamespace()
DeferCleanup(func() {
DeleteNamespaces(namespace.Name)
})
cluster := NewCluster(namespace.Name)
// start cluster with 3 servers
cluster.Spec.Servers = ptr.To[int32](3)
// need to enable persistence for this
cluster.Spec.Persistence = v1beta1.PersistenceConfig{
Type: v1beta1.DynamicPersistenceMode,
}
CreateCluster(cluster)
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
virtualCluster = &VirtualCluster{
Cluster: cluster,
RestConfig: restConfig,
Client: client,
}
// no need to check server status since CreateCluster() waits until all servers are in Ready state
sPods := listServerPods(ctx, virtualCluster)
Expect(len(sPods)).To(Equal(3))
Eventually(func(g Gomega) {
// there is no way to check nodes in shared mode,
// so we check that the kubernetes endpoints are registered for the expected number of servers
k8sEndpointSlices, err := virtualCluster.Client.DiscoveryV1().EndpointSlices("default").Get(ctx, "kubernetes", metav1.GetOptions{})
g.Expect(err).ToNot(HaveOccurred())
g.Expect(len(k8sEndpointSlices.Endpoints)).To(Equal(3))
}).
WithPolling(time.Second * 2).
WithTimeout(time.Minute * 3).
Should(Succeed())
nginxPod, _ = virtualCluster.NewNginxPod("")
})
It("will scale down server pods", func() {
var cluster v1beta1.Cluster
ctx := context.Background()
err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
Expect(err).NotTo(HaveOccurred())
// scale down cluster servers to 1 node
cluster.Spec.Servers = ptr.To[int32](1)
err = k8sClient.Update(ctx, &cluster)
Expect(err).NotTo(HaveOccurred())
Eventually(func(g Gomega) {
// server pods
serverPods := listServerPods(ctx, virtualCluster)
g.Expect(len(serverPods)).To(Equal(1))
_, cond := pod.GetPodCondition(&serverPods[0].Status, v1.PodReady)
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
k8sEndpointSlices, err := virtualCluster.Client.DiscoveryV1().EndpointSlices("default").Get(ctx, "kubernetes", metav1.GetOptions{})
g.Expect(err).ToNot(HaveOccurred())
g.Expect(len(k8sEndpointSlices.Endpoints)).To(Equal(1))
nginxPod, err = virtualCluster.Client.CoreV1().Pods(nginxPod.Namespace).Get(ctx, nginxPod.Name, metav1.GetOptions{})
g.Expect(err).To(BeNil())
_, cond = pod.GetPodCondition(&nginxPod.Status, v1.PodReady)
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
}).
WithPolling(time.Second * 2).
WithTimeout(time.Minute * 3).
Should(Succeed())
})
})
var _ = When("a virtual mode cluster scales up servers", Label("e2e"), Label(updateTestsLabel), Label(slowTestsLabel), func() {
var (
virtualCluster *VirtualCluster
nginxPod *v1.Pod
)
BeforeEach(func() {
ctx := context.Background()
namespace := NewNamespace()
DeferCleanup(func() {
DeleteNamespaces(namespace.Name)
})
cluster := NewCluster(namespace.Name)
cluster.Spec.Mode = v1beta1.VirtualClusterMode
// need to enable persistence for this
cluster.Spec.Persistence = v1beta1.PersistenceConfig{
Type: v1beta1.DynamicPersistenceMode,
}
CreateCluster(cluster)
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
virtualCluster = &VirtualCluster{
Cluster: cluster,
RestConfig: restConfig,
Client: client,
}
sPods := listServerPods(ctx, virtualCluster)
Expect(len(sPods)).To(Equal(1))
Eventually(func(g Gomega) {
nodes, err := virtualCluster.Client.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
g.Expect(err).NotTo(HaveOccurred())
g.Expect(len(nodes.Items)).To(Equal(1))
}).
WithPolling(time.Second * 2).
WithTimeout(time.Minute * 5).
Should(Succeed())
nginxPod, _ = virtualCluster.NewNginxPod("")
})
It("will scale up server pods", func() {
var cluster v1beta1.Cluster
ctx := context.Background()
err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
Expect(err).NotTo(HaveOccurred())
// scale cluster servers to 3 nodes
cluster.Spec.Servers = ptr.To[int32](3)
err = k8sClient.Update(ctx, &cluster)
Expect(err).NotTo(HaveOccurred())
Eventually(func(g Gomega) {
// server pods
serverPods := listServerPods(ctx, virtualCluster)
g.Expect(len(serverPods)).To(Equal(3))
for _, serverPod := range serverPods {
_, cond := pod.GetPodCondition(&serverPod.Status, v1.PodReady)
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
}
nodes, err := virtualCluster.Client.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
g.Expect(err).NotTo(HaveOccurred())
g.Expect(len(nodes.Items)).To(Equal(3))
nginxPod, err = virtualCluster.Client.CoreV1().Pods(nginxPod.Namespace).Get(ctx, nginxPod.Name, metav1.GetOptions{})
g.Expect(err).To(BeNil())
_, cond := pod.GetPodCondition(&nginxPod.Status, v1.PodReady)
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
}).
WithPolling(time.Second * 2).
WithTimeout(time.Minute * 5).
Should(Succeed())
})
})
var _ = When("a virtual mode cluster scales down servers", Label("e2e"), Label(updateTestsLabel), Label(slowTestsLabel), func() {
var (
virtualCluster *VirtualCluster
nginxPod *v1.Pod
)
BeforeEach(func() {
ctx := context.Background()
namespace := NewNamespace()
DeferCleanup(func() {
DeleteNamespaces(namespace.Name)
})
cluster := NewCluster(namespace.Name)
cluster.Spec.Mode = v1beta1.VirtualClusterMode
// start cluster with 3 servers
cluster.Spec.Servers = ptr.To[int32](3)
// need to enable persistence for this
cluster.Spec.Persistence = v1beta1.PersistenceConfig{
Type: v1beta1.DynamicPersistenceMode,
}
CreateCluster(cluster)
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
virtualCluster = &VirtualCluster{
Cluster: cluster,
RestConfig: restConfig,
Client: client,
}
// no need to check server status since CreateCluster() waits until all servers are in Ready state
sPods := listServerPods(ctx, virtualCluster)
Expect(len(sPods)).To(Equal(3))
Eventually(func(g Gomega) {
nodes, err := virtualCluster.Client.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
g.Expect(err).NotTo(HaveOccurred())
g.Expect(len(nodes.Items)).To(Equal(3))
}).
WithPolling(time.Second * 2).
WithTimeout(time.Minute * 5).
Should(Succeed())
nginxPod, _ = virtualCluster.NewNginxPod("")
})
It("will scale down server pods", func() {
By("Scaling down cluster")
var cluster v1beta1.Cluster
ctx := context.Background()
err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
Expect(err).NotTo(HaveOccurred())
// scale down cluster servers to 1 node
cluster.Spec.Servers = ptr.To[int32](1)
err = k8sClient.Update(ctx, &cluster)
Expect(err).NotTo(HaveOccurred())
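// MustPassRepeatedly requires the assertion to hold for several consecutive polls,
// so the test only proceeds once the pods are consistently marked for deletion
// rather than on a single transient observation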
Eventually(func(g Gomega) {
serverPods := listServerPods(ctx, virtualCluster)
// Wait for all the server pods to be marked for deletion
for _, serverPod := range serverPods {
g.Expect(serverPod.DeletionTimestamp).NotTo(BeNil())
}
}).
MustPassRepeatedly(5).
WithPolling(time.Second * 5).
WithTimeout(time.Minute * 3).
Should(Succeed())
By("Waiting for cluster to be ready again")
Eventually(func(g Gomega) {
// server pods
serverPods := listServerPods(ctx, virtualCluster)
g.Expect(len(serverPods)).To(Equal(1))
_, cond := pod.GetPodCondition(&serverPods[0].Status, v1.PodReady)
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
// we can't check the number of nodes on scale down: the removed nodes are still registered, just in a NotReady state
k8sEndpointSlices, err := virtualCluster.Client.DiscoveryV1().EndpointSlices("default").Get(ctx, "kubernetes", metav1.GetOptions{})
g.Expect(err).ToNot(HaveOccurred())
g.Expect(len(k8sEndpointSlices.Endpoints)).To(Equal(1))
}).
MustPassRepeatedly(5).
WithPolling(time.Second * 5).
WithTimeout(time.Minute * 2).
Should(Succeed())
By("Checking that Nginx Pod is Running")
Eventually(func(g Gomega) {
nginxPod, err = virtualCluster.Client.CoreV1().Pods(nginxPod.Namespace).Get(ctx, nginxPod.Name, metav1.GetOptions{})
g.Expect(err).To(BeNil())
// TODO: there is a possible issue where the Pod is not marked as Ready
// if the kubelet loses sync with the API server.
// We check the ContainersReady condition instead (all containers in the pod are ready),
// but this probably needs further investigation.
// Related issue (?): https://github.com/kubernetes/kubernetes/issues/82346
_, cond := pod.GetPodCondition(&nginxPod.Status, v1.ContainersReady)
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
}).
WithPolling(time.Second * 2).
WithTimeout(time.Minute).
Should(Succeed())
})
})

View File

@@ -4,6 +4,8 @@ import (
"bytes"
"context"
"fmt"
"net/url"
"os"
"strings"
"sync"
"time"
@@ -13,14 +15,15 @@ import (
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/remotecommand"
"k8s.io/kubectl/pkg/scheme"
"k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/utils/ptr"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller/certs"
"github.com/rancher/k3k/pkg/controller/kubeconfig"
@@ -29,22 +32,22 @@ import (
)
type VirtualCluster struct {
Cluster *v1alpha1.Cluster
Cluster *v1beta1.Cluster
RestConfig *rest.Config
Client *kubernetes.Clientset
}
func NewVirtualCluster() *VirtualCluster { // By default, create an ephemeral cluster
return NewVirtualClusterWithType(v1alpha1.EphemeralPersistenceMode)
GinkgoHelper()
return NewVirtualClusterWithType(v1beta1.EphemeralPersistenceMode)
}
func NewVirtualClusterWithType(persistenceType v1alpha1.PersistenceMode) *VirtualCluster {
func NewVirtualClusterWithType(persistenceType v1beta1.PersistenceMode) *VirtualCluster {
GinkgoHelper()
namespace := NewNamespace()
By(fmt.Sprintf("Creating new virtual cluster in namespace %s", namespace.Name))
cluster := NewCluster(namespace.Name)
cluster.Spec.Persistence.Type = persistenceType
@@ -84,11 +87,11 @@ func NewVirtualClusters(n int) []*VirtualCluster {
return clusters
}
func NewNamespace() *corev1.Namespace {
func NewNamespace() *v1.Namespace {
GinkgoHelper()
namespace := &corev1.Namespace{ObjectMeta: v1.ObjectMeta{GenerateName: "ns-"}}
namespace, err := k8s.CoreV1().Namespaces().Create(context.Background(), namespace, v1.CreateOptions{})
namespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-", Labels: map[string]string{"e2e": "true"}}}
namespace, err := k8s.CoreV1().Namespaces().Create(context.Background(), namespace, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
return namespace
@@ -97,6 +100,11 @@ func NewNamespace() *corev1.Namespace {
func DeleteNamespaces(names ...string) {
GinkgoHelper()
if _, found := os.LookupEnv("KEEP_NAMESPACES"); found {
By(fmt.Sprintf("Keeping namespace %v", names))
return
}
wg := sync.WaitGroup{}
wg.Add(len(names))
@@ -117,73 +125,106 @@ func deleteNamespace(name string) {
By(fmt.Sprintf("Deleting namespace %s", name))
err := k8s.CoreV1().Namespaces().Delete(context.Background(), name, v1.DeleteOptions{
err := k8s.CoreV1().Namespaces().Delete(context.Background(), name, metav1.DeleteOptions{
GracePeriodSeconds: ptr.To[int64](0),
})
Expect(err).To(Not(HaveOccurred()))
}
func NewCluster(namespace string) *v1alpha1.Cluster {
return &v1alpha1.Cluster{
ObjectMeta: v1.ObjectMeta{
func NewCluster(namespace string) *v1beta1.Cluster {
return &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
Spec: v1beta1.ClusterSpec{
TLSSANs: []string{hostIP},
Expose: &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{},
Expose: &v1beta1.ExposeConfig{
NodePort: &v1beta1.NodePortConfig{},
},
Persistence: v1alpha1.PersistenceConfig{
Type: v1alpha1.EphemeralPersistenceMode,
Persistence: v1beta1.PersistenceConfig{
Type: v1beta1.EphemeralPersistenceMode,
},
ServerArgs: []string{
"--disable-network-policy",
},
},
}
}
func CreateCluster(cluster *v1alpha1.Cluster) {
func CreateCluster(cluster *v1beta1.Cluster) {
GinkgoHelper()
By(fmt.Sprintf("Creating new virtual cluster in namespace %s", cluster.Namespace))
ctx := context.Background()
err := k8sClient.Create(ctx, cluster)
Expect(err).To(Not(HaveOccurred()))
expectedServers := int(*cluster.Spec.Servers)
expectedAgents := int(*cluster.Spec.Agents)
By(fmt.Sprintf("Waiting for cluster %s to be ready in namespace %s. Expected servers: %d. Expected agents: %d", cluster.Name, cluster.Namespace, expectedServers, expectedAgents))
// track the ready count so progress is only logged when it changes
prev := -1
// check that all the server and agent (kubelet) Pods are in Ready state
Eventually(func() bool {
podList, err := k8s.CoreV1().Pods(cluster.Namespace).List(ctx, v1.ListOptions{})
podList, err := k8s.CoreV1().Pods(cluster.Namespace).List(ctx, metav1.ListOptions{})
Expect(err).To(Not(HaveOccurred()))
serverRunning := false
kubeletRunning := false
// all the servers and agents need to be in a Ready state
var serversReady, agentsReady int
for _, pod := range podList.Items {
imageName := pod.Spec.Containers[0].Image
if strings.Contains(imageName, "rancher/k3s") {
serverRunning = pod.Status.Phase == corev1.PodRunning
} else if strings.Contains(imageName, "rancher/k3k-kubelet") {
kubeletRunning = pod.Status.Phase == corev1.PodRunning
for _, k3sPod := range podList.Items {
_, cond := pod.GetPodCondition(&k3sPod.Status, v1.PodReady)
// pod not ready
if cond == nil || cond.Status != v1.ConditionTrue {
continue
}
if serverRunning && kubeletRunning {
return true
if k3sPod.Labels["role"] == "server" {
serversReady++
}
if k3sPod.Labels["type"] == "agent" {
agentsReady++
}
}
return false
if prev != (serversReady + agentsReady) {
GinkgoLogr.Info("Waiting for pods to be Ready",
"servers", serversReady, "agents", agentsReady,
"name", cluster.Name, "namespace", cluster.Namespace,
"time", time.Now().Format(time.DateTime),
)
prev = (serversReady + agentsReady)
}
// the ready server pods must match the expected servers exactly; for agents a lower bound is enough, since in shared mode the kubelet pod is also counted as an agent
if (serversReady != expectedServers) || (agentsReady < expectedAgents) {
return false
}
return true
}).
WithTimeout(time.Minute * 2).
WithPolling(time.Second * 5).
WithTimeout(time.Minute * 5).
WithPolling(time.Second * 10).
Should(BeTrue())
By("Cluster is ready")
}
// NewVirtualK8sClient returns a Kubernetes ClientSet for the virtual cluster
func NewVirtualK8sClient(cluster *v1alpha1.Cluster) *kubernetes.Clientset {
func NewVirtualK8sClient(cluster *v1beta1.Cluster) *kubernetes.Clientset {
virtualK8sClient, _ := NewVirtualK8sClientAndConfig(cluster)
return virtualK8sClient
}
// NewVirtualK8sClientAndConfig returns a Kubernetes ClientSet and rest.Config for the virtual cluster
func NewVirtualK8sClientAndConfig(cluster *v1alpha1.Cluster) (*kubernetes.Clientset, *rest.Config) {
func NewVirtualK8sClientAndConfig(cluster *v1beta1.Cluster) (*kubernetes.Clientset, *rest.Config) {
GinkgoHelper()
var (
@@ -215,37 +256,60 @@ func NewVirtualK8sClientAndConfig(cluster *v1alpha1.Cluster) (*kubernetes.Client
return virtualK8sClient, restcfg
}
func (c *VirtualCluster) NewNginxPod(namespace string) (*corev1.Pod, string) {
func (c *VirtualCluster) NewNginxPod(namespace string) (*v1.Pod, string) {
GinkgoHelper()
if namespace == "" {
namespace = "default"
}
nginxPod := &corev1.Pod{
ObjectMeta: v1.ObjectMeta{
nginxPod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "nginx-",
Namespace: namespace,
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{{
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "nginx",
Image: "nginx",
}},
},
}
By("Creating Pod")
By("Creating Nginx Pod and waiting for it to be Ready")
ctx := context.Background()
nginxPod, err := c.Client.CoreV1().Pods(nginxPod.Namespace).Create(ctx, nginxPod, v1.CreateOptions{})
var err error
nginxPod, err = c.Client.CoreV1().Pods(nginxPod.Namespace).Create(ctx, nginxPod, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
// check that the nginx Pod is up and running in the virtual cluster
Eventually(func(g Gomega) {
nginxPod, err = c.Client.CoreV1().Pods(nginxPod.Namespace).Get(ctx, nginxPod.Name, metav1.GetOptions{})
g.Expect(err).To(Not(HaveOccurred()))
_, cond := pod.GetPodCondition(&nginxPod.Status, v1.PodReady)
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
}).
WithTimeout(time.Minute).
WithPolling(time.Second).
Should(Succeed())
By(fmt.Sprintf("Nginx Pod is running (%s/%s)", nginxPod.Namespace, nginxPod.Name))
// only check the pod on the host cluster when running in shared mode
if c.Cluster.Spec.Mode != v1beta1.SharedClusterMode {
return nginxPod, ""
}
var podIP string
// check that the nginx Pod is up and running in the host cluster
Eventually(func() bool {
podList, err := k8s.CoreV1().Pods(c.Cluster.Namespace).List(ctx, v1.ListOptions{})
podList, err := k8s.CoreV1().Pods(c.Cluster.Namespace).List(ctx, metav1.ListOptions{})
Expect(err).To(Not(HaveOccurred()))
for _, pod := range podList.Items {
@@ -260,7 +324,7 @@ func (c *VirtualCluster) NewNginxPod(namespace string) (*corev1.Pod, string) {
pod.Name, resourceNamespace, resourceName, pod.Status.Phase, podIP,
)
return pod.Status.Phase == corev1.PodRunning && podIP != ""
return pod.Status.Phase == v1.PodRunning && podIP != ""
}
}
@@ -270,16 +334,12 @@ func (c *VirtualCluster) NewNginxPod(namespace string) (*corev1.Pod, string) {
WithPolling(time.Second * 5).
Should(BeTrue())
// get the running pod from the virtual cluster
nginxPod, err = c.Client.CoreV1().Pods(nginxPod.Namespace).Get(ctx, nginxPod.Name, v1.GetOptions{})
Expect(err).To(Not(HaveOccurred()))
return nginxPod, podIP
}
// ExecCmd exec command on specific pod and wait the command's output.
func (c *VirtualCluster) ExecCmd(pod *corev1.Pod, command string) (string, string, error) {
option := &corev1.PodExecOptions{
func (c *VirtualCluster) ExecCmd(pod *v1.Pod, command string) (string, string, error) {
option := &v1.PodExecOptions{
Command: []string{"sh", "-c", command},
Stdout: true,
Stderr: true,
@@ -311,7 +371,7 @@ func restartServerPod(ctx context.Context, virtualCluster *VirtualCluster) {
GinkgoHelper()
labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server"
serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))
Expect(len(serverPods.Items)).To(Equal(1))
@@ -319,16 +379,71 @@ func restartServerPod(ctx context.Context, virtualCluster *VirtualCluster) {
GinkgoWriter.Printf("deleting pod %s/%s\n", serverPod.Namespace, serverPod.Name)
err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).Delete(ctx, serverPod.Name, v1.DeleteOptions{})
err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).Delete(ctx, serverPod.Name, metav1.DeleteOptions{})
Expect(err).To(Not(HaveOccurred()))
By("Deleting server pod")
// check that the server pods restarted
Eventually(func() any {
serverPods, err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
serverPods, err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))
Expect(len(serverPods.Items)).To(Equal(1))
return serverPods.Items[0].DeletionTimestamp
}).WithTimeout(60 * time.Second).WithPolling(time.Second * 5).Should(BeNil())
}
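// listServerPods returns the host-cluster pods backing the virtual cluster's servers,
// selected by the cluster name and the role=server label.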
func listServerPods(ctx context.Context, virtualCluster *VirtualCluster) []v1.Pod {
labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server"
serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))
return serverPods.Items
}
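// listAgentPods returns the host-cluster agent pods of the virtual cluster,
// selected by the cluster name, the type=agent label, and the cluster mode.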
func listAgentPods(ctx context.Context, virtualCluster *VirtualCluster) []v1.Pod {
labelSelector := fmt.Sprintf("cluster=%s,type=agent,mode=%s", virtualCluster.Cluster.Name, virtualCluster.Cluster.Spec.Mode)
agentPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))
return agentPods.Items
}
// getEnv returns the value of the named environment variable from the pod's first container, and whether it was found
func getEnv(pod *v1.Pod, envName string) (string, bool) {
container := pod.Spec.Containers[0]
for _, envVar := range container.Env {
if envVar.Name == envName {
return envVar.Value, true
}
}
return "", false
}
// isArgFound returns true if the given argument is found in the first container's command
func isArgFound(pod *v1.Pod, arg string) bool {
container := pod.Spec.Containers[0]
for _, cmd := range container.Command {
if strings.Contains(cmd, arg) {
return true
}
}
return false
}
func getServerIP(ctx context.Context, cfg *rest.Config) (string, error) {
if k3sContainer != nil {
return k3sContainer.ContainerIP(ctx)
}
u, err := url.Parse(cfg.Host)
if err != nil {
return "", err
}
// If Host includes a port, u.Hostname() extracts just the hostname part
return u.Hostname(), nil
}

View File

@@ -1,35 +0,0 @@
package k3k_test
import (
"context"
"time"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = When("k3k is installed", Label("e2e"), func() {
It("is in Running status", func() {
// check that the controller is running
Eventually(func() bool {
opts := v1.ListOptions{LabelSelector: "app.kubernetes.io/name=k3k"}
podList, err := k8s.CoreV1().Pods("k3k-system").List(context.Background(), opts)
Expect(err).To(Not(HaveOccurred()))
Expect(podList.Items).To(Not(BeEmpty()))
for _, pod := range podList.Items {
if pod.Status.Phase == corev1.PodRunning {
return true
}
}
return false
}).
WithTimeout(time.Second * 10).
WithPolling(time.Second).
Should(BeTrue())
})
})

View File

@@ -9,6 +9,7 @@ import (
"os"
"os/exec"
"path"
"strings"
"testing"
"time"
@@ -25,13 +26,14 @@ import (
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/remotecommand"
"k8s.io/kubernetes/pkg/api/v1/pod"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -39,7 +41,13 @@ import (
const (
k3kNamespace = "k3k-system"
k3kName = "k3k"
slowTestsLabel = "slow"
updateTestsLabel = "update"
persistenceTestsLabel = "persistence"
networkingTestsLabel = "networking"
statusTestsLabel = "status"
certificatesTestsLabel = "certificates"
)
func TestTests(t *testing.T) {
@@ -48,56 +56,57 @@ func TestTests(t *testing.T) {
}
var (
k3sContainer *k3s.K3sContainer
hostIP string
restcfg *rest.Config
k8s *kubernetes.Clientset
k8sClient client.Client
kubeconfigPath string
k3sContainer *k3s.K3sContainer
hostIP string
restcfg *rest.Config
k8s *kubernetes.Clientset
k8sClient client.Client
kubeconfigPath string
repo string
helmActionConfig *action.Configuration
)
var _ = BeforeSuite(func() {
var err error
ctx := context.Background()
GinkgoWriter.Println("GOCOVERDIR:", os.Getenv("GOCOVERDIR"))
k3sContainer, err = k3s.Run(ctx, "rancher/k3s:v1.32.1-k3s1")
Expect(err).To(Not(HaveOccurred()))
repo = os.Getenv("REPO")
if repo == "" {
repo = "rancher"
}
hostIP, err = k3sContainer.ContainerIP(ctx)
Expect(err).To(Not(HaveOccurred()))
_, dockerInstallEnabled := os.LookupEnv("K3K_DOCKER_INSTALL")
GinkgoWriter.Println("K3s containerIP: " + hostIP)
kubeconfig, err := k3sContainer.GetKubeConfig(context.Background())
Expect(err).To(Not(HaveOccurred()))
tmpFile, err := os.CreateTemp("", "kubeconfig-")
Expect(err).To(Not(HaveOccurred()))
_, err = tmpFile.Write(kubeconfig)
Expect(err).To(Not(HaveOccurred()))
Expect(tmpFile.Close()).To(Succeed())
kubeconfigPath = tmpFile.Name()
Expect(os.Setenv("KUBECONFIG", kubeconfigPath)).To(Succeed())
DeferCleanup(os.Remove, kubeconfigPath)
initKubernetesClient(kubeconfig)
installK3kChart(ctx, kubeconfig)
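// when K3K_DOCKER_INSTALL is set, spin up a disposable k3s container (testcontainers)
// and install the chart into it; otherwise reuse the cluster pointed to by KUBECONFIG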
if dockerInstallEnabled {
installK3SDocker(ctx)
initKubernetesClient(ctx)
installK3kChart()
} else {
initKubernetesClient(ctx)
}
patchPVC(ctx, k8s)
})
func initKubernetesClient(kubeconfig []byte) {
var err error
func initKubernetesClient(ctx context.Context) {
var (
err error
kubeconfig []byte
)
kubeconfigPath := os.Getenv("KUBECONFIG")
Expect(kubeconfigPath).To(Not(BeEmpty()))
kubeconfig, err = os.ReadFile(kubeconfigPath)
Expect(err).To(Not(HaveOccurred()))
restcfg, err = clientcmd.RESTConfigFromKubeConfig(kubeconfig)
Expect(err).To(Not(HaveOccurred()))
hostIP, err = getServerIP(ctx, restcfg)
Expect(err).To(Not(HaveOccurred()))
k8s, err = kubernetes.NewForConfig(restcfg)
Expect(err).To(Not(HaveOccurred()))
@@ -107,39 +116,84 @@ func initKubernetesClient(kubeconfig []byte) {
logger, err := zap.NewDevelopment()
Expect(err).NotTo(HaveOccurred())
log.SetLogger(zapr.NewLogger(logger))
}
func buildScheme() *runtime.Scheme {
scheme := runtime.NewScheme()
err := corev1.AddToScheme(scheme)
err := v1.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
err = v1alpha1.AddToScheme(scheme)
err = v1beta1.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
return scheme
}
func installK3kChart(ctx context.Context, kubeconfig []byte) {
func installK3SDocker(ctx context.Context) {
var (
err error
kubeconfig []byte
)
k3sHostVersion := os.Getenv("K3S_HOST_VERSION")
if k3sHostVersion == "" {
k3sHostVersion = "v1.32.1+k3s1"
}
k3sHostVersion = strings.ReplaceAll(k3sHostVersion, "+", "-")
k3sContainer, err = k3s.Run(ctx, "rancher/k3s:"+k3sHostVersion)
Expect(err).To(Not(HaveOccurred()))
containerIP, err := k3sContainer.ContainerIP(ctx)
Expect(err).To(Not(HaveOccurred()))
GinkgoWriter.Println("K3s containerIP: " + containerIP)
kubeconfig, err = k3sContainer.GetKubeConfig(context.Background())
Expect(err).To(Not(HaveOccurred()))
tmpFile, err := os.CreateTemp("", "kubeconfig-")
Expect(err).To(Not(HaveOccurred()))
_, err = tmpFile.Write(kubeconfig)
Expect(err).To(Not(HaveOccurred()))
Expect(tmpFile.Close()).To(Succeed())
kubeconfigPath = tmpFile.Name()
err = k3sContainer.LoadImages(ctx, repo+"/k3k:dev", repo+"/k3k-kubelet:dev")
Expect(err).To(Not(HaveOccurred()))
DeferCleanup(os.Remove, kubeconfigPath)
Expect(os.Setenv("KUBECONFIG", kubeconfigPath)).To(Succeed())
GinkgoWriter.Print(kubeconfigPath)
GinkgoWriter.Print(string(kubeconfig))
}
func installK3kChart() {
pwd, err := os.Getwd()
Expect(err).To(Not(HaveOccurred()))
k3kChart, err := loader.Load(path.Join(pwd, "../charts/k3k"))
Expect(err).To(Not(HaveOccurred()))
actionConfig := new(action.Configuration)
helmActionConfig = new(action.Configuration)
kubeconfig, err := os.ReadFile(kubeconfigPath)
Expect(err).To(Not(HaveOccurred()))
restClientGetter, err := NewRESTClientGetter(kubeconfig)
Expect(err).To(Not(HaveOccurred()))
err = actionConfig.Init(restClientGetter, k3kNamespace, os.Getenv("HELM_DRIVER"), func(format string, v ...any) {
err = helmActionConfig.Init(restClientGetter, k3kNamespace, os.Getenv("HELM_DRIVER"), func(format string, v ...any) {
GinkgoWriter.Printf("helm debug: "+format+"\n", v...)
})
Expect(err).To(Not(HaveOccurred()))
iCli := action.NewInstall(actionConfig)
iCli.ReleaseName = k3kName
iCli := action.NewInstall(helmActionConfig)
iCli.ReleaseName = "k3k"
iCli.Namespace = k3kNamespace
iCli.CreateNamespace = true
iCli.Timeout = time.Minute
@@ -148,7 +202,7 @@ func installK3kChart(ctx context.Context, kubeconfig []byte) {
controllerMap, _ := k3kChart.Values["controller"].(map[string]any)
imageMap, _ := controllerMap["image"].(map[string]any)
maps.Copy(imageMap, map[string]any{
"repository": "rancher/k3k",
"repository": repo + "/k3k",
"tag": "dev",
"pullPolicy": "IfNotPresent",
})
@@ -157,13 +211,10 @@ func installK3kChart(ctx context.Context, kubeconfig []byte) {
sharedAgentMap, _ := agentMap["shared"].(map[string]any)
sharedAgentImageMap, _ := sharedAgentMap["image"].(map[string]any)
maps.Copy(sharedAgentImageMap, map[string]any{
"repository": "rancher/k3k-kubelet",
"repository": repo + "/k3k-kubelet",
"tag": "dev",
})
err = k3sContainer.LoadImages(ctx, "rancher/k3k:dev", "rancher/k3k-kubelet:dev")
Expect(err).To(Not(HaveOccurred()))
release, err := iCli.Run(k3kChart, k3kChart.Values)
Expect(err).To(Not(HaveOccurred()))
@@ -171,76 +222,69 @@ func installK3kChart(ctx context.Context, kubeconfig []byte) {
}
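// patchPVC creates the coverage-data PVC (if it does not exist yet), updates the k3k
// Deployment in place to mount it and set GOCOVERDIR so the controller writes its Go
// coverage data to the shared volume, and waits for the rollout to complete.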
func patchPVC(ctx context.Context, clientset *kubernetes.Clientset) {
pvc := &corev1.PersistentVolumeClaim{
deployments, err := clientset.AppsV1().Deployments(k3kNamespace).List(ctx, metav1.ListOptions{})
Expect(err).To(Not(HaveOccurred()))
Expect(deployments.Items).To(HaveLen(1))
k3kDeployment := &deployments.Items[0]
pvc := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "coverage-data-pvc",
Namespace: k3kNamespace,
},
Spec: corev1.PersistentVolumeClaimSpec{
AccessModes: []corev1.PersistentVolumeAccessMode{
corev1.ReadWriteOnce,
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
Resources: corev1.VolumeResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceStorage: resource.MustParse("100M"),
Resources: v1.VolumeResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceStorage: resource.MustParse("100M"),
},
},
},
}
_, err := clientset.CoreV1().PersistentVolumeClaims(k3kNamespace).Create(ctx, pvc, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
_, err = clientset.CoreV1().PersistentVolumeClaims(k3kNamespace).Create(ctx, pvc, metav1.CreateOptions{})
Expect(client.IgnoreAlreadyExists(err)).To(Not(HaveOccurred()))
patchData := []byte(`
{
"spec": {
"template": {
"spec": {
"volumes": [
{
"name": "tmp-covdata",
"persistentVolumeClaim": {
"claimName": "coverage-data-pvc"
}
}
],
"containers": [
{
"name": "k3k",
"volumeMounts": [
{
"name": "tmp-covdata",
"mountPath": "/tmp/covdata"
}
],
"env": [
{
"name": "GOCOVERDIR",
"value": "/tmp/covdata"
}
]
}
]
}
}
}
}`)
k3kSpec := k3kDeployment.Spec.Template.Spec
GinkgoWriter.Printf("Applying patch to deployment '%s' in namespace '%s'...\n", k3kName, k3kNamespace)
// skip the patch if the Deployment already has the coverage volume
for _, volumes := range k3kSpec.Volumes {
if volumes.Name == "tmp-covdata" {
return
}
}
_, err = clientset.AppsV1().Deployments(k3kNamespace).Patch(
ctx,
k3kName,
types.StrategicMergePatchType,
patchData,
metav1.PatchOptions{},
)
k3kSpec.Volumes = append(k3kSpec.Volumes, v1.Volume{
Name: "tmp-covdata",
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: "coverage-data-pvc",
},
},
})
k3kSpec.Containers[0].VolumeMounts = append(k3kSpec.Containers[0].VolumeMounts, v1.VolumeMount{
Name: "tmp-covdata",
MountPath: "/tmp/covdata",
})
k3kSpec.Containers[0].Env = append(k3kSpec.Containers[0].Env, v1.EnvVar{
Name: "GOCOVERDIR",
Value: "/tmp/covdata",
})
k3kDeployment.Spec.Template.Spec = k3kSpec
_, err = clientset.AppsV1().Deployments(k3kNamespace).Update(ctx, k3kDeployment, metav1.UpdateOptions{})
Expect(err).To(Not(HaveOccurred()))
Eventually(func() bool {
GinkgoWriter.Println("Checking K3k deployment status")
dep, err := clientset.AppsV1().Deployments(k3kNamespace).Get(ctx, k3kName, metav1.GetOptions{})
dep, err := clientset.AppsV1().Deployments(k3kNamespace).Get(ctx, k3kDeployment.Name, metav1.GetOptions{})
Expect(err).To(Not(HaveOccurred()))
// 1. Check if the controller has observed the latest generation
@@ -257,7 +301,7 @@ func patchPVC(ctx context.Context, clientset *kubernetes.Clientset) {
// 3. Check if all updated replicas are available
if dep.Status.AvailableReplicas < dep.Status.UpdatedReplicas {
GinkgoWriter.Printf("K3k deployment availabl replicas: %d, updated replicas: %d\n", dep.Status.AvailableReplicas, dep.Status.UpdatedReplicas)
GinkgoWriter.Printf("K3k deployment available replicas: %d, updated replicas: %d\n", dep.Status.AvailableReplicas, dep.Status.UpdatedReplicas)
return false
}
@@ -279,69 +323,125 @@ var _ = AfterSuite(func() {
}
dumpK3kCoverageData(ctx, goCoverDir)
if k3sContainer != nil {
// dump k3s logs
k3sLogs, err := k3sContainer.Logs(ctx)
Expect(err).To(Not(HaveOccurred()))
writeLogs("k3s.log", k3sLogs)
// dump k3s logs
k3sLogs, err := k3sContainer.Logs(ctx)
Expect(err).To(Not(HaveOccurred()))
writeLogs("k3s.log", k3sLogs)
// dump k3k controller logs
k3kLogs := getK3kLogs(ctx)
writeLogs("k3k.log", k3kLogs)
// dump k3k controller logs
k3kLogs := getK3kLogs(ctx)
writeLogs("k3k.log", k3kLogs)
testcontainers.CleanupContainer(GinkgoTB(), k3sContainer)
testcontainers.CleanupContainer(GinkgoTB(), k3sContainer)
}
})
// dumpK3kCoverageData will kill the K3k controller container to force it to dump the coverage data.
// It will then download the files with kubectl cp into the specified folder. If the folder doesn't exist, it will be created.
func dumpK3kCoverageData(ctx context.Context, folder string) {
GinkgoWriter.Println("Restarting k3k controller...")
By("Restarting k3k controller")
var podList corev1.PodList
var podList v1.PodList
err := k8sClient.List(ctx, &podList, &client.ListOptions{Namespace: k3kNamespace})
Expect(err).To(Not(HaveOccurred()))
k3kPod := podList.Items[0]
k3kContainerName := k3kPod.Spec.Containers[0].Name
cmd := exec.Command("kubectl", "exec", "-n", k3kNamespace, k3kPod.Name, "-c", "k3k", "--", "kill", "1")
By("Restarting k3k controller " + k3kPod.Name + "/" + k3kContainerName)
cmd := exec.Command("kubectl", "exec", "-n", k3kNamespace, k3kPod.Name, "-c", k3kContainerName, "--", "/bin/sh", "-c", "kill 1")
output, err := cmd.CombinedOutput()
Expect(err).NotTo(HaveOccurred(), string(output))
Eventually(func() corev1.PodPhase {
var pod corev1.Pod
By("Waiting to be ready again")
Eventually(func(g Gomega) {
key := types.NamespacedName{
Namespace: k3kNamespace,
Name: k3kPod.Name,
}
err = k8sClient.Get(ctx, key, &pod)
Expect(err).To(Not(HaveOccurred()))
GinkgoWriter.Printf("K3k controller status: %s\n", pod.Status.Phase)
var controllerPod v1.Pod
return pod.Status.Phase
err = k8sClient.Get(ctx, key, &controllerPod)
g.Expect(err).To(Not(HaveOccurred()))
_, cond := pod.GetPodCondition(&controllerPod.Status, v1.PodReady)
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
}).
MustPassRepeatedly(5).
WithPolling(time.Second * 2).
WithTimeout(time.Minute * 2).
Should(Succeed())
By("Controller is ready again, dumping coverage data")
GinkgoWriter.Printf("Downloading covdata from k3k controller %s/%s to %s\n", k3kNamespace, k3kPod.Name, folder)
tarPod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "tar",
Namespace: k3kNamespace,
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "tar",
Image: "busybox",
Command: []string{"/bin/sh", "-c", "sleep 3600"},
VolumeMounts: []v1.VolumeMount{{
Name: "tmp-covdata",
MountPath: "/tmp/covdata",
}},
}},
Volumes: []v1.Volume{{
Name: "tmp-covdata",
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: "coverage-data-pvc",
},
},
}},
},
}
_, err = k8s.CoreV1().Pods(k3kNamespace).Create(ctx, tarPod, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
By("Waiting for tar pod to be ready")
Eventually(func(g Gomega) {
err = k8sClient.Get(ctx, client.ObjectKeyFromObject(tarPod), tarPod)
g.Expect(err).To(Not(HaveOccurred()))
_, cond := pod.GetPodCondition(&tarPod.Status, v1.PodReady)
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
}).
WithPolling(time.Second).
WithTimeout(time.Minute).
Should(Equal(corev1.PodRunning))
Should(Succeed())
GinkgoWriter.Printf("Downloading covdata from k3k controller to %s\n", folder)
By("Copying covdata from tar pod")
cmd = exec.Command("kubectl", "cp", fmt.Sprintf("%s/%s:/tmp/covdata", k3kNamespace, k3kPod.Name), folder)
cmd = exec.Command("kubectl", "cp", fmt.Sprintf("%s/%s:/tmp/covdata", k3kNamespace, tarPod.Name), folder)
output, err = cmd.CombinedOutput()
Expect(err).NotTo(HaveOccurred(), string(output))
Expect(k8sClient.Delete(ctx, tarPod)).To(Succeed())
}
func getK3kLogs(ctx context.Context) io.ReadCloser {
var podList corev1.PodList
var podList v1.PodList
err := k8sClient.List(ctx, &podList, &client.ListOptions{Namespace: k3kNamespace})
Expect(err).To(Not(HaveOccurred()))
k3kPod := podList.Items[0]
req := k8s.CoreV1().Pods(k3kPod.Namespace).GetLogs(k3kPod.Name, &corev1.PodLogOptions{Previous: true})
req := k8s.CoreV1().Pods(k3kPod.Namespace).GetLogs(k3kPod.Name, &v1.PodLogOptions{Previous: true})
podLogs, err := req.Stream(ctx)
Expect(err).To(Not(HaveOccurred()))
@@ -382,13 +482,13 @@ func podExec(ctx context.Context, clientset *kubernetes.Clientset, config *rest.
SubResource("exec")
scheme := runtime.NewScheme()
if err := corev1.AddToScheme(scheme); err != nil {
if err := v1.AddToScheme(scheme); err != nil {
return nil, fmt.Errorf("error adding to scheme: %v", err)
}
parameterCodec := runtime.NewParameterCodec(scheme)
req.VersionedParams(&corev1.PodExecOptions{
req.VersionedParams(&v1.PodExecOptions{
Command: command,
Stdin: stdin != nil,
Stdout: stdout != nil,
@@ -416,8 +516,8 @@ func podExec(ctx context.Context, clientset *kubernetes.Clientset, config *rest.
return stderr.Bytes(), nil
}
func caCertSecret(name, namespace string, crt, key []byte) *corev1.Secret {
return &corev1.Secret{
func caCertSecret(name, namespace string, crt, key []byte) *v1.Secret {
return &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,