Compare commits

...

51 Commits

Author SHA1 Message Date
jpgouin
a2f5fd7592 Merge pull request #328 from takushi-35/ehemeral-docs
[DOC] fix: Incorrect creation method when using Ephemeral Storage in advanced-usage.md
2025-04-11 14:27:40 +02:00
takushi-35
c8df86b83b fix: Correcting incorrect procedures in advanced-usage.md 2025-04-10 13:27:00 +09:00
Hussein Galal
d41d2b8c31 Fix update bug in ensureObject (#325)
* Fix update bug in ensureObjects

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix log msg

Co-authored-by: Enrico Candino <enrico.candino@gmail.com>

* Fix import

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
Co-authored-by: Enrico Candino <enrico.candino@gmail.com>
2025-04-09 17:25:48 +02:00
Enrico Candino
7cb2399b89 Update and fix to k3kcli for new ClusterSet integration (#321)
* added clusterset flag to cluster creation and displayname to clusterset creation

* updated cli docs
2025-04-04 13:22:55 +02:00
Enrico Candino
90568f24b1 Added ClusterSet as singleton (#316)
* added ClusterSet as singleton

* fix tests
2025-04-03 16:26:25 +02:00
Hussein Galal
0843a9e313 Initial support for ResourceQuotas in clustersets (#308)
* Add ResourceQuota to clusterset

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Generate docs

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add a default limitRange for ClusterSets

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix linting

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix docs

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add test for clusterset limitRange

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add server and worker limits

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* make charts

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* add default limits and fixes to resourcesquota

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl lint

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl lint

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* make docs

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* make build-crds

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* make build-crds

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* make spec as pointer

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* delete default limit

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl lint

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Update tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Update tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* return on delete in limitrange

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-04-03 12:30:48 +02:00
Enrico Candino
b58578788c Add clusterset commands (#319)
* added clusterset create command, small refactor with appcontext

* added clusterset delete

* updated docs
2025-04-03 11:07:32 +02:00
Enrico Candino
c4cc1e69cd requeue if server not ready (#318) 2025-04-03 10:45:18 +02:00
Enrico Candino
bd947c0fcb Create dedicated namespace for new clusters (#314)
* create dedicated namespace for new clusters

* porcelain test

* use --exit-code instead of test and shell for escaping issue

* update go.mod
2025-03-26 14:53:41 +01:00
Hussein Galal
b0b61f8d8e Fix delete cli (#281)
* Fix delete cli

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* make lint

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* update docs

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix delete cli

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl lint

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl lint

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* check if object has a controller reference before removing

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* move the update to the if condition

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* move the update to the if condition

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-03-24 13:44:00 +02:00
Enrico Candino
3281d54c6c fix typo (#300) 2025-03-24 10:48:42 +01:00
Hussein Galal
853b0a7e05 Chart update for 0.3.1 (#309)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-03-21 01:59:53 +02:00
Rossella Sblendido
28b15d2e92 Merge pull request #299 from rancher/doc-k3d
Add documentation to install k3k on k3d
2025-03-10 11:07:02 +01:00
rossella
cad59c0494 Moved how to to development.md 2025-03-07 17:38:22 +01:00
Enrico Candino
d0810af17c Fix kubeconfig load from multiple configuration files (#301)
* fix kubeconfig load with standard kubectl approach

* update cli docs
2025-03-07 15:11:00 +01:00
Enrico Candino
2b7202e676 Added NetworkPolicy for Cluster isolation (#290)
* added cluster NetworkPolicy

* wip tests

* remove focus

* added networking test

* test refactoring

* unfocus

* revert labels

* added async creation of clusters, and namespace deletion

* add unfocus validation
2025-03-07 14:36:49 +01:00
rossella
4975b0b799 Add documentation to install k3k on k3d
k3d is a valid local option for testing k3k. Adding documentation
on how to install k3k on top of k3d.
2025-03-04 16:48:21 +01:00
jpgouin
90d17cd6dd Merge pull request #288 from jp-gouin/fix-cli 2025-03-04 13:48:34 +01:00
Justin J. Janes
3e5e9c7965 adding a new issue template for bug reports (#258)
* adding a new issue template for bug reports

* adding extra helper commands for logs from k3k
2025-03-04 12:33:34 +02:00
jpgouin
1d027909ee cli - fix bug with Kubeconfig value resolution 2025-03-04 10:23:54 +00:00
Enrico Candino
6105402bf2 charts-0.3.1-r1 (#283) 2025-03-03 17:14:13 +01:00
jpgouin
6031eeb09b Merge pull request #273 from jp-gouin/readme-1
Update quickstart pre-requisites and generate automatically cli docs
2025-03-03 14:44:11 +01:00
jpgouin
2f582a473a update cli docs 2025-03-03 13:31:40 +00:00
jpgouin
26d3d29ba1 remove reference to persistence type 2025-03-03 13:21:07 +00:00
jpgouin
e97a3f5966 remove reference to persistence type 2025-03-03 13:21:07 +00:00
jpgouin
07b9cdcc86 update readme and advanced-usage doc 2025-03-03 13:21:07 +00:00
jpgouin
a3cbe42782 update doc and genclidoc 2025-03-03 13:21:07 +00:00
jpgouin
3cf8c0a744 default the kubeconfig flag to $HOME/.kube/config 2025-03-03 13:21:07 +00:00
jpgouin
ddc367516b fix test 2025-03-03 13:21:07 +00:00
jpgouin
7b83b9fd36 fix test 2025-03-03 13:21:07 +00:00
jpgouin
bf8fdd9071 fix lint 2025-03-03 13:21:07 +00:00
jpgouin
4ca5203df1 Flatten the cli structure, add docs generation in the Makefile and remove crds/Makefile 2025-03-03 13:21:07 +00:00
Enrico Candino
5e8bc0d3cd Update CRDs documentation (#279)
* complete CRD documentation

* fix missing rebuild of CRDs
2025-03-03 11:47:53 +01:00
Enrico Candino
430e18bf30 Added wsl linter, and fixed related issues (#275)
* added wsl linter

* fixed issues
2025-02-27 10:59:02 +01:00
Hussein Galal
ec0e5a4a87 Support for multi node in shared mode (#269)
* Support for multi node in shared mode

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fixing typo

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-02-26 22:55:31 +02:00
Hussein Galal
29438121ba Fix server/agent extra args in the cli (#272)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-02-26 11:32:25 +02:00
Hussein Galal
c2cde0c9ba Fix the default CIDRs for both modes (#271)
* Fix the default CIDRs for both modes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix service/cluster cidr

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-02-26 11:32:17 +02:00
jpgouin
1be43e0564 Merge pull request #270 from jp-gouin/readme-1
Update README.md
2025-02-25 11:27:57 +01:00
jpgouin
8913772240 Update README.md
Co-authored-by: Enrico Candino <enrico.candino@gmail.com>
2025-02-25 11:07:47 +01:00
jpgouin
dbbe03ca96 Update README.md
Co-authored-by: Enrico Candino <enrico.candino@gmail.com>
2025-02-25 10:56:45 +01:00
jpgouin
e52a682cca Update README.md
add explanation to create a cluster from a host cluster accessed through Rancher
2025-02-24 14:22:56 +01:00
Enrico Candino
26a0bb6583 Added ServiceCIDR lookup, and changed default (#263)
* added serviceCIDR lookup

* fix log

* fix comment

* swap serviceCIDR lookup
2025-02-24 12:08:58 +01:00
Enrico Candino
dee20455ee added multiarch support (#262) 2025-02-21 14:36:17 +01:00
Enrico Candino
f5c9a4b3a1 fix duplicated envvars, added tests (#260) 2025-02-19 16:27:46 +01:00
Enrico Candino
65fe7a678f fix k3s version metadata (#256) 2025-02-18 16:04:11 +01:00
Hussein Galal
8811ba74de Fix cluster spec update (#257)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-02-18 15:40:49 +02:00
Enrico Candino
127b5fc848 Remove dapper (#254)
* wip drop dapper

* added tests, validate

* fix kubebuilder assets

* debug

* fix maybe

* export global

* export global 2

* fix goreleaser

* dev doc section improved

* crd and docs

* drop dapper

* drop unused tmpl

* added help

* typos, and added `build-crds` target to default
2025-02-18 11:59:20 +01:00
Hussein Galal
d95e3fd33a Chart update for v0.3.0 (#255)
* Chart update for v0.3.0

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Update chart to 0.3.0

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Update chart to 0.3.0-r1

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-02-17 15:06:22 +02:00
Hussein Galal
1f4b3c4835 Assign pod's hostname if not assigned (#253)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-02-17 12:47:49 +02:00
Enrico Candino
0056e4a3f7 add check for number of arguments (#252) 2025-02-17 10:37:42 +01:00
Hussein Galal
8bc5519db0 Update chart (#251)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-02-14 15:28:20 +02:00
98 changed files with 3649 additions and 1596 deletions

41
.github/ISSUE_TEMPLATE/bug_report.md vendored Normal file
View File

@@ -0,0 +1,41 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''
---
<!-- Thanks for helping us to improve K3K! We welcome all bug reports. Please fill out each area of the template so we can better help you. Comments like this will be hidden when you post but you can delete them if you wish. -->
**Environmental Info:**
Host Cluster Version:
<!-- For example K3S v1.32.1+k3s1 or RKE2 v1.31.5+rke2r1 -->
Node(s) CPU architecture, OS, and Version:
<!-- Provide the output from "uname -a" on the node(s) -->
Host Cluster Configuration:
<!-- Provide some basic information on the cluster configuration. For example, "1 servers, 2 agents CNI: Flannel". -->
K3K Cluster Configuration:
<!-- Provide some basic information on the cluster configuration. For example, "3 servers, 2 agents". -->
**Describe the bug:**
<!-- A clear and concise description of what the bug is. -->
**Steps To Reproduce:**
- Created a cluster with `k3k create`:
**Expected behavior:**
<!-- A clear and concise description of what you expected to happen. -->
**Actual behavior:**
<!-- A clear and concise description of what actually happened. -->
**Additional context / logs:**
<!-- Add any other context and/or logs about the problem here. -->
<!-- kubectl logs -n k3k-system -l app.kubernetes.io/instance=k3k -->
<!-- $ kubectl logs -n <cluster-namespace> k3k-<cluster-name>-server-0 -->
<!-- $ kubectl logs -n <cluster-namespace> -l cluster=<cluster-name>,mode=shared # in shared mode -->

View File

@@ -21,6 +21,9 @@ jobs:
uses: actions/setup-go@v5
with:
go-version-file: go.mod
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@v6

View File

@@ -35,6 +35,9 @@ jobs:
with:
go-version-file: go.mod
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: "Read secrets"
uses: rancher-eio/read-vault-secrets@main
if: github.repository_owner == 'rancher'

View File

@@ -37,24 +37,11 @@ jobs:
with:
go-version-file: go.mod
- name: Check go modules
run: |
go mod tidy
- name: Validate
run: make validate
git --no-pager diff go.mod go.sum
test -z "$(git status --porcelain)"
- name: Install tools
run: |
go install github.com/onsi/ginkgo/v2/ginkgo
go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest
ENVTEST_BIN=$(setup-envtest use -p path)
sudo mkdir -p /usr/local/kubebuilder/bin
sudo cp $ENVTEST_BIN/* /usr/local/kubebuilder/bin
- name: Run tests
run: ginkgo -v -r --skip-file=tests
- name: Run unit tests
run: make test-unit
tests-e2e:
runs-on: ubuntu-latest
@@ -70,19 +57,16 @@ jobs:
with:
go-version-file: go.mod
- name: Check go modules
run: |
go mod tidy
git --no-pager diff go.mod go.sum
test -z "$(git status --porcelain)"
- name: Validate
run: make validate
- name: Install Ginkgo
run: go install github.com/onsi/ginkgo/v2/ginkgo
- name: Build
- name: Build and package
run: |
./scripts/build
make build
make package
# add k3kcli to $PATH
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
@@ -90,8 +74,8 @@ jobs:
- name: Check k3kcli
run: k3kcli -v
- name: Run tests
run: ginkgo -v ./tests
- name: Run e2e tests
run: make test-e2e
- name: Archive k3s logs
uses: actions/upload-artifact@v4

1
.gitignore vendored
View File

@@ -7,3 +7,4 @@
.vscode/
__debug*
*-kubeconfig.yaml
.envtest

View File

@@ -10,3 +10,4 @@ linters:
# extra
- misspell
- wsl

View File

@@ -67,29 +67,78 @@ archives:
# REGISTRY=ghcr.io -> ghcr.io/rancher/k3k:latest:vX.Y.Z
#
dockers:
- id: k3k
use: docker
# k3k amd64
- use: buildx
goarch: amd64
ids:
- k3k
- k3kcli
dockerfile: "package/Dockerfile"
dockerfile: "package/Dockerfile.k3k"
skip_push: false
image_templates:
- "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}:{{ .Tag }}"
- "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}:{{ .Tag }}-amd64"
build_flag_templates:
- "--build-arg=BIN_K3K=k3k"
- "--build-arg=BIN_K3KCLI=k3kcli"
- id: k3k-kubelet
use: docker
- "--pull"
- "--platform=linux/amd64"
# k3k arm64
- use: buildx
goarch: arm64
ids:
- k3k-kubelet
dockerfile: "package/Dockerfile.kubelet"
- k3k
- k3kcli
dockerfile: "package/Dockerfile.k3k"
skip_push: false
image_templates:
- "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}-kubelet:{{ .Tag }}"
- "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}:{{ .Tag }}-arm64"
build_flag_templates:
- "--build-arg=BIN_K3K=k3k"
- "--build-arg=BIN_K3KCLI=k3kcli"
- "--pull"
- "--platform=linux/arm64"
# k3k-kubelet amd64
- use: buildx
goarch: amd64
ids:
- k3k-kubelet
dockerfile: "package/Dockerfile.k3k-kubelet"
skip_push: false
image_templates:
- "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}-kubelet:{{ .Tag }}-amd64"
build_flag_templates:
- "--build-arg=BIN_K3K_KUBELET=k3k-kubelet"
- "--pull"
- "--platform=linux/amd64"
# k3k-kubelet arm64
- use: buildx
goarch: arm64
ids:
- k3k-kubelet
dockerfile: "package/Dockerfile.k3k-kubelet"
skip_push: false
image_templates:
- "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}-kubelet:{{ .Tag }}-arm64"
build_flag_templates:
- "--build-arg=BIN_K3K_KUBELET=k3k-kubelet"
- "--pull"
- "--platform=linux/arm64"
docker_manifests:
# k3k
- name_template: "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}:{{ .Tag }}"
image_templates:
- "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}:{{ .Tag }}-amd64"
- "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}:{{ .Tag }}-arm64"
# k3k-kubelet arm64
- name_template: "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}-kubelet:{{ .Tag }}"
image_templates:
- "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}-kubelet:{{ .Tag }}-amd64"
- "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}-kubelet:{{ .Tag }}-arm64"
changelog:
sort: asc

View File

@@ -1,34 +0,0 @@
ARG GOLANG=rancher/hardened-build-base:v1.23.4b1
FROM ${GOLANG}
ARG DAPPER_HOST_ARCH
ENV ARCH $DAPPER_HOST_ARCH
RUN apk -U add bash git gcc musl-dev docker vim less file curl wget ca-certificates
RUN if [ "${ARCH}" == "amd64" ]; then \
curl -sL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.59.0; \
fi
RUN curl -sL https://github.com/helm/chart-releaser/releases/download/v1.5.0/chart-releaser_1.5.0_linux_${ARCH}.tar.gz | tar -xz cr \
&& mv cr /bin/
# Tool for CRD generation.
ENV CONTROLLER_GEN_VERSION v0.14.0
RUN go install sigs.k8s.io/controller-tools/cmd/controller-gen@${CONTROLLER_GEN_VERSION}
# Tool to setup the envtest framework to run the controllers integration tests
RUN go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest && \
ENVTEST_BIN=$(setup-envtest use -p path) && \
mkdir -p /usr/local/kubebuilder/bin && \
cp $ENVTEST_BIN/* /usr/local/kubebuilder/bin
ENV GO111MODULE on
ENV DAPPER_ENV REPO TAG DRONE_TAG CROSS GITHUB_TOKEN SKIP_TESTS GIT_TAG
ENV DAPPER_SOURCE /go/src/github.com/rancher/k3k/
ENV DAPPER_OUTPUT ./bin ./dist ./deploy ./charts
ENV DAPPER_DOCKER_SOCKET true
ENV HOME ${DAPPER_SOURCE}
WORKDIR ${DAPPER_SOURCE}
ENTRYPOINT ["./ops/entry"]
CMD ["ci"]

114
Makefile
View File

@@ -1,14 +1,106 @@
TARGETS := $(shell ls ops)
.dapper:
@echo Downloading dapper
@curl -sL https://releases.rancher.com/dapper/latest/dapper-$$(uname -s)-$$(uname -m) > .dapper.tmp
@@chmod +x .dapper.tmp
@./.dapper.tmp -v
@mv .dapper.tmp .dapper
$(TARGETS): .dapper
./.dapper $@
REPO ?= rancher
VERSION ?= $(shell git describe --tags --always --dirty --match="v[0-9]*")
.DEFAULT_GOAL := default
## Dependencies
.PHONY: $(TARGETS)
GOLANGCI_LINT_VERSION := v1.63.4
CONTROLLER_TOOLS_VERSION ?= v0.14.0
GINKGO_VERSION ?= v2.21.0
ENVTEST_VERSION ?= latest
ENVTEST_K8S_VERSION := 1.31.0
CRD_REF_DOCS_VER ?= v0.1.0
GOLANGCI_LINT ?= go run github.com/golangci/golangci-lint/cmd/golangci-lint@$(GOLANGCI_LINT_VERSION)
CONTROLLER_GEN ?= go run sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_TOOLS_VERSION)
GINKGO ?= go run github.com/onsi/ginkgo/v2/ginkgo@$(GINKGO_VERSION)
CRD_REF_DOCS := go run github.com/elastic/crd-ref-docs@$(CRD_REF_DOCS_VER)
ENVTEST ?= go run sigs.k8s.io/controller-runtime/tools/setup-envtest@$(ENVTEST_VERSION)
ENVTEST_DIR ?= $(shell pwd)/.envtest
export KUBEBUILDER_ASSETS ?= $(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(ENVTEST_DIR) -p path)
.PHONY: all
all: version build-crds build package ## Run 'make' or 'make all' to run 'version', 'build-crds', 'build' and 'package'
.PHONY: version
version: ## Print the current version
@echo $(VERSION)
.PHONY: build
build: ## Build the K3k binaries (k3k, k3k-kubelet and k3kcli)
@VERSION=$(VERSION) ./scripts/build
.PHONY: package
package: package-k3k package-k3k-kubelet ## Package the k3k and k3k-kubelet Docker images
.PHONY: package-%
package-%:
docker build -f package/Dockerfile.$* \
-t $(REPO)/$*:$(VERSION) \
-t $(REPO)/$*:latest \
-t $(REPO)/$*:dev .
.PHONY: push
push: push-k3k push-k3k-kubelet ## Push the K3k images to the registry
.PHONY: push-%
push-%:
docker push $(REPO)/$*:$(VERSION)
docker push $(REPO)/$*:latest
docker push $(REPO)/$*:dev
.PHONY: test
test: ## Run all the tests
$(GINKGO) -v -r --label-filter=$(label-filter)
.PHONY: test-unit
test-unit: ## Run the unit tests (skips the e2e)
$(GINKGO) -v -r --skip-file=tests/*
.PHONY: test-controller
test-controller: ## Run the controller tests (pkg/controller)
$(GINKGO) -v -r pkg/controller
.PHONY: test-e2e
test-e2e: ## Run the e2e tests
$(GINKGO) -v -r tests
.PHONY: build-crds
build-crds: ## Build the CRDs specs
@# This will return non-zero until all of our objects in ./pkg/apis can generate valid crds.
@# allowDangerousTypes is needed for struct that use floats
$(CONTROLLER_GEN) crd:generateEmbeddedObjectMeta=true,allowDangerousTypes=false \
paths=./pkg/apis/... \
output:crd:dir=./charts/k3k/crds
.PHONY: docs
docs: ## Build the CRDs and CLI docs
$(CRD_REF_DOCS) --config=./docs/crds/config.yaml --renderer=markdown --source-path=./pkg/apis/k3k.io/v1alpha1 --output-path=./docs/crds/crd-docs.md
@go run ./docs/cli/genclidoc.go
.PHONY: lint
lint: ## Find any linting issues in the project
$(GOLANGCI_LINT) run --timeout=5m
.PHONY: validate
validate: build-crds docs ## Validate the project checking for any dependency or doc mismatch
$(GINKGO) unfocus
go mod tidy
git status --porcelain
git --no-pager diff --exit-code
.PHONY: install
install: ## Install K3k with Helm on the targeted Kubernetes cluster
helm upgrade --install --namespace k3k-system --create-namespace \
--set image.repository=$(REPO)/k3k \
--set image.tag=$(VERSION) \
--set sharedAgent.image.repository=$(REPO)/k3k-kubelet \
--set sharedAgent.image.tag=$(VERSION) \
k3k ./charts/k3k/
.PHONY: help
help: ## Show this help.
@egrep -h '\s##\s' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m %-30s\033[0m %s\n", $$1, $$2}'

View File

@@ -39,7 +39,10 @@ This section provides instructions on how to install K3k and the `k3kcli`.
### Prerequisites
* [Helm](https://helm.sh) must be installed to use the charts. Please refer to Helm's [documentation](https://helm.sh/docs) to get started.
* An existing [RKE2](https://docs.rke2.io/install/quickstart) Kubernetes cluster (recommended).
* A configured storage provider with a default storage class.
**Note:** If you do not have a storage provider, you can configure the cluster to use ephemeral or static storage. Please consult the [k3kcli advanced usage](./docs/advanced-usage.md#using-the-cli) for instructions on using these options.
### Install the K3k controller
@@ -68,7 +71,7 @@ To install it, simply download the latest available version for your architectur
For example, you can download the Linux amd64 version with:
```
wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v0.2.2-rc4/k3kcli-linux-amd64 && \
wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v0.3.0/k3kcli-linux-amd64 && \
chmod +x k3kcli && \
sudo mv k3kcli /usr/local/bin
```
@@ -76,7 +79,7 @@ wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v0.2.2-rc4/k3kc
You should now be able to run:
```bash
-> % k3kcli --version
k3kcli Version: v0.2.2-rc4
k3kcli Version: v0.3.0
```
@@ -94,6 +97,16 @@ To create a new K3k cluster, use the following command:
```bash
k3kcli cluster create mycluster
```
> [!NOTE]
> **Creating a K3k Cluster on a Rancher-Managed Host Cluster**
>
> If your *host* Kubernetes cluster is managed by Rancher (e.g., your kubeconfig's `server` address includes a Rancher URL), use the `--kubeconfig-server` flag when creating your K3k cluster:
>
>```bash
>k3kcli cluster create --kubeconfig-server <host_node_IP_or_load_balancer_IP> mycluster
>```
>
> This ensures the generated kubeconfig connects to the correct endpoint.
When the K3s server is ready, `k3kcli` will generate the necessary kubeconfig file and print instructions on how to use it.

View File

@@ -2,5 +2,5 @@ apiVersion: v2
name: k3k
description: A Helm chart for K3K
type: application
version: 0.1.5-r1
appVersion: v0.2.2-rc4
version: 0.3.1-r2
appVersion: v0.3.1

View File

@@ -17,6 +17,10 @@ spec:
- name: v1alpha1
schema:
openAPIV3Schema:
description: |-
Cluster defines a virtual Kubernetes cluster managed by k3k.
It specifies the desired state of a virtual cluster, including version, node configuration, and networking.
k3k uses this to provision and manage these virtual clusters.
properties:
apiVersion:
description: |-
@@ -37,114 +41,103 @@ spec:
type: object
spec:
default: {}
description: Spec defines the desired state of the Cluster.
properties:
addons:
description: Addons is a list of secrets containing raw YAML which
will be deployed in the virtual K3k cluster on startup.
description: Addons specifies secrets containing raw YAML to deploy
on cluster startup.
items:
description: Addon specifies a Secret containing YAML to be deployed
on cluster startup.
properties:
secretNamespace:
description: SecretNamespace is the namespace of the Secret.
type: string
secretRef:
description: SecretRef is the name of the Secret.
type: string
type: object
type: array
agentArgs:
description: AgentArgs are the ordered key value pairs (e.x. "testArg",
"testValue") for the K3s pods running in agent mode.
description: |-
AgentArgs specifies ordered key-value pairs for K3s agent pods.
Example: ["--node-name=my-agent-node"]
items:
type: string
type: array
agents:
default: 0
description: Agents is the number of K3s pods to run in agent (worker)
mode.
description: |-
Agents specifies the number of K3s pods to run in agent (worker) mode.
Must be 0 or greater. Defaults to 0.
This field is ignored in "shared" mode.
format: int32
type: integer
x-kubernetes-validations:
- message: invalid value for agents
rule: self >= 0
clusterCIDR:
description: ClusterCIDR is the CIDR range for the pods of the cluster.
Defaults to 10.42.0.0/16.
description: |-
ClusterCIDR is the CIDR range for pod IPs.
Defaults to 10.42.0.0/16 in shared mode and 10.52.0.0/16 in virtual mode.
This field is immutable.
type: string
x-kubernetes-validations:
- message: clusterCIDR is immutable
rule: self == oldSelf
clusterDNS:
description: |-
ClusterDNS is the IP address for the coredns service. Needs to be in the range provided by ServiceCIDR or CoreDNS may not deploy.
Defaults to 10.43.0.10.
ClusterDNS is the IP address for the CoreDNS service.
Must be within the ServiceCIDR range. Defaults to 10.43.0.10.
This field is immutable.
type: string
x-kubernetes-validations:
- message: clusterDNS is immutable
rule: self == oldSelf
clusterLimit:
description: Limit is the limits that apply for the server/worker
nodes.
properties:
serverLimit:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: ServerLimit is the limits (cpu/mem) that apply to
the server nodes
type: object
workerLimit:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: WorkerLimit is the limits (cpu/mem) that apply to
the agent nodes
type: object
type: object
expose:
description: |-
Expose contains options for exposing the apiserver inside/outside of the cluster. By default, this is only exposed as a
clusterIP which is relatively secure, but difficult to access outside of the cluster.
Expose specifies options for exposing the API server.
By default, it's only exposed as a ClusterIP.
properties:
ingress:
description: Ingress specifies options for exposing the API server
through an Ingress.
properties:
annotations:
additionalProperties:
type: string
description: Annotations is a key value map that will enrich
the Ingress annotations
description: Annotations specifies annotations to add to the
Ingress.
type: object
ingressClassName:
description: IngressClassName specifies the IngressClass to
use for the Ingress.
type: string
type: object
loadbalancer:
properties:
enabled:
type: boolean
required:
- enabled
description: LoadBalancer specifies options for exposing the API
server through a LoadBalancer service.
type: object
nodePort:
description: NodePort specifies options for exposing the API server
through NodePort.
properties:
etcdPort:
description: |-
ETCDPort is the port on each node on which the ETCD service is exposed when type is NodePort.
If not specified, a port will be allocated (default: 30000-32767)
If not specified, a port will be allocated (default: 30000-32767).
format: int32
type: integer
serverPort:
description: |-
ServerPort is the port on each node on which the K3s server service is exposed when type is NodePort.
If not specified, a port will be allocated (default: 30000-32767)
If not specified, a port will be allocated (default: 30000-32767).
format: int32
type: integer
servicePort:
description: |-
ServicePort is the port on each node on which the K3s service is exposed when type is NodePort.
If not specified, a port will be allocated (default: 30000-32767)
If not specified, a port will be allocated (default: 30000-32767).
format: int32
type: integer
type: object
@@ -158,8 +151,9 @@ spec:
- shared
- virtual
default: shared
description: Mode is the cluster provisioning mode which can be either
"shared" or "virtual". Defaults to "shared"
description: |-
Mode specifies the cluster provisioning mode: "shared" or "virtual".
Defaults to "shared". This field is immutable.
type: string
x-kubernetes-validations:
- message: mode is immutable
@@ -168,64 +162,84 @@ spec:
additionalProperties:
type: string
description: |-
NodeSelector is the node selector that will be applied to all server/agent pods.
In "shared" mode the node selector will be applied also to the workloads.
NodeSelector specifies node labels to constrain where server/agent pods are scheduled.
In "shared" mode, this also applies to workloads.
type: object
persistence:
default:
type: dynamic
description: |-
Persistence contains options controlling how the etcd data of the virtual cluster is persisted. By default, no data
persistence is guaranteed, so restart of a virtual cluster pod may result in data loss without this field.
Persistence specifies options for persisting etcd data.
Defaults to dynamic persistence, which uses a PersistentVolumeClaim to provide data persistence.
A default StorageClass is required for dynamic persistence.
properties:
storageClassName:
description: |-
StorageClassName is the name of the StorageClass to use for the PVC.
This field is only relevant in "dynamic" mode.
type: string
storageRequestSize:
description: |-
StorageRequestSize is the requested size for the PVC.
This field is only relevant in "dynamic" mode.
type: string
type:
default: dynamic
description: PersistenceMode is the storage mode of a Cluster.
description: Type specifies the persistence mode.
type: string
required:
- type
type: object
priorityClass:
description: |-
PriorityClass is the priorityClassName that will be applied to all server/agent pods.
In "shared" mode the priorityClassName will be applied also to the workloads.
PriorityClass specifies the priorityClassName for server/agent pods.
In "shared" mode, this also applies to workloads.
type: string
serverArgs:
description: ServerArgs are the ordered key value pairs (e.x. "testArg",
"testValue") for the K3s pods running in server mode.
description: |-
ServerArgs specifies ordered key-value pairs for K3s server pods.
Example: ["--tls-san=example.com"]
items:
type: string
type: array
serverLimit:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: ServerLimit specifies resource limits for server nodes.
type: object
servers:
default: 1
description: Servers is the number of K3s pods to run in server (controlplane)
mode.
description: |-
Servers specifies the number of K3s pods to run in server (control plane) mode.
Must be at least 1. Defaults to 1.
format: int32
type: integer
x-kubernetes-validations:
- message: cluster must have at least one server
rule: self >= 1
serviceCIDR:
description: ServiceCIDR is the CIDR range for the services in the
cluster. Defaults to 10.43.0.0/16.
description: |-
ServiceCIDR is the CIDR range for service IPs.
Defaults to 10.43.0.0/16 in shared mode and 10.53.0.0/16 in virtual mode.
This field is immutable.
type: string
x-kubernetes-validations:
- message: serviceCIDR is immutable
rule: self == oldSelf
tlsSANs:
description: TLSSANs are the subjectAlternativeNames for the certificate
the K3s server will use.
description: TLSSANs specifies subject alternative names for the K3s
server certificate.
items:
type: string
type: array
tokenSecretRef:
description: |-
TokenSecretRef is Secret reference used as a token join server and worker nodes to the cluster. The controller
assumes that the secret has a field "token" in its data, any other fields in the secret will be ignored.
TokenSecretRef is a Secret reference containing the token used by worker nodes to join the cluster.
The Secret must have a "token" field in its data.
properties:
name:
description: name is unique within a namespace to reference a
@@ -238,34 +252,59 @@ spec:
type: object
x-kubernetes-map-type: atomic
version:
description: Version is a string representing the Kubernetes version
to be used by the virtual nodes.
description: |-
Version is the K3s version to use for the virtual nodes.
It should follow the K3s versioning convention (e.g., v1.28.2-k3s1).
If not specified, the Kubernetes version of the host node will be used.
type: string
workerLimit:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: WorkerLimit specifies resource limits for agent nodes.
type: object
type: object
status:
description: Status reflects the observed state of the Cluster.
properties:
clusterCIDR:
description: ClusterCIDR is the CIDR range for pod IPs.
type: string
clusterDNS:
description: ClusterDNS is the IP address for the CoreDNS service.
type: string
hostVersion:
description: HostVersion is the Kubernetes version of the host node.
type: string
persistence:
description: Persistence specifies options for persisting etcd data.
properties:
storageClassName:
description: |-
StorageClassName is the name of the StorageClass to use for the PVC.
This field is only relevant in "dynamic" mode.
type: string
storageRequestSize:
description: |-
StorageRequestSize is the requested size for the PVC.
This field is only relevant in "dynamic" mode.
type: string
type:
default: dynamic
description: PersistenceMode is the storage mode of a Cluster.
description: Type specifies the persistence mode.
type: string
required:
- type
type: object
serviceCIDR:
description: ServiceCIDR is the CIDR range for service IPs.
type: string
tlsSANs:
description: TLSSANs specifies subject alternative names for the K3s
server certificate.
items:
type: string
type: array

View File

@@ -14,9 +14,19 @@ spec:
singular: clusterset
scope: Namespaced
versions:
- name: v1alpha1
- additionalPrinterColumns:
- jsonPath: .spec.displayName
name: Display Name
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1alpha1
schema:
openAPIV3Schema:
description: |-
ClusterSet represents a group of virtual Kubernetes clusters managed by k3k.
It allows defining common configurations and constraints for the clusters within the set.
properties:
apiVersion:
description: |-
@@ -37,12 +47,12 @@ spec:
type: object
spec:
default: {}
description: Spec is the spec of the ClusterSet
description: Spec defines the desired state of the ClusterSet.
properties:
allowedNodeTypes:
allowedModeTypes:
default:
- shared
description: AllowedNodeTypes are the allowed cluster provisioning
description: AllowedModeTypes specifies the allowed cluster provisioning
modes. Defaults to [shared].
items:
description: ClusterMode is the possible provisioning mode of a
@@ -56,70 +66,178 @@ spec:
x-kubernetes-validations:
- message: mode is immutable
rule: self == oldSelf
defaultLimits:
description: DefaultLimits are the limits used for servers/agents
when a cluster in the set doesn't provide any
properties:
serverLimit:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: ServerLimit is the limits (cpu/mem) that apply to
the server nodes
type: object
workerLimit:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: WorkerLimit is the limits (cpu/mem) that apply to
the agent nodes
type: object
type: object
defaultNodeSelector:
additionalProperties:
type: string
description: DefaultNodeSelector is the node selector that applies
to all clusters (server + agent) in the set
description: DefaultNodeSelector specifies the node selector that
applies to all clusters (server + agent) in the set.
type: object
defaultPriorityClass:
description: DefaultPriorityClass is the priorityClassName applied
to all pods of all clusters in the set
description: DefaultPriorityClass specifies the priorityClassName
applied to all pods of all clusters in the set.
type: string
disableNetworkPolicy:
description: DisableNetworkPolicy is an option that will disable the
creation of a default networkpolicy for cluster isolation
description: DisableNetworkPolicy indicates whether to disable the
creation of a default network policy for cluster isolation.
type: boolean
maxLimits:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: MaxLimits are the limits that apply to all clusters (server
+ agent) in the set
displayName:
description: DisplayName is the human-readable name for the set.
type: string
limit:
description: |-
Limit specifies the LimitRange that will be applied to all pods within the ClusterSet
to set defaults and constraints (min/max)
properties:
limits:
description: Limits is the list of LimitRangeItem objects that
are enforced.
items:
description: LimitRangeItem defines a min/max usage limit for
any resource that matches on kind.
properties:
default:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: Default resource requirement limit value by
resource name if resource limit is omitted.
type: object
defaultRequest:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: DefaultRequest is the default resource requirement
request value by resource name if resource request is
omitted.
type: object
max:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: Max usage constraints on this kind by resource
name.
type: object
maxLimitRequestRatio:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: MaxLimitRequestRatio if specified, the named
resource must have a request and limit that are both non-zero
where limit divided by request is less than or equal to
the enumerated value; this represents the max burst for
the named resource.
type: object
min:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: Min usage constraints on this kind by resource
name.
type: object
type:
description: Type of resource that this limit applies to.
type: string
required:
- type
type: object
type: array
required:
- limits
type: object
podSecurityAdmissionLevel:
description: PodSecurityAdmissionLevel is the policy level applied
to the pods in the namespace.
description: PodSecurityAdmissionLevel specifies the pod security
admission level applied to the pods in the namespace.
enum:
- privileged
- baseline
- restricted
type: string
quota:
description: Quota specifies the resource limits for clusters within
a clusterset.
properties:
hard:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: |-
hard is the set of desired hard limits for each named resource.
More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
type: object
scopeSelector:
description: |-
scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota
but expressed using ScopeSelectorOperator in combination with possible values.
For a resource to match, both scopes AND scopeSelector (if specified in spec), must be matched.
properties:
matchExpressions:
description: A list of scope selector requirements by scope
of the resources.
items:
description: |-
A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator
that relates the scope name and values.
properties:
operator:
description: |-
Represents a scope's relationship to a set of values.
Valid operators are In, NotIn, Exists, DoesNotExist.
type: string
scopeName:
description: The name of the scope that the selector
applies to.
type: string
values:
description: |-
An array of string values. If the operator is In or NotIn,
the values array must be non-empty. If the operator is Exists or DoesNotExist,
the values array must be empty.
This array is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- operator
- scopeName
type: object
type: array
type: object
x-kubernetes-map-type: atomic
scopes:
description: |-
A collection of filters that must match each object tracked by a quota.
If not specified, the quota matches all objects.
items:
description: A ResourceQuotaScope defines a filter that must
match each object tracked by a quota
type: string
type: array
type: object
type: object
status:
description: Status is the status of the ClusterSet
description: Status reflects the observed state of the ClusterSet.
properties:
conditions:
description: Conditions are the invidual conditions for the cluster
set
description: Conditions are the individual conditions for the cluster
set.
items:
description: "Condition contains details for one aspect of the current
state of this API Resource.\n---\nThis struct is intended for
@@ -190,7 +308,7 @@ spec:
type: array
lastUpdateTime:
description: LastUpdate is the timestamp when the status was last
updated
updated.
type: string
observedGeneration:
description: ObservedGeneration was the generation at the time the
@@ -198,12 +316,15 @@ spec:
format: int64
type: integer
summary:
description: Summary is a summary of the status
description: Summary is a summary of the status.
type: string
type: object
required:
- spec
type: object
x-kubernetes-validations:
- message: Name must match 'default'
rule: self.metadata.name == "default"
served: true
storage: true
subresources:

View File

@@ -1,16 +1,16 @@
package cluster
package cmds
import (
"github.com/urfave/cli/v2"
)
func NewCommand() *cli.Command {
func NewClusterCmd(appCtx *AppContext) *cli.Command {
return &cli.Command{
Name: "cluster",
Usage: "cluster command",
Subcommands: []*cli.Command{
NewCreateCmd(),
NewDeleteCmd(),
NewClusterCreateCmd(appCtx),
NewClusterDeleteCmd(appCtx),
},
}
}

View File

@@ -1,57 +0,0 @@
package cluster
import (
"context"
"errors"
"github.com/rancher/k3k/cli/cmds"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/controller-runtime/pkg/client"
)
func NewDeleteCmd() *cli.Command {
return &cli.Command{
Name: "delete",
Usage: "Delete an existing cluster",
Action: delete,
Flags: cmds.CommonFlags,
}
}
func delete(clx *cli.Context) error {
ctx := context.Background()
name := clx.Args().First()
if name == "" {
return errors.New("empty cluster name")
} else if name == k3kcluster.ClusterInvalidName {
return errors.New("invalid cluster name")
}
restConfig, err := clientcmd.BuildConfigFromFlags("", cmds.Kubeconfig)
if err != nil {
return err
}
ctrlClient, err := client.New(restConfig, client.Options{
Scheme: Scheme,
})
if err != nil {
return err
}
logrus.Infof("deleting [%s] cluster", name)
cluster := v1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: cmds.Namespace(),
},
}
return ctrlClient.Delete(ctx, &cluster)
}

View File

@@ -1,15 +1,14 @@
package cluster
package cmds
import (
"context"
"errors"
"fmt"
"net/url"
"os"
"path/filepath"
"slices"
"strings"
"time"
"github.com/rancher/k3k/cli/cmds"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/controller/kubeconfig"
@@ -18,23 +17,13 @@ import (
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/client-go/util/retry"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
)
var Scheme = runtime.NewScheme()
func init() {
_ = clientgoscheme.AddToScheme(Scheme)
_ = v1alpha1.AddToScheme(Scheme)
}
type CreateConfig struct {
token string
clusterCIDR string
@@ -48,73 +37,109 @@ type CreateConfig struct {
version string
mode string
kubeconfigServerHost string
clusterset string
}
func NewCreateCmd() *cli.Command {
func NewClusterCreateCmd(appCtx *AppContext) *cli.Command {
createConfig := &CreateConfig{}
createFlags := NewCreateFlags(createConfig)
return &cli.Command{
Name: "create",
Usage: "Create new cluster",
Action: createAction(createConfig),
Flags: append(cmds.CommonFlags, createFlags...),
Args: false,
ArgsUsage: "NAME",
Name: "create",
Usage: "Create new cluster",
UsageText: "k3kcli cluster create [command options] NAME",
Action: createAction(appCtx, createConfig),
Flags: WithCommonFlags(appCtx, createFlags...),
HideHelpCommand: true,
}
}
func createAction(config *CreateConfig) cli.ActionFunc {
func createAction(appCtx *AppContext, config *CreateConfig) cli.ActionFunc {
return func(clx *cli.Context) error {
ctx := context.Background()
client := appCtx.Client
if clx.NArg() != 1 {
return cli.ShowSubcommandHelp(clx)
}
name := clx.Args().First()
if name == "" {
return errors.New("empty cluster name")
} else if name == k3kcluster.ClusterInvalidName {
if name == k3kcluster.ClusterInvalidName {
return errors.New("invalid cluster name")
}
restConfig, err := clientcmd.BuildConfigFromFlags("", cmds.Kubeconfig)
if err != nil {
namespace := appCtx.Namespace(name)
// if clusterset is set, use the namespace of the clusterset
if config.clusterset != "" {
namespace = appCtx.Namespace(config.clusterset)
}
if err := createNamespace(ctx, client, namespace); err != nil {
return err
}
ctrlClient, err := client.New(restConfig, client.Options{
Scheme: Scheme,
})
if err != nil {
return err
// if clusterset is set, create the cluster set
if config.clusterset != "" {
namespace = appCtx.Namespace(config.clusterset)
clusterSet := &v1alpha1.ClusterSet{}
if err := client.Get(ctx, types.NamespacedName{Name: "default", Namespace: namespace}, clusterSet); err != nil {
if !apierrors.IsNotFound(err) {
return err
}
clusterSet, err = createClusterSet(ctx, client, namespace, v1alpha1.ClusterMode(config.mode), config.clusterset)
if err != nil {
return err
}
}
logrus.Infof("ClusterSet in namespace [%s] available", namespace)
if !slices.Contains(clusterSet.Spec.AllowedModeTypes, v1alpha1.ClusterMode(config.mode)) {
return fmt.Errorf("invalid '%s' Cluster mode. ClusterSet only allows %v", config.mode, clusterSet.Spec.AllowedModeTypes)
}
}
if strings.Contains(config.version, "+") {
orig := config.version
config.version = strings.Replace(config.version, "+", "-", -1)
logrus.Warnf("Invalid K3s docker reference version: '%s'. Using '%s' instead", orig, config.version)
}
if config.token != "" {
logrus.Infof("Creating cluster token secret")
obj := k3kcluster.TokenSecretObj(config.token, name, cmds.Namespace())
if err := ctrlClient.Create(ctx, &obj); err != nil {
logrus.Info("Creating cluster token secret")
obj := k3kcluster.TokenSecretObj(config.token, name, namespace)
if err := client.Create(ctx, &obj); err != nil {
return err
}
}
logrus.Infof("Creating a new cluster [%s]", name)
logrus.Infof("Creating cluster [%s] in namespace [%s]", name, namespace)
cluster := newCluster(name, cmds.Namespace(), config)
cluster := newCluster(name, namespace, config)
cluster.Spec.Expose = &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{},
}
// add Host IP address as an extra TLS-SAN to expose the k3k cluster
url, err := url.Parse(restConfig.Host)
url, err := url.Parse(appCtx.RestConfig.Host)
if err != nil {
return err
}
host := strings.Split(url.Host, ":")
if config.kubeconfigServerHost != "" {
host = []string{config.kubeconfigServerHost}
}
cluster.Spec.TLSSANs = []string{host[0]}
if err := ctrlClient.Create(ctx, cluster); err != nil {
if err := client.Create(ctx, cluster); err != nil {
if apierrors.IsAlreadyExists(err) {
logrus.Infof("Cluster [%s] already exists", name)
} else {
@@ -136,30 +161,15 @@ func createAction(config *CreateConfig) cli.ActionFunc {
cfg := kubeconfig.New()
var kubeconfig *clientcmdapi.Config
if err := retry.OnError(availableBackoff, apierrors.IsNotFound, func() error {
kubeconfig, err = cfg.Extract(ctx, ctrlClient, cluster, host[0])
kubeconfig, err = cfg.Extract(ctx, client, cluster, host[0])
return err
}); err != nil {
return err
}
pwd, err := os.Getwd()
if err != nil {
return err
}
logrus.Infof(`You can start using the cluster with:
export KUBECONFIG=%s
kubectl cluster-info
`, filepath.Join(pwd, cluster.Name+"-kubeconfig.yaml"))
kubeconfigData, err := clientcmd.Write(*kubeconfig)
if err != nil {
return err
}
return os.WriteFile(cluster.Name+"-kubeconfig.yaml", kubeconfigData, 0644)
return writeKubeconfigFile(cluster, kubeconfig)
}
}
@@ -191,6 +201,7 @@ func newCluster(name, namespace string, config *CreateConfig) *v1alpha1.Cluster
if config.storageClassName == "" {
cluster.Spec.Persistence.StorageClassName = nil
}
if config.token != "" {
cluster.Spec.TokenSecretRef = &v1.SecretReference{
Name: k3kcluster.TokenSecretName(name),

View File

@@ -1,4 +1,4 @@
package cluster
package cmds
import (
"errors"
@@ -43,15 +43,15 @@ func NewCreateFlags(config *CreateConfig) []cli.Flag {
},
&cli.StringFlag{
Name: "persistence-type",
Usage: "persistence mode for the nodes (ephemeral, static, dynamic)",
Value: string(v1alpha1.DynamicNodesType),
Usage: "persistence mode for the nodes (dynamic, ephemeral, static)",
Value: string(v1alpha1.DynamicPersistenceMode),
Destination: &config.persistenceType,
Action: func(ctx *cli.Context, value string) error {
switch v1alpha1.PersistenceMode(value) {
case v1alpha1.EphemeralNodeType, v1alpha1.DynamicNodesType:
case v1alpha1.EphemeralPersistenceMode, v1alpha1.DynamicPersistenceMode:
return nil
default:
return errors.New(`persistence-type should be one of "ephemeral", "static" or "dynamic"`)
return errors.New(`persistence-type should be one of "dynamic", "ephemeral" or "static"`)
}
},
},
@@ -61,14 +61,14 @@ func NewCreateFlags(config *CreateConfig) []cli.Flag {
Destination: &config.storageClassName,
},
&cli.StringSliceFlag{
Name: "server-args",
Usage: "servers extra arguments",
Value: &config.serverArgs,
Name: "server-args",
Usage: "servers extra arguments",
Destination: &config.serverArgs,
},
&cli.StringSliceFlag{
Name: "agent-args",
Usage: "agents extra arguments",
Value: &config.agentArgs,
Name: "agent-args",
Usage: "agents extra arguments",
Destination: &config.agentArgs,
},
&cli.StringFlag{
Name: "version",
@@ -77,7 +77,7 @@ func NewCreateFlags(config *CreateConfig) []cli.Flag {
},
&cli.StringFlag{
Name: "mode",
Usage: "k3k mode type",
Usage: "k3k mode type (shared, virtual)",
Destination: &config.mode,
Value: "shared",
Action: func(ctx *cli.Context, value string) error {
@@ -94,5 +94,10 @@ func NewCreateFlags(config *CreateConfig) []cli.Flag {
Usage: "override the kubeconfig server host",
Destination: &config.kubeconfigServerHost,
},
&cli.StringFlag{
Name: "clusterset",
Usage: "The clusterset to create the cluster in",
Destination: &config.clusterset,
},
}
}

117
cli/cmds/cluster_delete.go Normal file
View File

@@ -0,0 +1,117 @@
package cmds
import (
"context"
"errors"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)
// keepData is bound to the --keep-data flag; when set, cluster secrets and
// persistent volumes survive the cluster deletion.
var keepData bool

// NewClusterDeleteCmd returns the "cluster delete" subcommand, which removes
// an existing k3k cluster and, unless --keep-data is given, its persistent data.
func NewClusterDeleteCmd(appCtx *AppContext) *cli.Command {
	keepDataFlag := &cli.BoolFlag{
		Name:        "keep-data",
		Usage:       "keeps persistence volumes created for the cluster after deletion",
		Destination: &keepData,
	}

	cmd := &cli.Command{
		Name:            "delete",
		Usage:           "Delete an existing cluster",
		UsageText:       "k3kcli cluster delete [command options] NAME",
		Action:          delete(appCtx),
		Flags:           WithCommonFlags(appCtx, keepDataFlag),
		HideHelpCommand: true,
	}

	return cmd
}
// delete returns the cli action that deletes the named cluster.
// With --keep-data the cluster's token and webhook secrets are detached from
// the owner chain so they survive deletion; otherwise the PVCs created for the
// server pods are removed along with the cluster.
func delete(appCtx *AppContext) cli.ActionFunc {
	return func(clx *cli.Context) error {
		ctx := context.Background()
		client := appCtx.Client

		if clx.NArg() != 1 {
			return cli.ShowSubcommandHelp(clx)
		}

		name := clx.Args().First()
		if name == k3kcluster.ClusterInvalidName {
			return errors.New("invalid cluster name")
		}

		namespace := appCtx.Namespace(name)
		logrus.Infof("Deleting [%s] cluster in namespace [%s]", name, namespace)

		cluster := v1alpha1.Cluster{
			ObjectMeta: metav1.ObjectMeta{
				Name:      name,
				Namespace: namespace,
			},
		}

		switch {
		case keepData:
			// Detach the token and webhook secrets from the cluster's owner
			// references so garbage collection leaves them in place.
			secretNames := []string{
				k3kcluster.TokenSecretName(cluster.Name),
				agent.WebhookSecretName(cluster.Name),
			}
			for _, secretName := range secretNames {
				if err := RemoveOwnerReferenceFromSecret(ctx, secretName, client, cluster); err != nil {
					return err
				}
			}
		default:
			// Delete the persistent volume claims backing the server pods.
			matchingLabels := ctrlclient.MatchingLabels(map[string]string{"cluster": cluster.Name, "role": "server"})
			listOpts := ctrlclient.ListOptions{Namespace: cluster.Namespace}
			matchingLabels.ApplyToList(&listOpts)

			deleteOpts := &ctrlclient.DeleteAllOfOptions{ListOptions: listOpts}
			if err := client.DeleteAllOf(ctx, &v1.PersistentVolumeClaim{}, deleteOpts); err != nil {
				return ctrlclient.IgnoreNotFound(err)
			}
		}

		if err := client.Delete(ctx, &cluster); err != nil {
			return ctrlclient.IgnoreNotFound(err)
		}

		return nil
	}
}
// RemoveOwnerReferenceFromSecret drops the controller owner reference from the
// named secret in the cluster's namespace, so the secret is not garbage
// collected when the cluster is deleted. A missing secret is logged and
// treated as success.
func RemoveOwnerReferenceFromSecret(ctx context.Context, name string, cl ctrlclient.Client, cluster v1alpha1.Cluster) error {
	key := types.NamespacedName{
		Name:      name,
		Namespace: cluster.Namespace,
	}

	var secret v1.Secret
	if err := cl.Get(ctx, key, &secret); err != nil {
		if apierrors.IsNotFound(err) {
			logrus.Warnf("%s secret is not found", name)
			return nil
		}

		return err
	}

	// Nothing to detach when the secret has no controller owner.
	if !controllerutil.HasControllerReference(&secret) {
		return nil
	}

	if err := controllerutil.RemoveOwnerReference(&cluster, &secret, cl.Scheme()); err != nil {
		return err
	}

	return cl.Update(ctx, &secret)
}

16
cli/cmds/clusterset.go Normal file
View File

@@ -0,0 +1,16 @@
package cmds
import (
"github.com/urfave/cli/v2"
)
// NewClusterSetCmd returns the parent "clusterset" command grouping the
// create and delete subcommands.
func NewClusterSetCmd(appCtx *AppContext) *cli.Command {
	subcommands := []*cli.Command{
		NewClusterSetCreateCmd(appCtx),
		NewClusterSetDeleteCmd(appCtx),
	}

	return &cli.Command{
		Name:        "clusterset",
		Usage:       "clusterset command",
		Subcommands: subcommands,
	}
}

View File

@@ -0,0 +1,138 @@
package cmds
import (
"context"
"errors"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// ClusterSetCreateConfig holds the flag values collected by the
// "clusterset create" command.
type ClusterSetCreateConfig struct {
	// mode is the allowed cluster provisioning mode for the set
	// ("shared" or "virtual"); validated by the --mode flag action.
	mode string
	// displayName is the human-readable name of the set; when empty it
	// falls back to the positional NAME argument.
	displayName string
}
// NewClusterSetCreateCmd returns the "clusterset create" subcommand, which
// ensures a namespace exists and creates the singleton "default" ClusterSet
// inside it.
func NewClusterSetCreateCmd(appCtx *AppContext) *cli.Command {
	config := &ClusterSetCreateConfig{}

	// validateMode rejects any mode other than "shared" or "virtual".
	validateMode := func(ctx *cli.Context, value string) error {
		if value != string(v1alpha1.VirtualClusterMode) && value != string(v1alpha1.SharedClusterMode) {
			return errors.New(`mode should be one of "shared" or "virtual"`)
		}

		return nil
	}

	modeFlag := &cli.StringFlag{
		Name:        "mode",
		Usage:       "The allowed mode type of the clusterset",
		Destination: &config.mode,
		Value:       "shared",
		Action:      validateMode,
	}
	displayNameFlag := &cli.StringFlag{
		Name:        "display-name",
		Usage:       "The display name of the clusterset",
		Destination: &config.displayName,
	}

	return &cli.Command{
		Name:            "create",
		Usage:           "Create new clusterset",
		UsageText:       "k3kcli clusterset create [command options] NAME",
		Action:          clusterSetCreateAction(appCtx, config),
		Flags:           WithCommonFlags(appCtx, modeFlag, displayNameFlag),
		HideHelpCommand: true,
	}
}
// clusterSetCreateAction builds the cli action for "clusterset create": it
// validates the NAME argument, ensures the target namespace exists, and
// creates the "default" ClusterSet in it.
func clusterSetCreateAction(appCtx *AppContext, config *ClusterSetCreateConfig) cli.ActionFunc {
	return func(clx *cli.Context) error {
		ctx := context.Background()
		client := appCtx.Client

		if clx.NArg() != 1 {
			return cli.ShowSubcommandHelp(clx)
		}

		name := clx.Args().First()
		if name == k3kcluster.ClusterInvalidName {
			return errors.New("invalid cluster name")
		}

		// The display name falls back to the positional NAME argument.
		displayName := name
		if config.displayName != "" {
			displayName = config.displayName
		}

		// if both display name and namespace are set the name is ignored
		if config.displayName != "" && appCtx.namespace != "" {
			logrus.Warnf("Ignoring name [%s] because display name and namespace are set", name)
		}

		namespace := appCtx.Namespace(name)

		if err := createNamespace(ctx, client, namespace); err != nil {
			return err
		}

		_, err := createClusterSet(ctx, client, namespace, v1alpha1.ClusterMode(config.mode), displayName)

		return err
	}
}
// createNamespace ensures a namespace with the given name exists, creating it
// when the lookup reports NotFound. Any other lookup or create error is
// returned to the caller.
func createNamespace(ctx context.Context, client client.Client, name string) error {
	ns := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: name}}

	err := client.Get(ctx, types.NamespacedName{Name: name}, ns)
	if err == nil {
		// Namespace already present; nothing to do.
		return nil
	}

	if !apierrors.IsNotFound(err) {
		return err
	}

	logrus.Infof(`Creating namespace [%s]`, name)

	return client.Create(ctx, ns)
}
// createClusterSet creates the singleton "default" ClusterSet in the given
// namespace with the supplied allowed mode and display name. An already
// existing ClusterSet is logged and not treated as an error; the constructed
// object is returned either way.
func createClusterSet(ctx context.Context, client client.Client, namespace string, mode v1alpha1.ClusterMode, displayName string) (*v1alpha1.ClusterSet, error) {
	logrus.Infof("Creating clusterset in namespace [%s]", namespace)

	clusterSet := &v1alpha1.ClusterSet{
		TypeMeta: metav1.TypeMeta{
			Kind:       "ClusterSet",
			APIVersion: "k3k.io/v1alpha1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      "default",
			Namespace: namespace,
		},
		Spec: v1alpha1.ClusterSetSpec{
			AllowedModeTypes: []v1alpha1.ClusterMode{mode},
			DisplayName:      displayName,
		},
	}

	err := client.Create(ctx, clusterSet)

	switch {
	case err == nil:
		// Created successfully.
	case apierrors.IsAlreadyExists(err):
		logrus.Infof("ClusterSet in namespace [%s] already exists", namespace)
	default:
		return nil, err
	}

	return clusterSet, nil
}

View File

@@ -0,0 +1,61 @@
package cmds
import (
"context"
"errors"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// NewClusterSetDeleteCmd returns the "clusterset delete" subcommand, which
// removes the "default" ClusterSet from the namespace derived from NAME.
func NewClusterSetDeleteCmd(appCtx *AppContext) *cli.Command {
	cmd := &cli.Command{
		Name:            "delete",
		Usage:           "Delete an existing clusterset",
		UsageText:       "k3kcli clusterset delete [command options] NAME",
		Action:          clusterSetDeleteAction(appCtx),
		Flags:           WithCommonFlags(appCtx),
		HideHelpCommand: true,
	}

	return cmd
}
// clusterSetDeleteAction builds the cli action for "clusterset delete":
// it deletes the "default" ClusterSet from the namespace derived from the
// NAME argument. A missing ClusterSet is logged and not treated as an error.
func clusterSetDeleteAction(appCtx *AppContext) cli.ActionFunc {
	return func(clx *cli.Context) error {
		ctx := context.Background()
		client := appCtx.Client

		if clx.NArg() != 1 {
			return cli.ShowSubcommandHelp(clx)
		}

		name := clx.Args().First()
		if name == k3kcluster.ClusterInvalidName {
			return errors.New("invalid cluster name")
		}

		namespace := appCtx.Namespace(name)
		logrus.Infof("Deleting clusterset in namespace [%s]", namespace)

		clusterSet := &v1alpha1.ClusterSet{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "default",
				Namespace: namespace,
			},
		}

		err := client.Delete(ctx, clusterSet)
		if apierrors.IsNotFound(err) {
			logrus.Warnf("ClusterSet not found in namespace [%s]", namespace)
			return nil
		}

		return err
	}
}

View File

@@ -1,4 +1,4 @@
package kubeconfig
package cmds
import (
"context"
@@ -8,7 +8,6 @@ import (
"strings"
"time"
"github.com/rancher/k3k/cli/cmds"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/certs"
@@ -16,23 +15,14 @@ import (
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apiserver/pkg/authentication/user"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/client"
)
func init() {
_ = clientgoscheme.AddToScheme(Scheme)
_ = v1alpha1.AddToScheme(Scheme)
}
var (
Scheme = runtime.NewScheme()
name string
cn string
org cli.StringSlice
@@ -82,83 +72,88 @@ var (
}
)
var subcommands = []*cli.Command{
{
func NewKubeconfigCmd(appCtx *AppContext) *cli.Command {
return &cli.Command{
Name: "kubeconfig",
Usage: "Manage kubeconfig for clusters",
Subcommands: []*cli.Command{
NewKubeconfigGenerateCmd(appCtx),
},
}
}
func NewKubeconfigGenerateCmd(appCtx *AppContext) *cli.Command {
return &cli.Command{
Name: "generate",
Usage: "Generate kubeconfig for clusters",
SkipFlagParsing: false,
Action: generate,
Flags: append(cmds.CommonFlags, generateKubeconfigFlags...),
},
}
func NewCommand() *cli.Command {
return &cli.Command{
Name: "kubeconfig",
Usage: "Manage kubeconfig for clusters",
Subcommands: subcommands,
Action: generate(appCtx),
Flags: WithCommonFlags(appCtx, generateKubeconfigFlags...),
}
}
func generate(clx *cli.Context) error {
var cluster v1alpha1.Cluster
ctx := context.Background()
func generate(appCtx *AppContext) cli.ActionFunc {
return func(clx *cli.Context) error {
ctx := context.Background()
client := appCtx.Client
restConfig, err := clientcmd.BuildConfigFromFlags("", cmds.Kubeconfig)
if err != nil {
return err
}
clusterKey := types.NamespacedName{
Name: name,
Namespace: appCtx.Namespace(name),
}
ctrlClient, err := client.New(restConfig, client.Options{
Scheme: Scheme,
})
if err != nil {
return err
}
clusterKey := types.NamespacedName{
Name: name,
Namespace: cmds.Namespace(),
}
var cluster v1alpha1.Cluster
if err := ctrlClient.Get(ctx, clusterKey, &cluster); err != nil {
return err
}
if err := client.Get(ctx, clusterKey, &cluster); err != nil {
return err
}
url, err := url.Parse(restConfig.Host)
if err != nil {
return err
}
host := strings.Split(url.Host, ":")
if kubeconfigServerHost != "" {
host = []string{kubeconfigServerHost}
err := altNames.Set(kubeconfigServerHost)
url, err := url.Parse(appCtx.RestConfig.Host)
if err != nil {
return err
}
host := strings.Split(url.Host, ":")
if kubeconfigServerHost != "" {
host = []string{kubeconfigServerHost}
if err := altNames.Set(kubeconfigServerHost); err != nil {
return err
}
}
certAltNames := certs.AddSANs(altNames.Value())
orgs := org.Value()
if orgs == nil {
orgs = []string{user.SystemPrivilegedGroup}
}
cfg := kubeconfig.KubeConfig{
CN: cn,
ORG: orgs,
ExpiryDate: time.Hour * 24 * time.Duration(expirationDays),
AltNames: certAltNames,
}
logrus.Infof("waiting for cluster to be available..")
var kubeconfig *clientcmdapi.Config
if err := retry.OnError(controller.Backoff, apierrors.IsNotFound, func() error {
kubeconfig, err = cfg.Extract(ctx, client, &cluster, host[0])
return err
}); err != nil {
return err
}
return writeKubeconfigFile(&cluster, kubeconfig)
}
}
certAltNames := certs.AddSANs(altNames.Value())
orgs := org.Value()
if orgs == nil {
orgs = []string{user.SystemPrivilegedGroup}
}
cfg := kubeconfig.KubeConfig{
CN: cn,
ORG: orgs,
ExpiryDate: time.Hour * 24 * time.Duration(expirationDays),
AltNames: certAltNames,
}
logrus.Infof("waiting for cluster to be available..")
var kubeconfig *clientcmdapi.Config
if err := retry.OnError(controller.Backoff, apierrors.IsNotFound, func() error {
kubeconfig, err = cfg.Extract(ctx, ctrlClient, &cluster, host[0])
return err
}); err != nil {
return err
func writeKubeconfigFile(cluster *v1alpha1.Cluster, kubeconfig *clientcmdapi.Config) error {
if configName == "" {
configName = cluster.Namespace + "-" + cluster.Name + "-kubeconfig.yaml"
}
pwd, err := os.Getwd()
@@ -166,11 +161,7 @@ func generate(clx *cli.Context) error {
return err
}
if configName == "" {
configName = cluster.Name + "-kubeconfig.yaml"
}
logrus.Infof(`You can start using the cluster with:
logrus.Infof(`You can start using the cluster with:
export KUBECONFIG=%s
kubectl cluster-info

View File

@@ -1,62 +1,117 @@
package cmds
import (
"os"
"fmt"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/buildinfo"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
"k8s.io/apimachinery/pkg/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/controller-runtime/pkg/client"
)
const (
defaultNamespace = "default"
)
type AppContext struct {
RestConfig *rest.Config
Client client.Client
var (
debug bool
Kubeconfig string
namespace string
CommonFlags = []cli.Flag{
&cli.StringFlag{
Name: "kubeconfig",
EnvVars: []string{"KUBECONFIG"},
Usage: "kubeconfig path",
Destination: &Kubeconfig,
Value: os.Getenv("HOME") + "/.kube/config",
},
&cli.StringFlag{
Name: "namespace",
Usage: "namespace to create the k3k cluster in",
Destination: &namespace,
},
}
)
// Global flags
Debug bool
Kubeconfig string
namespace string
}
func NewApp() *cli.App {
appCtx := &AppContext{}
app := cli.NewApp()
app.Name = "k3kcli"
app.Usage = "CLI for K3K"
app.Flags = []cli.Flag{
&cli.BoolFlag{
Name: "debug",
Usage: "Turn on debug logs",
Destination: &debug,
EnvVars: []string{"K3K_DEBUG"},
},
}
app.Flags = WithCommonFlags(appCtx)
app.Before = func(clx *cli.Context) error {
if debug {
if appCtx.Debug {
logrus.SetLevel(logrus.DebugLevel)
}
restConfig, err := loadRESTConfig(appCtx.Kubeconfig)
if err != nil {
return err
}
scheme := runtime.NewScheme()
_ = clientgoscheme.AddToScheme(scheme)
_ = v1alpha1.AddToScheme(scheme)
ctrlClient, err := client.New(restConfig, client.Options{Scheme: scheme})
if err != nil {
return err
}
appCtx.RestConfig = restConfig
appCtx.Client = ctrlClient
return nil
}
app.Version = buildinfo.Version
cli.VersionPrinter = func(cCtx *cli.Context) {
fmt.Println("k3kcli Version: " + buildinfo.Version)
}
app.Commands = []*cli.Command{
NewClusterCmd(appCtx),
NewClusterSetCmd(appCtx),
NewKubeconfigCmd(appCtx),
}
return app
}
func Namespace() string {
if namespace == "" {
return defaultNamespace
func (ctx *AppContext) Namespace(name string) string {
if ctx.namespace != "" {
return ctx.namespace
}
return namespace
return "k3k-" + name
}
func loadRESTConfig(kubeconfig string) (*rest.Config, error) {
loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
configOverrides := &clientcmd.ConfigOverrides{}
if kubeconfig != "" {
loadingRules.ExplicitPath = kubeconfig
}
kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides)
return kubeConfig.ClientConfig()
}
func WithCommonFlags(appCtx *AppContext, flags ...cli.Flag) []cli.Flag {
commonFlags := []cli.Flag{
&cli.BoolFlag{
Name: "debug",
Usage: "Turn on debug logs",
Destination: &appCtx.Debug,
EnvVars: []string{"K3K_DEBUG"},
},
&cli.StringFlag{
Name: "kubeconfig",
Usage: "kubeconfig path",
Destination: &appCtx.Kubeconfig,
DefaultText: "$HOME/.kube/config or $KUBECONFIG if set",
},
&cli.StringFlag{
Name: "namespace",
Usage: "namespace to create the k3k cluster in",
Destination: &appCtx.namespace,
},
}
return append(commonFlags, flags...)
}

View File

@@ -1,29 +1,14 @@
package main
import (
"fmt"
"os"
"github.com/rancher/k3k/cli/cmds"
"github.com/rancher/k3k/cli/cmds/cluster"
"github.com/rancher/k3k/cli/cmds/kubeconfig"
"github.com/rancher/k3k/pkg/buildinfo"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
func main() {
app := cmds.NewApp()
app.Version = buildinfo.Version
cli.VersionPrinter = func(cCtx *cli.Context) {
fmt.Println("k3kcli Version: " + buildinfo.Version)
}
app.Commands = []*cli.Command{
cluster.NewCommand(),
kubeconfig.NewCommand(),
}
if err := app.Run(os.Args); err != nil {
logrus.Fatal(err)
}

View File

@@ -6,7 +6,7 @@ This document provides advanced usage information for k3k, including detailed us
The `Cluster` resource provides a variety of fields for customizing the behavior of your virtual clusters. You can check the [CRD documentation](./crds/crd-docs.md) for the full specs.
**Note:** Most of these customization options can also be configured using the `k3kcli` tool. Refer to the `k3kcli` documentation for more details.
**Note:** Most of these customization options can also be configured using the `k3kcli` tool. Refer to the [k3kcli](./cli/cli-docs.md) documentation for more details.
@@ -94,14 +94,14 @@ In this example we are exposing the Cluster with a Nginx ingress-controller, tha
### `clusterCIDR`
The `clusterCIDR` field specifies the CIDR range for the pods of the cluster. The default value is `10.42.0.0/16`.
The `clusterCIDR` field specifies the CIDR range for the pods of the cluster. The default value is `10.42.0.0/16` in shared mode, and `10.52.0.0/16` in virtual mode.
### `serviceCIDR`
The `serviceCIDR` field specifies the CIDR range for the services in the cluster. The default value is `10.43.0.0/16`.
The `serviceCIDR` field specifies the CIDR range for the services in the cluster. The default value is `10.43.0.0/16` in shared mode, and `10.53.0.0/16` in virtual mode.
**Note:** In `shared` mode, the `serviceCIDR` should match the host cluster's `serviceCIDR` to prevent conflicts.
**Note:** In `shared` mode, the `serviceCIDR` should match the host cluster's `serviceCIDR` to prevent conflicts and in `virtual` mode both `serviceCIDR` and `clusterCIDR` should be different than the host cluster.
### `clusterDNS`
@@ -112,3 +112,21 @@ The `clusterDNS` field specifies the IP address for the CoreDNS service. It need
### `serverArgs`
The `serverArgs` field allows you to specify additional arguments to be passed to the K3s server pods.
## Using the cli
You can check the [k3kcli documentation](./cli/cli-docs.md) for the full specs.
### No storage provider:
* Ephemeral Storage:
```bash
k3kcli cluster create --persistence-type ephemeral my-cluster
```
*Important Notes:*
* Using `--persistence-type ephemeral` will result in data loss if the nodes are restarted.
* It is highly recommended to use `--persistence-type dynamic` with a configured storage class.

146
docs/cli/cli-docs.md Normal file
View File

@@ -0,0 +1,146 @@
# NAME
k3kcli - CLI for K3K
# SYNOPSIS
k3kcli
```
[--debug]
[--kubeconfig]=[value]
[--namespace]=[value]
```
**Usage**:
```
k3kcli [GLOBAL OPTIONS] command [COMMAND OPTIONS] [ARGUMENTS...]
```
# GLOBAL OPTIONS
**--debug**: Turn on debug logs
**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)
**--namespace**="": namespace to create the k3k cluster in
# COMMANDS
## cluster
cluster command
### create
Create new cluster
>k3kcli cluster create [command options] NAME
**--agent-args**="": agents extra arguments
**--agents**="": number of agents (default: 0)
**--cluster-cidr**="": cluster CIDR
**--clusterset**="": The clusterset to create the cluster in
**--debug**: Turn on debug logs
**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)
**--kubeconfig-server**="": override the kubeconfig server host
**--mode**="": k3k mode type (shared, virtual) (default: "shared")
**--namespace**="": namespace to create the k3k cluster in
**--persistence-type**="": persistence mode for the nodes (dynamic, ephemeral, static) (default: "dynamic")
**--server-args**="": servers extra arguments
**--servers**="": number of servers (default: 1)
**--service-cidr**="": service CIDR
**--storage-class-name**="": storage class name for dynamic persistence type
**--token**="": token of the cluster
**--version**="": k3s version
### delete
Delete an existing cluster
>k3kcli cluster delete [command options] NAME
**--debug**: Turn on debug logs
**--keep-data**: keeps persistence volumes created for the cluster after deletion
**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)
**--namespace**="": namespace to create the k3k cluster in
## clusterset
clusterset command
### create
Create new clusterset
>k3kcli clusterset create [command options] NAME
**--debug**: Turn on debug logs
**--display-name**="": The display name of the clusterset
**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)
**--mode**="": The allowed mode type of the clusterset (default: "shared")
**--namespace**="": namespace to create the k3k cluster in
### delete
Delete an existing clusterset
>k3kcli clusterset delete [command options] NAME
**--debug**: Turn on debug logs
**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)
**--namespace**="": namespace to create the k3k cluster in
## kubeconfig
Manage kubeconfig for clusters
### generate
Generate kubeconfig for clusters
**--altNames**="": altNames of the generated certificates for the kubeconfig
**--cn**="": Common name (CN) of the generated certificates for the kubeconfig (default: "system:admin")
**--config-name**="": the name of the generated kubeconfig file
**--debug**: Turn on debug logs
**--expiration-days**="": Expiration date of the certificates used for the kubeconfig (default: 356)
**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)
**--kubeconfig-server**="": override the kubeconfig server host
**--name**="": cluster name
**--namespace**="": namespace to create the k3k cluster in
**--org**="": Organization name (ORG) of the generated certificates for the kubeconfig

37
docs/cli/genclidoc.go Normal file
View File

@@ -0,0 +1,37 @@
package main
import (
"fmt"
"os"
"path"
"github.com/rancher/k3k/cli/cmds"
)
func main() {
// Instantiate the CLI application
app := cmds.NewApp()
// Generate the Markdown documentation
md, err := app.ToMarkdown()
if err != nil {
fmt.Println("Error generating documentation:", err)
os.Exit(1)
}
wd, err := os.Getwd()
if err != nil {
fmt.Println(err)
os.Exit(1)
}
outputFile := path.Join(wd, "docs/cli/cli-docs.md")
err = os.WriteFile(outputFile, []byte(md), 0644)
if err != nil {
fmt.Println("Error generating documentation:", err)
os.Exit(1)
}
fmt.Println("Documentation generated at " + outputFile)
}

View File

@@ -1,6 +0,0 @@
CRD_REF_DOCS_VER := v0.1.0
CRD_REF_DOCS := go run github.com/elastic/crd-ref-docs@$(CRD_REF_DOCS_VER)
.PHONY: generate
generate:
$(CRD_REF_DOCS) --config=config.yaml --renderer=markdown --source-path=../../pkg/apis/k3k.io/v1alpha1 --output-path=crd-docs.md

View File

@@ -17,7 +17,7 @@
Addon specifies a Secret containing YAML to be deployed on cluster startup.
@@ -26,15 +26,17 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `secretNamespace` _string_ | | | |
| `secretRef` _string_ | | | |
| `secretNamespace` _string_ | SecretNamespace is the namespace of the Secret. | | |
| `secretRef` _string_ | SecretRef is the name of the Secret. | | |
#### Cluster
Cluster defines a virtual Kubernetes cluster managed by k3k.
It specifies the desired state of a virtual cluster, including version, node configuration, and networking.
k3k uses this to provision and manage these virtual clusters.
@@ -46,31 +48,14 @@ _Appears in:_
| `apiVersion` _string_ | `k3k.io/v1alpha1` | | |
| `kind` _string_ | `Cluster` | | |
| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
| `spec` _[ClusterSpec](#clusterspec)_ | | \{ \} | |
#### ClusterLimit
_Appears in:_
- [ClusterSpec](#clusterspec)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `serverLimit` _[ResourceList](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core)_ | ServerLimit is the limits (cpu/mem) that apply to the server nodes | | |
| `workerLimit` _[ResourceList](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core)_ | WorkerLimit is the limits (cpu/mem) that apply to the agent nodes | | |
| `spec` _[ClusterSpec](#clusterspec)_ | Spec defines the desired state of the Cluster. | \{ \} | |
#### ClusterList
ClusterList is a list of Cluster resources.
@@ -102,7 +87,7 @@ _Appears in:_
ClusterSpec defines the desired state of a virtual Kubernetes cluster.
@@ -111,23 +96,24 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `version` _string_ | Version is a string representing the Kubernetes version to be used by the virtual nodes. | | |
| `servers` _integer_ | Servers is the number of K3s pods to run in server (controlplane) mode. | 1 | |
| `agents` _integer_ | Agents is the number of K3s pods to run in agent (worker) mode. | 0 | |
| `nodeSelector` _object (keys:string, values:string)_ | NodeSelector is the node selector that will be applied to all server/agent pods.<br />In "shared" mode the node selector will be applied also to the workloads. | | |
| `priorityClass` _string_ | PriorityClass is the priorityClassName that will be applied to all server/agent pods.<br />In "shared" mode the priorityClassName will be applied also to the workloads. | | |
| `clusterLimit` _[ClusterLimit](#clusterlimit)_ | Limit is the limits that apply for the server/worker nodes. | | |
| `tokenSecretRef` _[SecretReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#secretreference-v1-core)_ | TokenSecretRef is Secret reference used as a token join server and worker nodes to the cluster. The controller<br />assumes that the secret has a field "token" in its data, any other fields in the secret will be ignored. | | |
| `clusterCIDR` _string_ | ClusterCIDR is the CIDR range for the pods of the cluster. Defaults to 10.42.0.0/16. | | |
| `serviceCIDR` _string_ | ServiceCIDR is the CIDR range for the services in the cluster. Defaults to 10.43.0.0/16. | | |
| `clusterDNS` _string_ | ClusterDNS is the IP address for the coredns service. Needs to be in the range provided by ServiceCIDR or CoreDNS may not deploy.<br />Defaults to 10.43.0.10. | | |
| `serverArgs` _string array_ | ServerArgs are the ordered key value pairs (e.x. "testArg", "testValue") for the K3s pods running in server mode. | | |
| `agentArgs` _string array_ | AgentArgs are the ordered key value pairs (e.x. "testArg", "testValue") for the K3s pods running in agent mode. | | |
| `tlsSANs` _string array_ | TLSSANs are the subjectAlternativeNames for the certificate the K3s server will use. | | |
| `addons` _[Addon](#addon) array_ | Addons is a list of secrets containing raw YAML which will be deployed in the virtual K3k cluster on startup. | | |
| `mode` _[ClusterMode](#clustermode)_ | Mode is the cluster provisioning mode which can be either "shared" or "virtual". Defaults to "shared" | shared | Enum: [shared virtual] <br /> |
| `persistence` _[PersistenceConfig](#persistenceconfig)_ | Persistence contains options controlling how the etcd data of the virtual cluster is persisted. By default, no data<br />persistence is guaranteed, so restart of a virtual cluster pod may result in data loss without this field. | \{ type:dynamic \} | |
| `expose` _[ExposeConfig](#exposeconfig)_ | Expose contains options for exposing the apiserver inside/outside of the cluster. By default, this is only exposed as a<br />clusterIP which is relatively secure, but difficult to access outside of the cluster. | | |
| `version` _string_ | Version is the K3s version to use for the virtual nodes.<br />It should follow the K3s versioning convention (e.g., v1.28.2-k3s1).<br />If not specified, the Kubernetes version of the host node will be used. | | |
| `mode` _[ClusterMode](#clustermode)_ | Mode specifies the cluster provisioning mode: "shared" or "virtual".<br />Defaults to "shared". This field is immutable. | shared | Enum: [shared virtual] <br /> |
| `servers` _integer_ | Servers specifies the number of K3s pods to run in server (control plane) mode.<br />Must be at least 1. Defaults to 1. | 1 | |
| `agents` _integer_ | Agents specifies the number of K3s pods to run in agent (worker) mode.<br />Must be 0 or greater. Defaults to 0.<br />This field is ignored in "shared" mode. | 0 | |
| `clusterCIDR` _string_ | ClusterCIDR is the CIDR range for pod IPs.<br />Defaults to 10.42.0.0/16 in shared mode and 10.52.0.0/16 in virtual mode.<br />This field is immutable. | | |
| `serviceCIDR` _string_ | ServiceCIDR is the CIDR range for service IPs.<br />Defaults to 10.43.0.0/16 in shared mode and 10.53.0.0/16 in virtual mode.<br />This field is immutable. | | |
| `clusterDNS` _string_ | ClusterDNS is the IP address for the CoreDNS service.<br />Must be within the ServiceCIDR range. Defaults to 10.43.0.10.<br />This field is immutable. | | |
| `persistence` _[PersistenceConfig](#persistenceconfig)_ | Persistence specifies options for persisting etcd data.<br />Defaults to dynamic persistence, which uses a PersistentVolumeClaim to provide data persistence.<br />A default StorageClass is required for dynamic persistence. | \{ type:dynamic \} | |
| `expose` _[ExposeConfig](#exposeconfig)_ | Expose specifies options for exposing the API server.<br />By default, it's only exposed as a ClusterIP. | | |
| `nodeSelector` _object (keys:string, values:string)_ | NodeSelector specifies node labels to constrain where server/agent pods are scheduled.<br />In "shared" mode, this also applies to workloads. | | |
| `priorityClass` _string_ | PriorityClass specifies the priorityClassName for server/agent pods.<br />In "shared" mode, this also applies to workloads. | | |
| `tokenSecretRef` _[SecretReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#secretreference-v1-core)_ | TokenSecretRef is a Secret reference containing the token used by worker nodes to join the cluster.<br />The Secret must have a "token" field in its data. | | |
| `tlsSANs` _string array_ | TLSSANs specifies subject alternative names for the K3s server certificate. | | |
| `serverArgs` _string array_ | ServerArgs specifies ordered key-value pairs for K3s server pods.<br />Example: ["--tls-san=example.com"] | | |
| `agentArgs` _string array_ | AgentArgs specifies ordered key-value pairs for K3s agent pods.<br />Example: ["--node-name=my-agent-node"] | | |
| `addons` _[Addon](#addon) array_ | Addons specifies secrets containing raw YAML to deploy on cluster startup. | | |
| `serverLimit` _[ResourceList](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core)_ | ServerLimit specifies resource limits for server nodes. | | |
| `workerLimit` _[ResourceList](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core)_ | WorkerLimit specifies resource limits for agent nodes. | | |
@@ -136,7 +122,7 @@ _Appears in:_
ExposeConfig specifies options for exposing the API server.
@@ -145,16 +131,16 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `ingress` _[IngressConfig](#ingressconfig)_ | | | |
| `loadbalancer` _[LoadBalancerConfig](#loadbalancerconfig)_ | | | |
| `nodePort` _[NodePortConfig](#nodeportconfig)_ | | | |
| `ingress` _[IngressConfig](#ingressconfig)_ | Ingress specifies options for exposing the API server through an Ingress. | | |
| `loadbalancer` _[LoadBalancerConfig](#loadbalancerconfig)_ | LoadBalancer specifies options for exposing the API server through a LoadBalancer service. | | |
| `nodePort` _[NodePortConfig](#nodeportconfig)_ | NodePort specifies options for exposing the API server through NodePort. | | |
#### IngressConfig
IngressConfig specifies options for exposing the API server through an Ingress.
@@ -163,31 +149,28 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `annotations` _object (keys:string, values:string)_ | Annotations is a key value map that will enrich the Ingress annotations | | |
| `ingressClassName` _string_ | | | |
| `annotations` _object (keys:string, values:string)_ | Annotations specifies annotations to add to the Ingress. | | |
| `ingressClassName` _string_ | IngressClassName specifies the IngressClass to use for the Ingress. | | |
#### LoadBalancerConfig
LoadBalancerConfig specifies options for exposing the API server through a LoadBalancer service.
_Appears in:_
- [ExposeConfig](#exposeconfig)
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `enabled` _boolean_ | | | |
#### NodePortConfig
NodePortConfig specifies options for exposing the API server through NodePort.
@@ -196,16 +179,16 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `serverPort` _integer_ | ServerPort is the port on each node on which the K3s server service is exposed when type is NodePort.<br />If not specified, a port will be allocated (default: 30000-32767) | | |
| `servicePort` _integer_ | ServicePort is the port on each node on which the K3s service is exposed when type is NodePort.<br />If not specified, a port will be allocated (default: 30000-32767) | | |
| `etcdPort` _integer_ | ETCDPort is the port on each node on which the ETCD service is exposed when type is NodePort.<br />If not specified, a port will be allocated (default: 30000-32767) | | |
| `serverPort` _integer_ | ServerPort is the port on each node on which the K3s server service is exposed when type is NodePort.<br />If not specified, a port will be allocated (default: 30000-32767). | | |
| `servicePort` _integer_ | ServicePort is the port on each node on which the K3s service is exposed when type is NodePort.<br />If not specified, a port will be allocated (default: 30000-32767). | | |
| `etcdPort` _integer_ | ETCDPort is the port on each node on which the ETCD service is exposed when type is NodePort.<br />If not specified, a port will be allocated (default: 30000-32767). | | |
#### PersistenceConfig
PersistenceConfig specifies options for persisting etcd data.
@@ -215,9 +198,9 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `type` _[PersistenceMode](#persistencemode)_ | | dynamic | |
| `storageClassName` _string_ | | | |
| `storageRequestSize` _string_ | | | |
| `type` _[PersistenceMode](#persistencemode)_ | Type specifies the persistence mode. | dynamic | |
| `storageClassName` _string_ | StorageClassName is the name of the StorageClass to use for the PVC.<br />This field is only relevant in "dynamic" mode. | | |
| `storageRequestSize` _string_ | StorageRequestSize is the requested size for the PVC.<br />This field is only relevant in "dynamic" mode. | | |
#### PersistenceMode

View File

@@ -1,15 +1,154 @@
# Development
## Prerequisites
To start developing K3k you will need:
- Go
- Docker
- Helm
- A running Kubernetes cluster
### TLDR
```shell
#!/bin/bash
set -euo pipefail
# These environment variables configure the image repository and tag.
export REPO=ghcr.io/myuser
export VERSION=dev-$(date -u '+%Y%m%d%H%M')
make
make push
make install
```
### Makefile
To see all the available Make commands you can run `make help`, i.e:
```
-> % make help
all Run 'make' or 'make all' to run 'version', 'build-crds', 'build' and 'package'
version Print the current version
build Build the the K3k binaries (k3k, k3k-kubelet and k3kcli)
package Package the k3k and k3k-kubelet Docker images
push Push the K3k images to the registry
test Run all the tests
test-unit Run the unit tests (skips the e2e)
test-controller Run the controller tests (pkg/controller)
test-e2e Run the e2e tests
build-crds Build the CRDs specs
docs Build the CRDs docs
lint Find any linting issues in the project
validate Validate the project checking for any dependency or doc mismatch
install Install K3k with Helm on the targeted Kubernetes cluster
help Show this help.
```
### Build
To build the needed binaries (`k3k`, `k3k-kubelet` and the `k3kcli`) and package the images you can simply run `make`.
By default the `rancher` repository will be used, but you can customize this to your registry with the `REPO` env var:
```
REPO=ghcr.io/userorg make
```
To customize the tag you can also explicitly set the VERSION:
```
VERSION=dev-$(date -u '+%Y%m%d%H%M') make
```
### Push
You will need to push the built images to your registry, and you can use the `make push` command to do this.
### Install
Once you have your images available you can install K3k with the `make install` command. This will use `helm` to install the release.
## Tests
To run the tests we use [Ginkgo](https://onsi.github.io/ginkgo/), and [`envtest`](https://book.kubebuilder.io/reference/envtest) for testing the controllers.
To run the tests you can just run `make test`, or one of the other available "sub-tests" targets (`test-unit`, `test-controller`, `test-e2e`).
Install the required binaries from `envtest` with [`setup-envtest`](https://pkg.go.dev/sigs.k8s.io/controller-runtime/tools/setup-envtest), and then put them in the default path `/usr/local/kubebuilder/bin`:
We use [Ginkgo](https://onsi.github.io/ginkgo/), and [`envtest`](https://book.kubebuilder.io/reference/envtest) for testing the controllers.
```
ENVTEST_BIN=$(setup-envtest use -p path)
sudo mkdir -p /usr/local/kubebuilder/bin
sudo cp $ENVTEST_BIN/* /usr/local/kubebuilder/bin
The required binaries for `envtest` are installed with [`setup-envtest`](https://pkg.go.dev/sigs.k8s.io/controller-runtime/tools/setup-envtest), in the `.envtest` folder.
## CRDs and Docs
We are using Kubebuilder and `controller-gen` to build the needed CRDs. To generate the specs you can run `make build-crds`.
Remember also to update the CRDs documentation running the `make docs` command.
## How to install k3k on k3d
This document provides a guide on how to install k3k on [k3d](https://k3d.io).
### Installing k3d
Since k3d uses docker under the hood, we need to expose the ports on the host that we'll then use for the NodePort in virtual cluster creation.
Create the k3d cluster in the following way:
```bash
k3d cluster create k3k -p "30000-30010:30000-30010@server:0"
```
then run `ginkgo run ./...`.
With this syntax ports from 30000 to 30010 will be exposed on the host.
### Install k3k
Install now k3k as usual:
```bash
helm repo update
helm install --namespace k3k-system --create-namespace k3k k3k/k3k --devel
```
### Create a virtual cluster
Once the k3k controller is up and running, create a namespace where to create our first virtual cluster.
```bash
kubectl create ns k3k-mycluster
```
Create then the virtual cluster exposing through NodePort one of the ports that we set up in the previous step:
```bash
cat <<EOF | kubectl apply -f -
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
name: mycluster
namespace: k3k-mycluster
spec:
expose:
nodePort:
serverPort: 30001
EOF
```
Check when the cluster is ready:
```bash
kubectl get po -n k3k-mycluster
```
Last thing to do is to get the kubeconfig to connect to the virtual cluster we've just created:
```bash
k3kcli kubeconfig generate --name mycluster --namespace k3k-mycluster --kubeconfig-server localhost:30001
```

View File

@@ -4,7 +4,7 @@ metadata:
name: clusterset-example
# spec:
# disableNetworkPolicy: false
# allowedNodeTypes:
# allowedModeTypes:
# - "shared"
# - "virtual"
# podSecurityAdmissionLevel: "baseline"

3
go.mod
View File

@@ -32,6 +32,8 @@ require (
k8s.io/apiserver v0.29.11
k8s.io/client-go v0.29.11
k8s.io/component-base v0.29.11
k8s.io/component-helpers v0.29.11
k8s.io/kubectl v0.29.11
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
sigs.k8s.io/controller-runtime v0.17.5
)
@@ -206,7 +208,6 @@ require (
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/kms v0.29.11 // indirect
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
k8s.io/kubectl v0.29.11 // indirect
oras.land/oras-go v1.2.5 // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect

2
go.sum
View File

@@ -2019,6 +2019,8 @@ k8s.io/client-go v0.29.11 h1:mBX7Ub0uqpLMwWz3J/AGS/xKOZsjr349qZ1vxVoL1l8=
k8s.io/client-go v0.29.11/go.mod h1:WOEoi/eLg2YEg3/yEd7YK3CNScYkM8AEScQadxUnaTE=
k8s.io/component-base v0.29.11 h1:H3GJIyDNPrscvXGP6wx+9gApcwwmrUd0YtCGp5BcHBA=
k8s.io/component-base v0.29.11/go.mod h1:0qu1WStER4wu5o8RMRndZUWPVcPH1XBy/QQiDcD6lew=
k8s.io/component-helpers v0.29.11 h1:GdZaSLBLlCa+EzjAnpZ4fGB75rA3qqPLLZKk+CsqNyo=
k8s.io/component-helpers v0.29.11/go.mod h1:gloyih9IiE4Qy/7iLUXqAmxYSUduuIpMCiNYuHfYvD4=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kms v0.29.11 h1:pylaiDJhgfqczvcjMDPI89+VH0OVoGQhscPH1VbBzQE=

View File

@@ -31,33 +31,43 @@ func (c *config) unmarshalYAML(data []byte) error {
if c.ClusterName == "" {
c.ClusterName = conf.ClusterName
}
if c.ClusterNamespace == "" {
c.ClusterNamespace = conf.ClusterNamespace
}
if c.HostConfigPath == "" {
c.HostConfigPath = conf.HostConfigPath
}
if c.VirtualConfigPath == "" {
c.VirtualConfigPath = conf.VirtualConfigPath
}
if c.KubeletPort == "" {
c.KubeletPort = conf.KubeletPort
}
if c.AgentHostname == "" {
c.AgentHostname = conf.AgentHostname
}
if c.ServiceName == "" {
c.ServiceName = conf.ServiceName
}
if c.Token == "" {
c.Token = conf.Token
}
if c.ServerIP == "" {
c.ServerIP = conf.ServerIP
}
if c.Version == "" {
c.Version = conf.Version
}
return nil
}
@@ -65,12 +75,15 @@ func (c *config) validate() error {
if c.ClusterName == "" {
return errors.New("cluster name is not provided")
}
if c.ClusterNamespace == "" {
return errors.New("cluster namespace is not provided")
}
if c.AgentHostname == "" {
return errors.New("agent Hostname is not provided")
}
return nil
}
@@ -83,5 +96,6 @@ func (c *config) parse(path string) error {
if err != nil {
return err
}
return c.unmarshalYAML(b)
}

View File

@@ -38,6 +38,7 @@ func (c *ConfigMapSyncer) Reconcile(ctx context.Context, req reconcile.Request)
// return immediately without re-enqueueing. We aren't watching this resource
return reconcile.Result{}, nil
}
var virtual corev1.ConfigMap
if err := c.VirtualClient.Get(ctx, req.NamespacedName, &virtual); err != nil {
@@ -45,16 +46,19 @@ func (c *ConfigMapSyncer) Reconcile(ctx context.Context, req reconcile.Request)
Requeue: true,
}, fmt.Errorf("unable to get configmap %s/%s from virtual cluster: %w", req.Namespace, req.Name, err)
}
translated, err := c.TranslateFunc(&virtual)
if err != nil {
return reconcile.Result{
Requeue: true,
}, fmt.Errorf("unable to translate configmap %s/%s from virtual cluster: %w", req.Namespace, req.Name, err)
}
translatedKey := types.NamespacedName{
Namespace: translated.Namespace,
Name: translated.Name,
}
var host corev1.ConfigMap
if err = c.HostClient.Get(ctx, translatedKey, &host); err != nil {
if apierrors.IsNotFound(err) {
@@ -66,6 +70,7 @@ func (c *ConfigMapSyncer) Reconcile(ctx context.Context, req reconcile.Request)
}, fmt.Errorf("unable to create host configmap %s/%s for virtual configmap %s/%s: %w",
translated.Namespace, translated.Name, req.Namespace, req.Name, err)
}
return reconcile.Result{Requeue: true}, fmt.Errorf("unable to get host configmap %s/%s: %w", translated.Namespace, translated.Name, err)
}
// we are going to use the host in order to avoid conflicts on update
@@ -79,13 +84,14 @@ func (c *ConfigMapSyncer) Reconcile(ctx context.Context, req reconcile.Request)
for key, value := range translated.Labels {
host.Labels[key] = value
}
if err = c.HostClient.Update(ctx, &host); err != nil {
return reconcile.Result{
Requeue: true,
}, fmt.Errorf("unable to update host configmap %s/%s for virtual configmap %s/%s: %w",
translated.Namespace, translated.Name, req.Namespace, req.Name, err)
}
return reconcile.Result{}, nil
}
@@ -94,6 +100,7 @@ func (c *ConfigMapSyncer) Reconcile(ctx context.Context, req reconcile.Request)
func (c *ConfigMapSyncer) isWatching(key types.NamespacedName) bool {
c.mutex.RLock()
defer c.mutex.RUnlock()
return c.objs.Has(key)
}
@@ -104,23 +111,29 @@ func (c *ConfigMapSyncer) AddResource(ctx context.Context, namespace, name strin
Namespace: namespace,
Name: name,
}
// if we already sync this object, no need to writelock/add it
if c.isWatching(objKey) {
return nil
}
// lock in write mode since we are now adding the key
c.mutex.Lock()
if c.objs == nil {
c.objs = sets.Set[types.NamespacedName]{}
}
c.objs = c.objs.Insert(objKey)
c.mutex.Unlock()
_, err := c.Reconcile(ctx, reconcile.Request{
NamespacedName: objKey,
})
if err != nil {
return fmt.Errorf("unable to reconcile new object %s/%s: %w", objKey.Namespace, objKey.Name, err)
}
return nil
}
@@ -143,24 +156,34 @@ func (c *ConfigMapSyncer) RemoveResource(ctx context.Context, namespace, name st
}); err != nil {
return fmt.Errorf("unable to remove configmap: %w", err)
}
c.mutex.Lock()
if c.objs == nil {
c.objs = sets.Set[types.NamespacedName]{}
}
c.objs = c.objs.Delete(objKey)
c.mutex.Unlock()
return nil
}
func (c *ConfigMapSyncer) removeHostConfigMap(ctx context.Context, virtualNamespace, virtualName string) error {
var vConfigMap corev1.ConfigMap
err := c.VirtualClient.Get(ctx, types.NamespacedName{Namespace: virtualNamespace, Name: virtualName}, &vConfigMap)
if err != nil {
key := types.NamespacedName{
Namespace: virtualNamespace,
Name: virtualName,
}
if err := c.VirtualClient.Get(ctx, key, &vConfigMap); err != nil {
return fmt.Errorf("unable to get virtual configmap %s/%s: %w", virtualNamespace, virtualName, err)
}
translated, err := c.TranslateFunc(&vConfigMap)
if err != nil {
return fmt.Errorf("unable to translate virtual secret: %s/%s: %w", virtualNamespace, virtualName, err)
}
return c.HostClient.Delete(ctx, translated)
}

View File

@@ -45,17 +45,22 @@ type updateableReconciler interface {
func (c *ControllerHandler) AddResource(ctx context.Context, obj client.Object) error {
c.RLock()
controllers := c.controllers
if controllers != nil {
if r, ok := c.controllers[obj.GetObjectKind().GroupVersionKind()]; ok {
err := r.AddResource(ctx, obj.GetNamespace(), obj.GetName())
c.RUnlock()
return err
}
}
// we need to manually lock/unlock since we intend on write locking to add a new controller
c.RUnlock()
var r updateableReconciler
switch obj.(type) {
case *v1.Secret:
r = &SecretSyncer{
@@ -89,19 +94,23 @@ func (c *ControllerHandler) AddResource(ctx context.Context, obj client.Object)
// TODO: Technically, the configmap/secret syncers are relatively generic, and this
// logic could be used for other types.
return fmt.Errorf("unrecognized type: %T", obj)
}
err := ctrl.NewControllerManagedBy(c.Mgr).
For(&v1.ConfigMap{}).
Complete(r)
if err != nil {
return fmt.Errorf("unable to start configmap controller: %w", err)
}
c.Lock()
if c.controllers == nil {
c.controllers = map[schema.GroupVersionKind]updateableReconciler{}
}
c.controllers[obj.GetObjectKind().GroupVersionKind()] = r
c.Unlock()
return r.AddResource(ctx, obj.GetNamespace(), obj.GetName())
@@ -112,8 +121,10 @@ func (c *ControllerHandler) RemoveResource(ctx context.Context, obj client.Objec
c.RLock()
ctrl, ok := c.controllers[obj.GetObjectKind().GroupVersionKind()]
c.RUnlock()
if !ok {
return fmt.Errorf("no controller found for gvk %s", obj.GetObjectKind().GroupVersionKind())
}
return ctrl.RemoveResource(ctx, obj.GetNamespace(), obj.GetName())
}

View File

@@ -51,6 +51,7 @@ func AddPVCSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, cluster
clusterName: clusterName,
clusterNamespace: clusterNamespace,
}
return ctrl.NewControllerManagedBy(virtMgr).
For(&v1.PersistentVolumeClaim{}).
WithOptions(controller.Options{
@@ -61,11 +62,12 @@ func AddPVCSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, cluster
func (r *PVCReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := r.logger.With("Cluster", r.clusterName, "PersistentVolumeClaim", req.NamespacedName)
var (
virtPVC v1.PersistentVolumeClaim
hostPVC v1.PersistentVolumeClaim
cluster v1alpha1.Cluster
)
if err := r.hostClient.Get(ctx, types.NamespacedName{Name: r.clusterName, Namespace: r.clusterNamespace}, &cluster); err != nil {
return reconcile.Result{}, err
}
@@ -74,10 +76,12 @@ func (r *PVCReconciler) Reconcile(ctx context.Context, req reconcile.Request) (r
if err := r.virtualClient.Get(ctx, req.NamespacedName, &virtPVC); err != nil {
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
syncedPVC := r.pvc(&virtPVC)
if err := controllerutil.SetControllerReference(&cluster, syncedPVC, r.HostScheme); err != nil {
return reconcile.Result{}, err
}
// handle deletion
if !virtPVC.DeletionTimestamp.IsZero() {
// deleting the synced service if exists
@@ -90,32 +94,28 @@ func (r *PVCReconciler) Reconcile(ctx context.Context, req reconcile.Request) (r
return reconcile.Result{}, err
}
}
return reconcile.Result{}, nil
}
// getting the cluster for setting the controller reference
// Add finalizer if it does not exist
if controllerutil.AddFinalizer(&virtPVC, pvcFinalizerName) {
if err := r.virtualClient.Update(ctx, &virtPVC); err != nil {
return reconcile.Result{}, err
}
}
// create or update the pvc on host
if err := r.hostClient.Get(ctx, types.NamespacedName{Name: syncedPVC.Name, Namespace: r.clusterNamespace}, &hostPVC); err != nil {
if apierrors.IsNotFound(err) {
log.Info("creating the persistent volume for the first time on the host cluster")
return reconcile.Result{}, r.hostClient.Create(ctx, syncedPVC)
}
return reconcile.Result{}, err
}
log.Info("updating pvc on the host cluster")
return reconcile.Result{}, r.hostClient.Update(ctx, syncedPVC)
// create the pvc on host
log.Info("creating the persistent volume for the first time on the host cluster")
// note that we don't need to update the PVC on the host cluster, only sync the PVC so it can be
// handled by the host cluster.
return reconcile.Result{}, ctrlruntimeclient.IgnoreAlreadyExists(r.hostClient.Create(ctx, syncedPVC))
}
func (r *PVCReconciler) pvc(obj *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim {
hostPVC := obj.DeepCopy()
r.Translator.TranslateTo(hostPVC)
return hostPVC
}

View File

@@ -0,0 +1,184 @@
package controller
import (
"context"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/log"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/component-helpers/storage/volume"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
const (
	// podController is the name under which this controller's logger is registered.
	podController = "pod-pvc-controller"
	// pseudoPVLabel marks PersistentVolumes created by this controller as
	// pseudo (virtual-cluster-only) volumes; see pseudoPV below.
	pseudoPVLabel = "pod.k3k.io/pseudoPV"
)

// PodReconciler watches Pods in the virtual cluster and creates a pseudo
// PersistentVolume for every PVC those pods reference, so the pods can be
// scheduled onto the virtual kubelet.
type PodReconciler struct {
	virtualClient    ctrlruntimeclient.Client // client for the virtual (guest) cluster
	hostClient       ctrlruntimeclient.Client // client for the host cluster
	clusterName      string                   // name of the Cluster object on the host
	clusterNamespace string                   // namespace of the Cluster object on the host
	Scheme           *runtime.Scheme          // scheme of the virtual cluster manager
	HostScheme       *runtime.Scheme          // scheme of the host cluster manager
	logger           *log.Logger
	Translator       translate.ToHostTranslator
}
// AddPodPVCController wires the pod-PVC controller into the virtual cluster
// manager: every Pod event in the virtual cluster is reconciled so that any
// referenced PVC gets a pseudo PersistentVolume backing it.
func AddPodPVCController(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string, logger *log.Logger) error {
	r := &PodReconciler{
		virtualClient: virtMgr.GetClient(),
		hostClient:    hostMgr.GetClient(),
		Scheme:        virtMgr.GetScheme(),
		HostScheme:    hostMgr.GetScheme(),
		logger:        logger.Named(podController),
		Translator: translate.ToHostTranslator{
			ClusterName:      clusterName,
			ClusterNamespace: clusterNamespace,
		},
		clusterName:      clusterName,
		clusterNamespace: clusterNamespace,
	}

	// Watch Pods on the virtual cluster with the shared concurrency limit.
	builder := ctrl.NewControllerManagedBy(virtMgr).
		For(&v1.Pod{}).
		WithOptions(controller.Options{
			MaxConcurrentReconciles: maxConcurrentReconciles,
		})

	return builder.Complete(r)
}
// Reconcile ensures that every PVC-backed volume of the reconciled pod is
// handled by reconcilePodWithPVC, which backs the claim with a pseudo PV.
func (r *PodReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
	log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.clusterName, "clusterNamespace", r.clusterNamespace)

	var (
		virtPod v1.Pod
		cluster v1alpha1.Cluster
	)

	// Fetch the owning Cluster from the host. Its value is not used below,
	// so this Get effectively acts as an existence check: reconciliation
	// fails while the cluster object cannot be read.
	if err := r.hostClient.Get(ctx, types.NamespacedName{Name: r.clusterName, Namespace: r.clusterNamespace}, &cluster); err != nil {
		return reconcile.Result{}, err
	}

	// Pods deleted between enqueue and reconcile are silently skipped.
	if err := r.virtualClient.Get(ctx, req.NamespacedName, &virtPod); err != nil {
		return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
	}

	// Back every PVC-sourced volume of the pod with a pseudo PV; the first
	// failure aborts and re-enqueues the pod.
	for _, vol := range virtPod.Spec.Volumes {
		if vol.PersistentVolumeClaim != nil {
			log.Info("Handling pod with pvc")

			if err := r.reconcilePodWithPVC(ctx, &virtPod, vol.PersistentVolumeClaim); err != nil {
				return reconcile.Result{}, err
			}
		}
	}

	return reconcile.Result{}, nil
}
// reconcilePodWithPVC will make sure to create a fake PV for each PVC for any pod so that it can be scheduled on the virtual-kubelet
// and then created on the host, the PV is not synced to the host cluster.
func (r *PodReconciler) reconcilePodWithPVC(ctx context.Context, pod *v1.Pod, pvcSource *v1.PersistentVolumeClaimVolumeSource) error {
	log := ctrl.LoggerFrom(ctx).WithValues("PersistentVolumeClaim", pvcSource.ClaimName)

	var pvc v1.PersistentVolumeClaim

	key := types.NamespacedName{
		Name:      pvcSource.ClaimName,
		Namespace: pod.Namespace,
	}

	// A missing PVC is not an error: the pod may reference a claim that no
	// longer exists, in which case there is nothing to back with a pseudo PV.
	if err := r.virtualClient.Get(ctx, key, &pvc); err != nil {
		return ctrlruntimeclient.IgnoreNotFound(err)
	}

	log.Info("Creating pseudo Persistent Volume")

	pv := r.pseudoPV(&pvc)
	// NOTE(review): when the PV already exists, Create fails with
	// AlreadyExists and this returns nil early, skipping the status patches
	// below — confirm a pre-existing pseudo PV is always already Bound.
	if err := r.virtualClient.Create(ctx, pv); err != nil {
		return ctrlruntimeclient.IgnoreAlreadyExists(err)
	}

	// Patch the freshly created PV's status to Bound (status is a subresource,
	// hence the separate Status().Patch call against a pristine base).
	orig := pv.DeepCopy()

	pv.Status = v1.PersistentVolumeStatus{
		Phase: v1.VolumeBound,
	}

	if err := r.virtualClient.Status().Patch(ctx, pv, ctrlruntimeclient.MergeFrom(orig)); err != nil {
		return err
	}

	log.Info("Patch the status of PersistentVolumeClaim to Bound")

	// Mark the claim the way the PV binder would: the bind annotations from
	// component-helpers plus a Bound phase mirroring the claim's own access modes.
	pvcPatch := pvc.DeepCopy()
	if pvcPatch.Annotations == nil {
		pvcPatch.Annotations = make(map[string]string)
	}

	pvcPatch.Annotations[volume.AnnBoundByController] = "yes"
	pvcPatch.Annotations[volume.AnnBindCompleted] = "yes"
	pvcPatch.Status.Phase = v1.ClaimBound
	pvcPatch.Status.AccessModes = pvcPatch.Spec.AccessModes

	return r.virtualClient.Status().Update(ctx, pvcPatch)
}
// pseudoPV builds an in-memory PersistentVolume that satisfies the given
// claim. It carries a placeholder flex-volume driver and the pseudoPVLabel
// so it can be recognized as a volume that only exists in the virtual cluster.
func (r *PodReconciler) pseudoPV(obj *v1.PersistentVolumeClaim) *v1.PersistentVolume {
	storageClass := ""
	if sc := obj.Spec.StorageClassName; sc != nil {
		storageClass = *sc
	}

	// Point the PV back at the claim it fulfills.
	claimRef := &v1.ObjectReference{
		APIVersion:      obj.APIVersion,
		UID:             obj.UID,
		ResourceVersion: obj.ResourceVersion,
		Kind:            obj.Kind,
		Namespace:       obj.Namespace,
		Name:            obj.Name,
	}

	return &v1.PersistentVolume{
		TypeMeta: metav1.TypeMeta{
			Kind:       "PersistentVolume",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: obj.Name,
			Labels: map[string]string{
				pseudoPVLabel: "true",
			},
			Annotations: map[string]string{
				volume.AnnBoundByController:      "true",
				volume.AnnDynamicallyProvisioned: "k3k-kubelet",
			},
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: v1.PersistentVolumeSource{
				FlexVolume: &v1.FlexPersistentVolumeSource{
					Driver: "pseudopv",
				},
			},
			StorageClassName:              storageClass,
			VolumeMode:                    obj.Spec.VolumeMode,
			PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
			AccessModes:                   obj.Spec.AccessModes,
			Capacity:                      obj.Spec.Resources.Requests,
			ClaimRef:                      claimRef,
		},
	}
}

View File

@@ -38,6 +38,7 @@ func (s *SecretSyncer) Reconcile(ctx context.Context, req reconcile.Request) (re
// return immediately without re-enqueueing. We aren't watching this resource
return reconcile.Result{}, nil
}
var virtual corev1.Secret
if err := s.VirtualClient.Get(ctx, req.NamespacedName, &virtual); err != nil {
@@ -45,16 +46,19 @@ func (s *SecretSyncer) Reconcile(ctx context.Context, req reconcile.Request) (re
Requeue: true,
}, fmt.Errorf("unable to get secret %s/%s from virtual cluster: %w", req.Namespace, req.Name, err)
}
translated, err := s.TranslateFunc(&virtual)
if err != nil {
return reconcile.Result{
Requeue: true,
}, fmt.Errorf("unable to translate secret %s/%s from virtual cluster: %w", req.Namespace, req.Name, err)
}
translatedKey := types.NamespacedName{
Namespace: translated.Namespace,
Name: translated.Name,
}
var host corev1.Secret
if err = s.HostClient.Get(ctx, translatedKey, &host); err != nil {
if apierrors.IsNotFound(err) {
@@ -66,6 +70,7 @@ func (s *SecretSyncer) Reconcile(ctx context.Context, req reconcile.Request) (re
}, fmt.Errorf("unable to create host secret %s/%s for virtual secret %s/%s: %w",
translated.Namespace, translated.Name, req.Namespace, req.Name, err)
}
return reconcile.Result{Requeue: true}, fmt.Errorf("unable to get host secret %s/%s: %w", translated.Namespace, translated.Name, err)
}
// we are going to use the host in order to avoid conflicts on update
@@ -79,13 +84,14 @@ func (s *SecretSyncer) Reconcile(ctx context.Context, req reconcile.Request) (re
for key, value := range translated.Labels {
host.Labels[key] = value
}
if err = s.HostClient.Update(ctx, &host); err != nil {
return reconcile.Result{
Requeue: true,
}, fmt.Errorf("unable to update host secret %s/%s for virtual secret %s/%s: %w",
translated.Namespace, translated.Name, req.Namespace, req.Name, err)
}
return reconcile.Result{}, nil
}
@@ -94,6 +100,7 @@ func (s *SecretSyncer) Reconcile(ctx context.Context, req reconcile.Request) (re
func (s *SecretSyncer) isWatching(key types.NamespacedName) bool {
s.mutex.RLock()
defer s.mutex.RUnlock()
return s.objs.Has(key)
}
@@ -113,14 +120,18 @@ func (s *SecretSyncer) AddResource(ctx context.Context, namespace, name string)
if s.objs == nil {
s.objs = sets.Set[types.NamespacedName]{}
}
s.objs = s.objs.Insert(objKey)
s.mutex.Unlock()
_, err := s.Reconcile(ctx, reconcile.Request{
NamespacedName: objKey,
})
if err != nil {
return fmt.Errorf("unable to reconcile new object %s/%s: %w", objKey.Namespace, objKey.Name, err)
}
return nil
}
@@ -148,8 +159,10 @@ func (s *SecretSyncer) RemoveResource(ctx context.Context, namespace, name strin
if s.objs == nil {
s.objs = sets.Set[types.NamespacedName]{}
}
s.objs = s.objs.Delete(objKey)
s.mutex.Unlock()
return nil
}
@@ -159,12 +172,15 @@ func (s *SecretSyncer) removeHostSecret(ctx context.Context, virtualNamespace, v
Namespace: virtualNamespace,
Name: virtualName,
}, &vSecret)
if err != nil {
return fmt.Errorf("unable to get virtual secret %s/%s: %w", virtualNamespace, virtualName, err)
}
translated, err := s.TranslateFunc(&vSecret)
if err != nil {
return fmt.Errorf("unable to translate virtual secret: %s/%s: %w", virtualNamespace, virtualName, err)
}
return s.HostClient.Delete(ctx, translated)
}

View File

@@ -53,6 +53,7 @@ func AddServiceSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clu
clusterName: clusterName,
clusterNamespace: clusterNamespace,
}
return ctrl.NewControllerManagedBy(virtMgr).
For(&v1.Service{}).
WithOptions(controller.Options{
@@ -63,9 +64,11 @@ func AddServiceSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clu
func (s *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := s.logger.With("Cluster", s.clusterName, "Service", req.NamespacedName)
if req.Name == "kubernetes" || req.Name == "kube-dns" {
return reconcile.Result{}, nil
}
var (
virtService v1.Service
hostService v1.Service
@@ -75,9 +78,11 @@ func (s *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request
if err := s.hostClient.Get(ctx, types.NamespacedName{Name: s.clusterName, Namespace: s.clusterNamespace}, &cluster); err != nil {
return reconcile.Result{}, err
}
if err := s.virtualClient.Get(ctx, req.NamespacedName, &virtService); err != nil {
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
syncedService := s.service(&virtService)
if err := controllerutil.SetControllerReference(&cluster, syncedService, s.HostScheme); err != nil {
return reconcile.Result{}, err
@@ -89,19 +94,23 @@ func (s *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request
if err := s.hostClient.Delete(ctx, syncedService); err != nil {
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
// remove the finalizer after cleaning up the synced service
if controllerutil.ContainsFinalizer(&virtService, serviceFinalizerName) {
controllerutil.RemoveFinalizer(&virtService, serviceFinalizerName)
if err := s.virtualClient.Update(ctx, &virtService); err != nil {
return reconcile.Result{}, err
}
}
return reconcile.Result{}, nil
}
// Add finalizer if it does not exist
if !controllerutil.ContainsFinalizer(&virtService, serviceFinalizerName) {
controllerutil.AddFinalizer(&virtService, serviceFinalizerName)
if err := s.virtualClient.Update(ctx, &virtService); err != nil {
return reconcile.Result{}, err
}
@@ -112,9 +121,12 @@ func (s *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request
log.Info("creating the service for the first time on the host cluster")
return reconcile.Result{}, s.hostClient.Create(ctx, syncedService)
}
return reconcile.Result{}, err
}
log.Info("updating service on the host cluster")
return reconcile.Result{}, s.hostClient.Update(ctx, syncedService)
}

View File

@@ -32,7 +32,6 @@ const (
type webhookHandler struct {
client ctrlruntimeclient.Client
scheme *runtime.Scheme
nodeName string
serviceName string
clusterName string
clusterNamespace string
@@ -42,7 +41,7 @@ type webhookHandler struct {
// AddPodMutatorWebhook will add a mutator webhook to the virtual cluster to
// modify the nodeName of the created pods with the name of the virtual kubelet node name
// as well as remove any status fields of the downward apis env fields
func AddPodMutatorWebhook(ctx context.Context, mgr manager.Manager, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, nodeName, serviceName string, logger *log.Logger) error {
func AddPodMutatorWebhook(ctx context.Context, mgr manager.Manager, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, serviceName string, logger *log.Logger) error {
handler := webhookHandler{
client: mgr.GetClient(),
scheme: mgr.GetScheme(),
@@ -50,7 +49,6 @@ func AddPodMutatorWebhook(ctx context.Context, mgr manager.Manager, hostClient c
serviceName: serviceName,
clusterName: clusterName,
clusterNamespace: clusterNamespace,
nodeName: nodeName,
}
// create mutator webhook configuration to the cluster
@@ -58,6 +56,7 @@ func AddPodMutatorWebhook(ctx context.Context, mgr manager.Manager, hostClient c
if err != nil {
return err
}
if err := handler.client.Create(ctx, config); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
@@ -72,14 +71,13 @@ func (w *webhookHandler) Default(ctx context.Context, obj runtime.Object) error
if !ok {
return fmt.Errorf("invalid request: object was type %t not cluster", obj)
}
w.logger.Infow("mutator webhook request", "Pod", pod.Name, "Namespace", pod.Namespace)
if pod.Spec.NodeName == "" {
pod.Spec.NodeName = w.nodeName
}
// look for status.* fields in the env
if pod.Annotations == nil {
pod.Annotations = make(map[string]string)
}
for i, container := range pod.Spec.Containers {
for j, env := range container.Env {
if env.ValueFrom == nil || env.ValueFrom.FieldRef == nil {
@@ -94,22 +92,28 @@ func (w *webhookHandler) Default(ctx context.Context, obj runtime.Object) error
}
}
}
return nil
}
func (w *webhookHandler) configuration(ctx context.Context, hostClient ctrlruntimeclient.Client) (*admissionregistrationv1.MutatingWebhookConfiguration, error) {
w.logger.Infow("extracting webhook tls from host cluster")
var (
webhookTLSSecret v1.Secret
)
if err := hostClient.Get(ctx, types.NamespacedName{Name: agent.WebhookSecretName(w.clusterName), Namespace: w.clusterNamespace}, &webhookTLSSecret); err != nil {
return nil, err
}
caBundle, ok := webhookTLSSecret.Data["ca.crt"]
if !ok {
return nil, errors.New("webhook CABundle does not exist in secret")
}
webhookURL := "https://" + w.serviceName + ":" + webhookPort + webhookPath
return &admissionregistrationv1.MutatingWebhookConfiguration{
TypeMeta: metav1.TypeMeta{
APIVersion: "admissionregistration.k8s.io/v1",
@@ -156,10 +160,12 @@ func ParseFieldPathAnnotationKey(annotationKey string) (int, string, error) {
if len(s) != 3 {
return -1, "", errors.New("fieldpath annotation is not set correctly")
}
containerIndex, err := strconv.Atoi(s[1])
if err != nil {
return -1, "", err
}
envName := s[2]
return containerIndex, envName, nil

View File

@@ -82,6 +82,7 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
if err != nil {
return nil, err
}
virtConfig, err := virtRestConfig(ctx, c.VirtualConfigPath, hostClient, c.ClusterName, c.ClusterNamespace, c.Token, logger)
if err != nil {
return nil, err
@@ -93,7 +94,10 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
}
hostMgr, err := ctrl.NewManager(hostConfig, manager.Options{
Scheme: baseScheme,
Scheme: baseScheme,
LeaderElection: true,
LeaderElectionNamespace: c.ClusterNamespace,
LeaderElectionID: c.ClusterName,
Metrics: ctrlserver.Options{
BindAddress: ":8083",
},
@@ -107,40 +111,55 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
return nil, errors.New("unable to create controller-runtime mgr for host cluster: " + err.Error())
}
virtualScheme := runtime.NewScheme()
// virtual client will only use core types (for now), no need to add anything other than the basics
err = clientgoscheme.AddToScheme(virtualScheme)
if err != nil {
virtualScheme := runtime.NewScheme()
if err := clientgoscheme.AddToScheme(virtualScheme); err != nil {
return nil, errors.New("unable to add client go types to virtual cluster scheme: " + err.Error())
}
webhookServer := webhook.NewServer(webhook.Options{
CertDir: "/opt/rancher/k3k-webhook",
})
virtualMgr, err := ctrl.NewManager(virtConfig, manager.Options{
Scheme: virtualScheme,
WebhookServer: webhookServer,
Scheme: virtualScheme,
WebhookServer: webhookServer,
LeaderElection: true,
LeaderElectionNamespace: "kube-system",
LeaderElectionID: c.ClusterName,
Metrics: ctrlserver.Options{
BindAddress: ":8084",
},
})
if err != nil {
return nil, errors.New("unable to create controller-runtime mgr for virtual cluster: " + err.Error())
}
logger.Info("adding pod mutator webhook")
if err := k3kwebhook.AddPodMutatorWebhook(ctx, virtualMgr, hostClient, c.ClusterName, c.ClusterNamespace, c.AgentHostname, c.ServiceName, logger); err != nil {
if err := k3kwebhook.AddPodMutatorWebhook(ctx, virtualMgr, hostClient, c.ClusterName, c.ClusterNamespace, c.ServiceName, logger); err != nil {
return nil, errors.New("unable to add pod mutator webhook for virtual cluster: " + err.Error())
}
logger.Info("adding service syncer controller")
if err := k3kkubeletcontroller.AddServiceSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace, k3klog.New(false)); err != nil {
return nil, errors.New("failed to add service syncer controller: " + err.Error())
}
logger.Info("adding pvc syncer controller")
if err := k3kkubeletcontroller.AddPVCSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace, k3klog.New(false)); err != nil {
return nil, errors.New("failed to add pvc syncer controller: " + err.Error())
}
logger.Info("adding pod pvc controller")
if err := k3kkubeletcontroller.AddPodPVCController(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace, k3klog.New(false)); err != nil {
return nil, errors.New("failed to add pod pvc controller: " + err.Error())
}
clusterIP, err := clusterIP(ctx, c.ServiceName, c.ClusterNamespace, hostClient)
if err != nil {
return nil, errors.New("failed to extract the clusterIP for the server service: " + err.Error())
@@ -148,6 +167,7 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
// get the cluster's DNS IP to be injected to pods
var dnsService v1.Service
dnsName := controller.SafeConcatNameWithPrefix(c.ClusterName, "kube-dns")
if err := hostClient.Get(ctx, types.NamespacedName{Name: dnsName, Namespace: c.ClusterNamespace}, &dnsService); err != nil {
return nil, errors.New("failed to get the DNS service for the cluster: " + err.Error())
@@ -177,10 +197,16 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
func clusterIP(ctx context.Context, serviceName, clusterNamespace string, hostClient ctrlruntimeclient.Client) (string, error) {
var service v1.Service
serviceKey := types.NamespacedName{Namespace: clusterNamespace, Name: serviceName}
serviceKey := types.NamespacedName{
Namespace: clusterNamespace,
Name: serviceName,
}
if err := hostClient.Get(ctx, serviceKey, &service); err != nil {
return "", err
}
return service.Spec.ClusterIP, nil
}
@@ -189,10 +215,12 @@ func (k *kubelet) registerNode(ctx context.Context, agentIP, srvPort, namespace,
nodeOpts := k.nodeOpts(ctx, srvPort, namespace, name, hostname, agentIP)
var err error
k.node, err = nodeutil.NewNode(k.name, providerFunc, nodeutil.WithClient(k.virtClient), nodeOpts)
if err != nil {
return errors.New("unable to start kubelet: " + err.Error())
}
return nil
}
@@ -225,10 +253,13 @@ func (k *kubelet) start(ctx context.Context) {
if err := k.node.WaitReady(context.Background(), time.Minute*1); err != nil {
k.logger.Fatalw("node was not ready within timeout of 1 minute", zap.Error(err))
}
<-k.node.Done()
if err := k.node.Err(); err != nil {
k.logger.Fatalw("node stopped with an error", zap.Error(err))
}
k.logger.Info("node exited successfully")
}
@@ -253,13 +284,16 @@ func (k *kubelet) nodeOpts(ctx context.Context, srvPort, namespace, name, hostna
if err := nodeutil.AttachProviderRoutes(mux)(c); err != nil {
return errors.New("unable to attach routes: " + err.Error())
}
c.Handler = mux
tlsConfig, err := loadTLSConfig(ctx, k.hostClient, name, namespace, k.name, hostname, k.token, agentIP)
if err != nil {
return errors.New("unable to get tls config: " + err.Error())
}
c.TLSConfig = tlsConfig
return nil
}
}
@@ -273,8 +307,11 @@ func virtRestConfig(ctx context.Context, virtualConfigPath string, hostClient ct
if err := hostClient.Get(ctx, types.NamespacedName{Namespace: clusterNamespace, Name: clusterName}, &cluster); err != nil {
return nil, err
}
endpoint := server.ServiceName(cluster.Name) + "." + cluster.Namespace
var b *bootstrap.ControlRuntimeBootstrap
if err := retry.OnError(controller.Backoff, func(err error) bool {
return err != nil
}, func() error {
@@ -285,20 +322,27 @@ func virtRestConfig(ctx context.Context, virtualConfigPath string, hostClient ct
}); err != nil {
return nil, errors.New("unable to decode bootstrap: " + err.Error())
}
adminCert, adminKey, err := certs.CreateClientCertKey(
controller.AdminCommonName, []string{user.SystemPrivilegedGroup},
nil, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, time.Hour*24*time.Duration(356),
controller.AdminCommonName,
[]string{user.SystemPrivilegedGroup},
nil, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
time.Hour*24*time.Duration(356),
b.ClientCA.Content,
b.ClientCAKey.Content)
b.ClientCAKey.Content,
)
if err != nil {
return nil, err
}
url := fmt.Sprintf("https://%s:%d", server.ServiceName(cluster.Name), server.ServerPort)
kubeconfigData, err := kubeconfigBytes(url, []byte(b.ServerCA.Content), adminCert, adminKey)
if err != nil {
return nil, err
}
return clientcmd.RESTConfigFromKubeConfig(kubeconfigData)
}
@@ -330,10 +374,13 @@ func loadTLSConfig(ctx context.Context, hostClient ctrlruntimeclient.Client, clu
cluster v1alpha1.Cluster
b *bootstrap.ControlRuntimeBootstrap
)
if err := hostClient.Get(ctx, types.NamespacedName{Name: clusterName, Namespace: clusterNamespace}, &cluster); err != nil {
return nil, err
}
endpoint := fmt.Sprintf("%s.%s", server.ServiceName(cluster.Name), cluster.Namespace)
if err := retry.OnError(controller.Backoff, func(err error) bool {
return err != nil
}, func() error {
@@ -343,27 +390,34 @@ func loadTLSConfig(ctx context.Context, hostClient ctrlruntimeclient.Client, clu
}); err != nil {
return nil, errors.New("unable to decode bootstrap: " + err.Error())
}
ip := net.ParseIP(agentIP)
altNames := certutil.AltNames{
DNSNames: []string{hostname},
IPs: []net.IP{ip},
}
cert, key, err := certs.CreateClientCertKey(nodeName, nil, &altNames, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, 0, b.ServerCA.Content, b.ServerCAKey.Content)
if err != nil {
return nil, errors.New("unable to get cert and key: " + err.Error())
}
clientCert, err := tls.X509KeyPair(cert, key)
if err != nil {
return nil, errors.New("unable to get key pair: " + err.Error())
}
// create rootCA CertPool
certs, err := certutil.ParseCertsPEM([]byte(b.ServerCA.Content))
if err != nil {
return nil, errors.New("unable to create ca certs: " + err.Error())
}
if len(certs) < 1 {
return nil, errors.New("ca cert is not parsed correctly")
}
pool := x509.NewCertPool()
pool.AddCert(certs[0])

View File

@@ -102,9 +102,11 @@ func main() {
app.Before = func(clx *cli.Context) error {
logger = log.New(debug)
ctrlruntimelog.SetLogger(zapr.NewLogger(logger.Desugar().WithOptions(zap.AddCallerSkip(1))))
return nil
}
app.Action = run
if err := app.Run(os.Args); err != nil {
logrus.Fatal(err)
}
@@ -112,6 +114,7 @@ func main() {
func run(clx *cli.Context) error {
ctx := context.Background()
if err := cfg.parse(configFile); err != nil {
logger.Fatalw("failed to parse config file", "path", configFile, zap.Error(err))
}
@@ -119,6 +122,7 @@ func run(clx *cli.Context) error {
if err := cfg.validate(); err != nil {
logger.Fatalw("failed to validate config", zap.Error(err))
}
k, err := newKubelet(ctx, &cfg, logger)
if err != nil {
logger.Fatalw("failed to create new virtual kubelet instance", zap.Error(err))

View File

@@ -107,6 +107,7 @@ func (rc *resourceMetricsCollector) DescribeWithStability(ch chan<- *compbasemet
// custom collector in a way that only collects metrics for active containers.
func (rc *resourceMetricsCollector) CollectWithStability(ch chan<- compbasemetrics.Metric) {
var errorCount float64
defer func() {
ch <- compbasemetrics.NewLazyConstMetric(resourceScrapeResultDesc, compbasemetrics.GaugeValue, errorCount)
}()
@@ -121,6 +122,7 @@ func (rc *resourceMetricsCollector) CollectWithStability(ch chan<- compbasemetri
rc.collectContainerCPUMetrics(ch, pod, container)
rc.collectContainerMemoryMetrics(ch, pod, container)
}
rc.collectPodCPUMetrics(ch, pod)
rc.collectPodMemoryMetrics(ch, pod)
}

View File

@@ -119,6 +119,7 @@ func updateNodeCapacity(coreClient typedv1.CoreV1Interface, virtualClient client
// If some node labels are specified only the matching nodes will be considered.
func getResourcesFromNodes(ctx context.Context, coreClient typedv1.CoreV1Interface, nodeLabels map[string]string) (v1.ResourceList, v1.ResourceList, error) {
listOpts := metav1.ListOptions{}
if nodeLabels != nil {
labelSelector := metav1.LabelSelector{MatchLabels: nodeLabels}
listOpts.LabelSelector = labels.Set(labelSelector.MatchLabels).String()
@@ -134,7 +135,6 @@ func getResourcesFromNodes(ctx context.Context, coreClient typedv1.CoreV1Interfa
virtualAvailableResources := corev1.ResourceList{}
for _, node := range nodeList.Items {
// check if the node is Ready
for _, condition := range node.Status.Conditions {
if condition.Type != corev1.NodeReady {

View File

@@ -40,6 +40,7 @@ import (
"k8s.io/client-go/transport/spdy"
compbasemetrics "k8s.io/component-base/metrics"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/manager"
)
@@ -110,24 +111,30 @@ func (p *Provider) GetContainerLogs(ctx context.Context, namespace, podName, con
Follow: opts.Follow,
Previous: opts.Previous,
}
if opts.Tail != 0 {
tailLines := int64(opts.Tail)
options.TailLines = &tailLines
}
if opts.LimitBytes != 0 {
limitBytes := int64(opts.LimitBytes)
options.LimitBytes = &limitBytes
}
if opts.SinceSeconds != 0 {
sinceSeconds := int64(opts.SinceSeconds)
options.SinceSeconds = &sinceSeconds
}
if !opts.SinceTime.IsZero() {
sinceTime := metav1.NewTime(opts.SinceTime)
options.SinceTime = &sinceTime
}
closer, err := p.CoreClient.Pods(p.ClusterNamespace).GetLogs(hostPodName, &options).Stream(ctx)
p.logger.Infof("got error %s when getting logs for %s in %s", err, hostPodName, p.ClusterNamespace)
return closer, err
}
@@ -148,10 +155,12 @@ func (p *Provider) RunInContainer(ctx context.Context, namespace, podName, conta
Stdout: attach.Stdout() != nil,
Stderr: attach.Stderr() != nil,
}, scheme.ParameterCodec)
exec, err := remotecommand.NewSPDYExecutor(&p.ClientConfig, http.MethodPost, req.URL())
if err != nil {
return err
}
return exec.StreamWithContext(ctx, remotecommand.StreamOptions{
Stdin: attach.Stdin(),
Stdout: attach.Stdout(),
@@ -179,10 +188,12 @@ func (p *Provider) AttachToContainer(ctx context.Context, namespace, podName, co
Stdout: attach.Stdout() != nil,
Stderr: attach.Stderr() != nil,
}, scheme.ParameterCodec)
exec, err := remotecommand.NewSPDYExecutor(&p.ClientConfig, http.MethodPost, req.URL())
if err != nil {
return err
}
return exec.StreamWithContext(ctx, remotecommand.StreamOptions{
Stdin: attach.Stdin(),
Stdout: attach.Stdout(),
@@ -204,8 +215,10 @@ func (p *Provider) GetStatsSummary(ctx context.Context) (*statsv1alpha1.Summary,
}
// fetch the stats from all the nodes
var nodeStats statsv1alpha1.NodeStats
var allPodsStats []statsv1alpha1.PodStats
var (
nodeStats statsv1alpha1.NodeStats
allPodsStats []statsv1alpha1.PodStats
)
for _, n := range nodeList.Items {
res, err := p.CoreClient.RESTClient().
@@ -240,6 +253,7 @@ func (p *Provider) GetStatsSummary(ctx context.Context) (*statsv1alpha1.Summary,
}
podsNameMap := make(map[string]*v1.Pod)
for _, pod := range pods {
hostPodName := p.Translator.TranslateName(pod.Namespace, pod.Name)
podsNameMap[hostPodName] = pod
@@ -284,6 +298,7 @@ func (p *Provider) GetMetricsResource(ctx context.Context) ([]*dto.MetricFamily,
if err != nil {
return nil, errors.Join(err, errors.New("error gathering metrics from collector"))
}
return metricFamily, nil
}
@@ -300,9 +315,9 @@ func (p *Provider) PortForward(ctx context.Context, namespace, pod string, port
if err != nil {
return err
}
dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, http.MethodPost, req.URL())
portAsString := strconv.Itoa(int(port))
readyChannel := make(chan struct{})
stopChannel := make(chan struct{}, 1)
@@ -333,7 +348,9 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
Namespace: p.ClusterNamespace,
Name: p.ClusterName,
}
var cluster v1alpha1.Cluster
if err := p.HostClient.Get(ctx, clusterKey, &cluster); err != nil {
return fmt.Errorf("unable to get cluster %s in namespace %s: %w", p.ClusterName, p.ClusterNamespace, err)
}
@@ -347,6 +364,11 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
tPod.Spec.NodeSelector = cluster.Spec.NodeSelector
// setting the hostname for the pod if its not set
if pod.Spec.Hostname == "" {
tPod.Spec.Hostname = pod.Name
}
// if the priorityClass for the virtual cluster is set then override the provided value
// Note: the core-dns and local-path-provisioner pod are scheduled by k3s with the
// 'system-cluster-critical' and 'system-node-critical' default priority classes.
@@ -368,11 +390,20 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
if err := p.transformTokens(ctx, pod, tPod); err != nil {
return fmt.Errorf("unable to transform tokens for pod %s/%s: %w", pod.Namespace, pod.Name, err)
}
// inject networking information to the pod including the virtual cluster controlplane endpoint
p.configureNetworking(pod.Name, pod.Namespace, tPod, p.serverIP)
p.logger.Infow("Creating pod", "Host Namespace", tPod.Namespace, "Host Name", tPod.Name,
"Virtual Namespace", pod.Namespace, "Virtual Name", "env", pod.Name, pod.Spec.Containers[0].Env)
// inject networking information to the pod including the virtual cluster controlplane endpoint
configureNetworking(tPod, pod.Name, pod.Namespace, p.serverIP, p.dnsIP)
p.logger.Infow("creating pod",
"host_namespace", tPod.Namespace, "host_name", tPod.Name,
"virtual_namespace", pod.Namespace, "virtual_name", pod.Name,
)
// set ownerReference to the cluster object
if err := controllerutil.SetControllerReference(&cluster, tPod, p.HostClient.Scheme()); err != nil {
return err
}
return p.HostClient.Create(ctx, tPod)
}
@@ -382,7 +413,9 @@ func (p *Provider) withRetry(ctx context.Context, f func(context.Context, *v1.Po
interval = 2 * time.Second
timeout = 10 * time.Second
)
var allErrors error
// retryFn will retry until the operation succeed, or the timeout occurs
retryFn := func(ctx context.Context) (bool, error) {
if lastErr := f(ctx, pod); lastErr != nil {
@@ -390,11 +423,14 @@ func (p *Provider) withRetry(ctx context.Context, f func(context.Context, *v1.Po
allErrors = errors.Join(allErrors, lastErr)
return false, nil
}
return true, nil
}
if err := wait.PollUntilContextTimeout(ctx, interval, timeout, true, retryFn); err != nil {
return errors.Join(allErrors, ErrRetryTimeout)
}
return nil
}
@@ -403,6 +439,7 @@ func (p *Provider) withRetry(ctx context.Context, f func(context.Context, *v1.Po
func (p *Provider) transformVolumes(ctx context.Context, podNamespace string, volumes []corev1.Volume) error {
for _, volume := range volumes {
var optional bool
if strings.HasPrefix(volume.Name, kubeAPIAccessPrefix) {
continue
}
@@ -411,17 +448,21 @@ func (p *Provider) transformVolumes(ctx context.Context, podNamespace string, vo
if volume.ConfigMap.Optional != nil {
optional = *volume.ConfigMap.Optional
}
if err := p.syncConfigmap(ctx, podNamespace, volume.ConfigMap.Name, optional); err != nil {
return fmt.Errorf("unable to sync configmap volume %s: %w", volume.Name, err)
}
volume.ConfigMap.Name = p.Translator.TranslateName(podNamespace, volume.ConfigMap.Name)
} else if volume.Secret != nil {
if volume.Secret.Optional != nil {
optional = *volume.Secret.Optional
}
if err := p.syncSecret(ctx, podNamespace, volume.Secret.SecretName, optional); err != nil {
return fmt.Errorf("unable to sync secret volume %s: %w", volume.Name, err)
}
volume.Secret.SecretName = p.Translator.TranslateName(podNamespace, volume.Secret.SecretName)
} else if volume.Projected != nil {
for _, source := range volume.Projected.Sources {
@@ -429,15 +470,18 @@ func (p *Provider) transformVolumes(ctx context.Context, podNamespace string, vo
if source.ConfigMap.Optional != nil {
optional = *source.ConfigMap.Optional
}
configMapName := source.ConfigMap.Name
if err := p.syncConfigmap(ctx, podNamespace, configMapName, optional); err != nil {
return fmt.Errorf("unable to sync projected configmap %s: %w", configMapName, err)
}
source.ConfigMap.Name = p.Translator.TranslateName(podNamespace, configMapName)
} else if source.Secret != nil {
if source.Secret.Optional != nil {
optional = *source.Secret.Optional
}
secretName := source.Secret.Name
if err := p.syncSecret(ctx, podNamespace, secretName, optional); err != nil {
return fmt.Errorf("unable to sync projected secret %s: %w", secretName, err)
@@ -451,56 +495,65 @@ func (p *Provider) transformVolumes(ctx context.Context, podNamespace string, vo
if downwardAPI.FieldRef.FieldPath == translate.MetadataNameField {
downwardAPI.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNameAnnotation)
}
if downwardAPI.FieldRef.FieldPath == translate.MetadataNamespaceField {
downwardAPI.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNamespaceAnnotation)
}
}
}
}
return nil
}
// syncConfigmap will add the configmap object to the queue of the syncer controller to be synced to the host cluster
func (p *Provider) syncConfigmap(ctx context.Context, podNamespace string, configMapName string, optional bool) error {
var configMap corev1.ConfigMap
nsName := types.NamespacedName{
Namespace: podNamespace,
Name: configMapName,
}
err := p.VirtualClient.Get(ctx, nsName, &configMap)
if err != nil {
if err := p.VirtualClient.Get(ctx, nsName, &configMap); err != nil {
// check if its optional configmap
if apierrors.IsNotFound(err) && optional {
return nil
}
return fmt.Errorf("unable to get configmap to sync %s/%s: %w", nsName.Namespace, nsName.Name, err)
}
err = p.Handler.AddResource(ctx, &configMap)
if err != nil {
if err := p.Handler.AddResource(ctx, &configMap); err != nil {
return fmt.Errorf("unable to add configmap to sync %s/%s: %w", nsName.Namespace, nsName.Name, err)
}
return nil
}
// syncSecret will add the secret object to the queue of the syncer controller to be synced to the host cluster
func (p *Provider) syncSecret(ctx context.Context, podNamespace string, secretName string, optional bool) error {
p.logger.Infow("Syncing secret", "Name", secretName, "Namespace", podNamespace, "optional", optional)
var secret corev1.Secret
nsName := types.NamespacedName{
Namespace: podNamespace,
Name: secretName,
}
err := p.VirtualClient.Get(ctx, nsName, &secret)
if err != nil {
if err := p.VirtualClient.Get(ctx, nsName, &secret); err != nil {
if apierrors.IsNotFound(err) && optional {
return nil
}
return fmt.Errorf("unable to get secret to sync %s/%s: %w", nsName.Namespace, nsName.Name, err)
}
err = p.Handler.AddResource(ctx, &secret)
if err != nil {
if err := p.Handler.AddResource(ctx, &secret); err != nil {
return fmt.Errorf("unable to add secret to sync %s/%s: %w", nsName.Namespace, nsName.Name, err)
}
return nil
}
@@ -590,16 +643,20 @@ func (p *Provider) DeletePod(ctx context.Context, pod *corev1.Pod) error {
// deletePod removes the host-cluster pod backing the given virtual pod, then
// best-effort prunes synced secrets/configmaps that no other virtual pod in
// the namespace still references.
func (p *Provider) deletePod(ctx context.Context, pod *corev1.Pod) error {
	p.logger.Infof("Got request to delete pod %s", pod.Name)

	hostName := p.Translator.TranslateName(pod.Namespace, pod.Name)
	if err := p.CoreClient.Pods(p.ClusterNamespace).Delete(ctx, hostName, metav1.DeleteOptions{}); err != nil {
		return fmt.Errorf("unable to delete pod %s/%s: %w", pod.Namespace, pod.Name, err)
	}

	if err := p.pruneUnusedVolumes(ctx, pod); err != nil {
		// note that we don't return an error here. The pod was successfully deleted, another process
		// should clean this without affecting the user.
		// %v (not %w): %w is only meaningful to fmt.Errorf, a sugared logger's
		// Errorf would render it as %!w(...).
		p.logger.Errorf("failed to prune leftover volumes for %s/%s: %v, resources may be left", pod.Namespace, pod.Name, err)
	}

	p.logger.Infof("Deleted pod %s", pod.Name)

	return nil
}
@@ -610,6 +667,7 @@ func (p *Provider) pruneUnusedVolumes(ctx context.Context, pod *corev1.Pod) erro
// for pruning
pruneSecrets := sets.Set[string]{}.Insert(rawSecrets...)
pruneConfigMap := sets.Set[string]{}.Insert(rawConfigMaps...)
var pods corev1.PodList
// only pods in the same namespace could be using secrets/configmaps that this pod is using
err := p.VirtualClient.List(ctx, &pods, &client.ListOptions{
@@ -618,35 +676,43 @@ func (p *Provider) pruneUnusedVolumes(ctx context.Context, pod *corev1.Pod) erro
if err != nil {
return fmt.Errorf("unable to list pods: %w", err)
}
for _, vPod := range pods.Items {
if vPod.Name == pod.Name {
continue
}
secrets, configMaps := getSecretsAndConfigmaps(&vPod)
pruneSecrets.Delete(secrets...)
pruneConfigMap.Delete(configMaps...)
}
for _, secretName := range pruneSecrets.UnsortedList() {
var secret corev1.Secret
err := p.VirtualClient.Get(ctx, types.NamespacedName{
key := types.NamespacedName{
Name: secretName,
Namespace: pod.Namespace,
}, &secret)
if err != nil {
}
if err := p.VirtualClient.Get(ctx, key, &secret); err != nil {
return fmt.Errorf("unable to get secret %s/%s for pod volume: %w", pod.Namespace, secretName, err)
}
err = p.Handler.RemoveResource(ctx, &secret)
if err != nil {
if err = p.Handler.RemoveResource(ctx, &secret); err != nil {
return fmt.Errorf("unable to remove secret %s/%s for pod volume: %w", pod.Namespace, secretName, err)
}
}
for _, configMapName := range pruneConfigMap.UnsortedList() {
var configMap corev1.ConfigMap
err := p.VirtualClient.Get(ctx, types.NamespacedName{
key := types.NamespacedName{
Name: configMapName,
Namespace: pod.Namespace,
}, &configMap)
if err != nil {
}
if err := p.VirtualClient.Get(ctx, key, &configMap); err != nil {
return fmt.Errorf("unable to get configMap %s/%s for pod volume: %w", pod.Namespace, configMapName, err)
}
@@ -654,6 +720,7 @@ func (p *Provider) pruneUnusedVolumes(ctx context.Context, pod *corev1.Pod) erro
return fmt.Errorf("unable to remove configMap %s/%s for pod volume: %w", pod.Namespace, configMapName, err)
}
}
return nil
}
@@ -667,12 +734,15 @@ func (p *Provider) GetPod(ctx context.Context, namespace, name string) (*corev1.
Namespace: p.ClusterNamespace,
Name: p.Translator.TranslateName(namespace, name),
}
var pod corev1.Pod
err := p.HostClient.Get(ctx, hostNamespaceName, &pod)
if err != nil {
if err := p.HostClient.Get(ctx, hostNamespaceName, &pod); err != nil {
return nil, fmt.Errorf("error when retrieving pod: %w", err)
}
p.Translator.TranslateFrom(&pod)
return &pod, nil
}
@@ -682,11 +752,14 @@ func (p *Provider) GetPod(ctx context.Context, namespace, name string) (*corev1.
// GetPodStatus returns the status of the virtual pod identified by
// namespace/name. The pod is resolved through GetPod (which translates to the
// host pod), and the status is returned as a DeepCopy so callers cannot
// mutate the provider's view of the object.
func (p *Provider) GetPodStatus(ctx context.Context, namespace, name string) (*corev1.PodStatus, error) {
	p.logger.Debugw("got a request for pod status", "Namespace", namespace, "Name", name)
	pod, err := p.GetPod(ctx, namespace, name)
	if err != nil {
		// lookup failed; surface with context (pod is nil here)
		return nil, fmt.Errorf("unable to get pod for status: %w", err)
	}
	p.logger.Debugw("got pod status", "Namespace", namespace, "Name", name, "Status", pod.Status)
	return pod.Status.DeepCopy(), nil
}
@@ -696,103 +769,112 @@ func (p *Provider) GetPodStatus(ctx context.Context, namespace, name string) (*c
// to return a version after DeepCopy.
// GetPods lists every host pod belonging to this virtual cluster (matched by
// the cluster-name label), translates each back into its virtual-cluster
// identity, and returns deep copies so callers cannot mutate cached objects.
func (p *Provider) GetPods(ctx context.Context) ([]*corev1.Pod, error) {
	req, err := labels.NewRequirement(translate.ClusterNameLabel, selection.Equals, []string{p.ClusterName})
	if err != nil {
		return nil, fmt.Errorf("unable to create label selector: %w", err)
	}

	sel := labels.NewSelector().Add(*req)

	var podList corev1.PodList
	if err := p.HostClient.List(ctx, &podList, &client.ListOptions{LabelSelector: sel}); err != nil {
		return nil, fmt.Errorf("unable to list pods: %w", err)
	}

	pods := make([]*corev1.Pod, 0, len(podList.Items))
	for _, pod := range podList.DeepCopy().Items {
		p.Translator.TranslateFrom(&pod)
		pods = append(pods, &pod)
	}

	return pods, nil
}
// configureNetworking will inject network information to each pod to connect them to the
// virtual cluster api server, as well as configure DNS information to connect them to the
// synced coredns on the host cluster.
func (p *Provider) configureNetworking(podName, podNamespace string, pod *corev1.Pod, serverIP string) {
func configureNetworking(pod *corev1.Pod, podName, podNamespace, serverIP, dnsIP string) {
// inject serverIP to hostalias for the pod
KubernetesHostAlias := corev1.HostAlias{
IP: serverIP,
Hostnames: []string{"kubernetes", "kubernetes.default", "kubernetes.default.svc", "kubernetes.default.svc.cluster", "kubernetes.default.svc.cluster.local"},
}
pod.Spec.HostAliases = append(pod.Spec.HostAliases, KubernetesHostAlias)
// inject networking information to the pod's environment variables
for i := range pod.Spec.Containers {
pod.Spec.Containers[i].Env = append(pod.Spec.Containers[i].Env,
corev1.EnvVar{
Name: "KUBERNETES_PORT_443_TCP",
Value: "tcp://" + p.serverIP + ":6443",
},
corev1.EnvVar{
Name: "KUBERNETES_PORT",
Value: "tcp://" + p.serverIP + ":6443",
},
corev1.EnvVar{
Name: "KUBERNETES_PORT_443_TCP_ADDR",
Value: p.serverIP,
},
corev1.EnvVar{
Name: "KUBERNETES_SERVICE_HOST",
Value: p.serverIP,
},
corev1.EnvVar{
Name: "KUBERNETES_SERVICE_PORT",
Value: "6443",
},
)
}
// handle init containers as well
for i := range pod.Spec.InitContainers {
pod.Spec.InitContainers[i].Env = append(pod.Spec.InitContainers[i].Env,
corev1.EnvVar{
Name: "KUBERNETES_PORT_443_TCP",
Value: "tcp://" + p.serverIP + ":6443",
},
corev1.EnvVar{
Name: "KUBERNETES_PORT",
Value: "tcp://" + p.serverIP + ":6443",
},
corev1.EnvVar{
Name: "KUBERNETES_PORT_443_TCP_ADDR",
Value: p.serverIP,
},
corev1.EnvVar{
Name: "KUBERNETES_SERVICE_HOST",
Value: p.serverIP,
},
corev1.EnvVar{
Name: "KUBERNETES_SERVICE_PORT",
Value: "6443",
},
)
}
pod.Spec.HostAliases = append(pod.Spec.HostAliases, corev1.HostAlias{
IP: serverIP,
Hostnames: []string{
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local",
},
})
// injecting cluster DNS IP to the pods except for coredns pod
if !strings.HasPrefix(podName, "coredns") {
pod.Spec.DNSPolicy = corev1.DNSNone
pod.Spec.DNSConfig = &corev1.PodDNSConfig{
Nameservers: []string{
p.dnsIP,
dnsIP,
},
Searches: []string{
podNamespace + ".svc.cluster.local", "svc.cluster.local", "cluster.local",
podNamespace + ".svc.cluster.local",
"svc.cluster.local",
"cluster.local",
},
}
}
updatedEnvVars := []corev1.EnvVar{
{Name: "KUBERNETES_PORT", Value: "tcp://" + serverIP + ":6443"},
{Name: "KUBERNETES_SERVICE_HOST", Value: serverIP},
{Name: "KUBERNETES_SERVICE_PORT", Value: "6443"},
{Name: "KUBERNETES_SERVICE_PORT_HTTPS", Value: "6443"},
{Name: "KUBERNETES_PORT_443_TCP", Value: "tcp://" + serverIP + ":6443"},
{Name: "KUBERNETES_PORT_443_TCP_ADDR", Value: serverIP},
{Name: "KUBERNETES_PORT_443_TCP_PORT", Value: "6443"},
}
// inject networking information to the pod's environment variables
for i := range pod.Spec.Containers {
pod.Spec.Containers[i].Env = overrideEnvVars(pod.Spec.Containers[i].Env, updatedEnvVars)
}
// handle init containers as well
for i := range pod.Spec.InitContainers {
pod.Spec.InitContainers[i].Env = overrideEnvVars(pod.Spec.InitContainers[i].Env, updatedEnvVars)
}
}
// overrideEnvVars replaces any entry of orig whose Name matches an entry in
// updated, leaving all other entries untouched. The orig slice is modified
// in place and returned.
func overrideEnvVars(orig, updated []corev1.EnvVar) []corev1.EnvVar {
	if len(updated) == 0 {
		return orig
	}

	// index the replacement values by name for O(1) lookups
	replacements := make(map[string]corev1.EnvVar, len(updated))
	for _, env := range updated {
		replacements[env.Name] = env
	}

	for i := range orig {
		if replacement, ok := replacements[orig[i].Name]; ok {
			orig[i] = replacement
		}
	}

	return orig
}
// getSecretsAndConfigmaps retrieves a list of all secrets/configmaps that are in use by a given pod. Useful
// for removing/seeing which virtual cluster resources need to be in the host cluster.
func getSecretsAndConfigmaps(pod *corev1.Pod) ([]string, []string) {
var secrets []string
var configMaps []string
var (
secrets []string
configMaps []string
)
for _, volume := range pod.Spec.Volumes {
if volume.Secret != nil {
secrets = append(secrets, volume.Secret.SecretName)
@@ -808,6 +890,7 @@ func getSecretsAndConfigmaps(pod *corev1.Pod) ([]string, []string) {
}
}
}
return secrets, configMaps
}
@@ -828,28 +911,33 @@ func (p *Provider) configureFieldPathEnv(pod, tPod *v1.Pod) error {
envVar.ValueFrom.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNameAnnotation)
pod.Spec.InitContainers[i].Env[j] = envVar
}
if fieldPath == translate.MetadataNamespaceField {
envVar.ValueFrom.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.MetadataNamespaceField)
pod.Spec.InitContainers[i].Env[j] = envVar
}
}
}
for i, container := range pod.Spec.Containers {
for j, envVar := range container.Env {
if envVar.ValueFrom == nil || envVar.ValueFrom.FieldRef == nil {
continue
}
fieldPath := envVar.ValueFrom.FieldRef.FieldPath
if fieldPath == translate.MetadataNameField {
envVar.ValueFrom.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNameAnnotation)
pod.Spec.Containers[i].Env[j] = envVar
}
if fieldPath == translate.MetadataNamespaceField {
envVar.ValueFrom.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNameAnnotation)
pod.Spec.Containers[i].Env[j] = envVar
}
}
}
for name, value := range pod.Annotations {
if strings.Contains(name, webhook.FieldpathField) {
containerIndex, envName, err := webhook.ParseFieldPathAnnotationKey(name)
@@ -869,5 +957,6 @@ func (p *Provider) configureFieldPathEnv(pod, tPod *v1.Pod) error {
delete(tPod.Annotations, name)
}
}
return nil
}

View File

@@ -0,0 +1,71 @@
package provider

import (
	"reflect"
	"testing"

	corev1 "k8s.io/api/core/v1"
)

// Test_overrideEnvVars exercises overrideEnvVars with empty, matching, and
// partially-matching input slices.
//
// Fixes: the file previously imported k8s.io/api/core/v1 twice under two
// aliases (corev1 and v1) and used them interchangeably; a single alias is
// used now. The args field "new" also shadowed the builtin new; it is renamed
// to "updated" to match the parameter name of overrideEnvVars.
func Test_overrideEnvVars(t *testing.T) {
	type args struct {
		orig    []corev1.EnvVar
		updated []corev1.EnvVar
	}

	tests := []struct {
		name string
		args args
		want []corev1.EnvVar
	}{
		{
			name: "orig and new are empty",
			args: args{
				orig:    []corev1.EnvVar{},
				updated: []corev1.EnvVar{},
			},
			want: []corev1.EnvVar{},
		},
		{
			name: "only orig is empty",
			args: args{
				orig:    []corev1.EnvVar{},
				updated: []corev1.EnvVar{{Name: "FOO", Value: "new_val"}},
			},
			want: []corev1.EnvVar{},
		},
		{
			name: "orig has a matching element",
			args: args{
				orig:    []corev1.EnvVar{{Name: "FOO", Value: "old_val"}},
				updated: []corev1.EnvVar{{Name: "FOO", Value: "new_val"}},
			},
			want: []corev1.EnvVar{{Name: "FOO", Value: "new_val"}},
		},
		{
			name: "orig have multiple elements",
			args: args{
				orig:    []corev1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "old_val_1"}},
				updated: []corev1.EnvVar{{Name: "FOO_1", Value: "new_val_1"}},
			},
			want: []corev1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "new_val_1"}},
		},
		{
			name: "orig and new have multiple elements and some not matching",
			args: args{
				orig:    []corev1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "old_val_1"}},
				updated: []corev1.EnvVar{{Name: "FOO_1", Value: "new_val_1"}, {Name: "FOO_2", Value: "val_1"}},
			},
			want: []corev1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "new_val_1"}},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := overrideEnvVars(tt.args.orig, tt.args.updated); !reflect.DeepEqual(got, tt.want) {
				t.Errorf("overrideEnvVars() = %v, want %v", got, tt.want)
			}
		})
	}
}

View File

@@ -30,12 +30,14 @@ func (p *Provider) transformTokens(ctx context.Context, pod, tPod *corev1.Pod) e
}
virtualSecretName := k3kcontroller.SafeConcatNameWithPrefix(pod.Spec.ServiceAccountName, "token")
virtualSecret := virtualSecret(virtualSecretName, pod.Namespace, pod.Spec.ServiceAccountName)
if err := p.VirtualClient.Create(ctx, virtualSecret); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
}
// extracting the tokens data from the secret we just created
virtualSecretKey := types.NamespacedName{
Name: virtualSecret.Name,
@@ -49,9 +51,11 @@ func (p *Provider) transformTokens(ctx context.Context, pod, tPod *corev1.Pod) e
if len(virtualSecret.Data) < 3 {
return fmt.Errorf("token secret %s/%s data is empty", virtualSecret.Namespace, virtualSecret.Name)
}
hostSecret := virtualSecret.DeepCopy()
hostSecret.Type = ""
hostSecret.Annotations = make(map[string]string)
p.Translator.TranslateTo(hostSecret)
if err := p.HostClient.Create(ctx, hostSecret); err != nil {
@@ -59,7 +63,9 @@ func (p *Provider) transformTokens(ctx context.Context, pod, tPod *corev1.Pod) e
return err
}
}
p.translateToken(tPod, hostSecret.Name)
return nil
}
@@ -96,6 +102,7 @@ func isKubeAccessVolumeFound(pod *corev1.Pod) bool {
return true
}
}
return false
}

View File

@@ -17,9 +17,9 @@ func (t *translatorSizeQueue) Next() *remotecommand.TerminalSize {
if !ok {
return nil
}
newSize := remotecommand.TerminalSize{
return &remotecommand.TerminalSize{
Width: size.Width,
Height: size.Height,
}
return &newSize
}

View File

@@ -45,14 +45,17 @@ func (t *ToHostTranslator) TranslateTo(obj client.Object) {
if annotations == nil {
annotations = map[string]string{}
}
annotations[ResourceNameAnnotation] = obj.GetName()
annotations[ResourceNamespaceAnnotation] = obj.GetNamespace()
obj.SetAnnotations(annotations)
// add a label to quickly identify objects owned by a given virtual cluster
labels := obj.GetLabels()
if labels == nil {
labels = map[string]string{}
}
labels[ClusterNameLabel] = t.ClusterName
obj.SetLabels(labels)
@@ -77,6 +80,7 @@ func (t *ToHostTranslator) TranslateFrom(obj client.Object) {
// In this case, we need to have some sort of fallback or error return
name := annotations[ResourceNameAnnotation]
namespace := annotations[ResourceNamespaceAnnotation]
obj.SetName(name)
obj.SetNamespace(namespace)
delete(annotations, ResourceNameAnnotation)
@@ -91,7 +95,6 @@ func (t *ToHostTranslator) TranslateFrom(obj client.Object) {
// resource version/UID won't match what's in the virtual cluster.
obj.SetResourceVersion("")
obj.SetUID("")
}
// TranslateName returns the name of the resource in the host cluster. Will not update the object with this name.
@@ -106,5 +109,6 @@ func (t *ToHostTranslator) TranslateName(namespace string, name string) string {
nameKey := fmt.Sprintf("%s+%s+%s", name, namespace, t.ClusterName)
// it's possible that the suffix will be in the name, so we use hex to make it valid for k8s
nameSuffix := hex.EncodeToString([]byte(nameKey))
return controller.SafeConcatName(namePrefix, nameSuffix)
}

View File

@@ -82,9 +82,12 @@ func main() {
if err := validate(); err != nil {
return err
}
logger = log.New(debug)
return nil
}
if err := app.Run(os.Args); err != nil {
logger.Fatalw("failed to run k3k controller", zap.Error(err))
}
@@ -111,22 +114,26 @@ func run(clx *cli.Context) error {
ctrlruntimelog.SetLogger(zapr.NewLogger(logger.Desugar().WithOptions(zap.AddCallerSkip(1))))
logger.Info("adding cluster controller")
if err := cluster.Add(ctx, mgr, sharedAgentImage, sharedAgentImagePullPolicy); err != nil {
return fmt.Errorf("failed to add the new cluster controller: %v", err)
}
logger.Info("adding etcd pod controller")
if err := cluster.AddPodController(ctx, mgr); err != nil {
return fmt.Errorf("failed to add the new cluster controller: %v", err)
}
logger.Info("adding clusterset controller")
if err := clusterset.Add(ctx, mgr, clusterCIDR); err != nil {
return fmt.Errorf("failed to add the clusterset controller: %v", err)
}
if clusterCIDR == "" {
logger.Info("adding networkpolicy node controller")
if err := clusterset.AddNodeController(ctx, mgr); err != nil {
return fmt.Errorf("failed to add the clusterset node controller: %v", err)
}
@@ -147,5 +154,6 @@ func validate() error {
return errors.New("invalid value for shared agent image policy")
}
}
return nil
}

View File

@@ -1,6 +0,0 @@
image: rancher/k3k:{{replace "+" "-" build.tag}}
manifests:
- image: rancher/k3k:{{replace "+" "-" build.tag}}-amd64
platform:
architecture: amd64
os: linux

View File

@@ -1,16 +0,0 @@
/*
Copyright YEAR Rancher Labs, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

View File

@@ -1,46 +0,0 @@
#!/bin/bash
# Build the k3k controller, k3k-kubelet, and k3kcli binaries into ./bin.
# When CROSS=true on an amd64 host, also cross-compile for s390x/arm64 Linux,
# FreeBSD, Darwin (amd64/arm64), and Windows.
set -ex

source $(dirname $0)/version

cd $(dirname $0)/..

mkdir -p bin deploy

# Static linking flags only apply on Linux hosts.
if [ "$(uname)" = "Linux" ]; then
OTHER_LINKFLAGS="-extldflags -static -s"
fi

# Embed version/commit metadata (VERSION and COMMIT come from ops/version).
LINKFLAGS="-X github.com/rancher/k3k.Version=$VERSION"
LINKFLAGS="-X github.com/rancher/k3k.GitCommit=$COMMIT $LINKFLAGS"

CGO_ENABLED=0 go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3k
if [ "$CROSS" = "true" ] && [ "$ARCH" = "amd64" ]; then
CGO_ENABLED=0 GOOS=linux GOARCH=s390x go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3k-s390x
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3k-arm64
GOOS=freebsd GOARCH=arm64 go build -ldflags "$LINKFLAGS" -o bin/k3k-freebsd
GOOS=darwin GOARCH=amd64 go build -ldflags "$LINKFLAGS" -o bin/k3k-darwin-amd64
GOOS=darwin GOARCH=arm64 go build -ldflags "$LINKFLAGS" -o bin/k3k-darwin-aarch64
GOOS=windows GOARCH=amd64 go build -ldflags "$LINKFLAGS" -o bin/k3k-windows
fi

# build k3k-kubelet
CGO_ENABLED=0 go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3k-kubelet ./k3k-kubelet
if [ "$CROSS" = "true" ] && [ "$ARCH" = "amd64" ]; then
CGO_ENABLED=0 GOOS=linux GOARCH=s390x go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3k-kubelet-s390x
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3k-kubelet-arm64
GOOS=freebsd GOARCH=arm64 go build -ldflags "$LINKFLAGS" -o bin/k3k-kubelet-freebsd
GOOS=darwin GOARCH=amd64 go build -ldflags "$LINKFLAGS" -o bin/k3k-kubelet-darwin-amd64
GOOS=darwin GOARCH=arm64 go build -ldflags "$LINKFLAGS" -o bin/k3k-kubelet-darwin-aarch64
GOOS=windows GOARCH=amd64 go build -ldflags "$LINKFLAGS" -o bin/k3k-kubelet-windows
fi

# build k3kcli
CGO_ENABLED=0 go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3kcli ./cli
if [ "$CROSS" = "true" ] && [ "$ARCH" = "amd64" ]; then
CGO_ENABLED=0 GOOS=linux GOARCH=s390x go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3kcli-s390x ./cli
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3kcli-arm64 ./cli
GOOS=freebsd GOARCH=arm64 go build -ldflags "$LINKFLAGS" -o bin/k3kcli-freebsd ./cli
GOOS=darwin GOARCH=amd64 go build -ldflags "$LINKFLAGS" -o bin/k3kcli-darwin-amd64 ./cli
GOOS=darwin GOARCH=arm64 go build -ldflags "$LINKFLAGS" -o bin/k3kcli-darwin-aarch64 ./cli
GOOS=windows GOARCH=amd64 go build -ldflags "$LINKFLAGS" -o bin/k3kcli-windows ./cli
fi

View File

@@ -1,8 +0,0 @@
#! /bin/sh
# Generate CRD manifests from ./pkg/apis into the Helm chart's crds directory.
cd $(dirname $0)/../

# This will return non-zero until all of our objects in ./pkg/apis can generate valid crds.
# allowDangerousTypes is needed for struct that use floats
controller-gen crd:generateEmbeddedObjectMeta=true,allowDangerousTypes=false paths=./pkg/apis/... output:crd:dir=./charts/k3k/crds

View File

@@ -1,16 +0,0 @@
#!/usr/bin/env bash
# Write a sha256sum.txt covering every artifact in CHECKSUM_DIR (default ./bin).
set -ex

cd $(dirname $0)/..

CHECKSUM_DIR=${CHECKSUM_DIR:-./bin}
sumfile="${CHECKSUM_DIR}/sha256sum.txt"

# Truncate the sum file so reruns don't append duplicate entries.
echo -n "" > "${sumfile}"

# Hash everything except the sum file itself; the sed strips the directory
# prefix so recorded names are relative.
files=$(ls ${CHECKSUM_DIR} | grep -v "sha256sum.txt")
for file in ${files}; do
sha256sum "${CHECKSUM_DIR}/${file}" | sed "s;$(dirname ${CHECKSUM_DIR}/${file})/;;g" >> "${sumfile}"
done

cat "${sumfile}"

11
ops/ci
View File

@@ -1,11 +0,0 @@
#!/bin/bash
# Full CI pipeline: build, checksum, test, validate, validate-ci, package.
# set -e aborts the pipeline at the first failing step.
set -e

cd $(dirname $0)

./build
./checksum
./test
./validate
./validate-ci
./package

View File

@@ -1,8 +0,0 @@
#!/bin/bash
# Default target: build, test, and package (no validation/checksums; see ./ci).
set -e

cd $(dirname $0)

./build
./test
./package

View File

@@ -1,11 +0,0 @@
#!/bin/bash
# Dapper container entrypoint: dispatch "$1" to the matching ./ops script,
# otherwise exec the arguments as a raw command.
set -e

mkdir -p bin dist

if [ -e ./ops/$1 ]; then
./ops/"$@"
else
# NOTE(review): exec replaces this shell, so the chown below never runs on
# this branch — confirm that is intended.
exec "$@"
fi

# Restore host ownership of files created inside the container.
chown -R $DAPPER_UID:$DAPPER_GID .

View File

@@ -1,31 +0,0 @@
#!/bin/bash
# Publish the packaged Helm chart: refuse to overwrite an existing chart tag,
# regenerate index.yaml with chart-releaser, and push it to the gh-pages branch.
set -ex

source $(dirname $0)/version

cd $(dirname $0)/..

git fetch --tags

CHART_TAG=chart-$(grep "version: " charts/k3k/Chart.yaml | awk '{print $2}')

# Abort when this chart tag was already published.
# (was: $(git tag -l "$version") — $version is undefined, so the guard never fired)
if [ -n "$(git tag -l "$CHART_TAG")" ]; then
echo "tag already exists"
exit 1
fi

# update the index.yaml
cr index --token ${GITHUB_TOKEN} \
--release-name-template "chart-{{ .Version }}" \
--package-path ./deploy/ \
--index-path index.yaml \
--git-repo k3k \
-o rancher

# push to gh-pages
git config --global user.email "hussein.galal.ahmed.11@gmail.com"
git config --global user.name "galal-hussein"
git config --global url.https://${GITHUB_TOKEN}@github.com/.insteadOf https://github.com/

# push index.yaml to gh-pages
git add index.yaml
# CHART_TAG already carries the "chart-" prefix; the old message doubled it
# ("add chart-chart-x.y.z ...").
git commit -m "add ${CHART_TAG} to index.yaml"
git push --force --set-upstream origin HEAD:gh-pages

View File

@@ -1,30 +0,0 @@
#!/bin/bash
# Copy built binaries into dist/artifacts and build the k3k and k3k-kubelet
# container images, preferring an ARCH-specific Dockerfile when one exists.
set -e

source $(dirname $0)/version

cd $(dirname $0)/..

mkdir -p dist/artifacts
cp bin/k3k dist/artifacts/k3k${SUFFIX}
cp bin/k3kcli dist/artifacts/k3kcli${SUFFIX}
cp bin/k3k-kubelet dist/artifacts/k3k-kubelet${SUFFIX}

IMAGE=${REPO}/k3k:${TAG}
DOCKERFILE=package/Dockerfile
# Use the architecture-specific Dockerfile when present.
if [ -e ${DOCKERFILE}.${ARCH} ]; then
DOCKERFILE=${DOCKERFILE}.${ARCH}
fi

docker build -f ${DOCKERFILE} -t ${IMAGE} .
echo Built ${IMAGE}

# todo: This might need to go to it's own repo
IMAGE=${REPO}/k3k:${TAG}-kubelet
DOCKERFILE=package/Dockerfile.kubelet
if [ -e ${DOCKERFILE}.${ARCH} ]; then
DOCKERFILE=${DOCKERFILE}.${ARCH}
fi

docker build -f ${DOCKERFILE} -t ${IMAGE} .
echo Built ${IMAGE}

View File

@@ -1,10 +0,0 @@
#!/bin/bash
# Package the k3k Helm chart into ./deploy using chart-releaser (cr).
set -ex

source $(dirname $0)/version

cd $(dirname $0)/..

mkdir -p deploy/

cr package --package-path deploy/ charts/k3k

View File

@@ -1,3 +0,0 @@
#!/bin/bash
# Release runs the same steps as CI; delegate to the ci script.
exec $(dirname $0)/ci

View File

@@ -1,9 +0,0 @@
#!/bin/bash
# Run the Go unit tests with coverage; set SKIP_TESTS to bypass them.
set -e

cd $(dirname $0)/..

if [ -z ${SKIP_TESTS} ]; then
echo Running tests
go test -cover -tags=test ./...
fi

View File

@@ -1,21 +0,0 @@
#!/bin/bash
set -e
cd $(dirname $0)/..
echo Running validation
PACKAGES="$(go list ./...)"
if ! command -v golangci-lint; then
echo Skipping validation: no golangci-lint available
exit
fi
echo Running validation
echo Running: golangci-lint
golangci-lint run
echo Running: go fmt
test -z "$(go fmt ${PACKAGES} | tee /dev/stderr)"

View File

@@ -1,15 +0,0 @@
#!/bin/bash
set -e
cd $(dirname $0)/..
go generate
source ./ops/version
if [ -n "$DIRTY" ]; then
echo Git is dirty
git status
git diff
exit 1
fi

View File

@@ -1,33 +0,0 @@
#!/bin/bash
if [ -n "$(git status --porcelain --untracked-files=no)" ]; then
DIRTY="-dirty"
fi
COMMIT=$(git rev-parse --short HEAD)
GIT_TAG=${TAG:-$(git tag -l --contains HEAD | head -n 1)}
if [[ -z "$DIRTY" && -n "$GIT_TAG" ]]; then
VERSION=$GIT_TAG
else
VERSION="${COMMIT}${DIRTY}"
fi
if [ -z "$ARCH" ]; then
ARCH=$(go env GOHOSTARCH)
fi
SUFFIX="-${ARCH}"
if [[ $VERSION = "chart*" ]]; then
TAG=${TAG:-${VERSION}}
else
TAG=${TAG:-${VERSION}${SUFFIX}}
fi
REPO=${REPO:-rancher}
if echo $TAG | grep dirty; then
TAG=dev
fi

View File

@@ -25,5 +25,6 @@ func addKnownTypes(s *runtime.Scheme) error {
&ClusterSetList{},
)
metav1.AddToGroupVersion(s, SchemeGroupVersion)
return nil
}

View File

@@ -1,86 +0,0 @@
package v1alpha1
import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// +kubebuilder:object:root=true
type ClusterSet struct {
metav1.ObjectMeta `json:"metadata,omitempty"`
metav1.TypeMeta `json:",inline"`
// +kubebuilder:default={}
//
// Spec is the spec of the ClusterSet
Spec ClusterSetSpec `json:"spec"`
// Status is the status of the ClusterSet
Status ClusterSetStatus `json:"status,omitempty"`
}
type ClusterSetSpec struct {
// MaxLimits are the limits that apply to all clusters (server + agent) in the set
MaxLimits v1.ResourceList `json:"maxLimits,omitempty"`
// DefaultLimits are the limits used for servers/agents when a cluster in the set doesn't provide any
DefaultLimits *ClusterLimit `json:"defaultLimits,omitempty"`
// DefaultNodeSelector is the node selector that applies to all clusters (server + agent) in the set
DefaultNodeSelector map[string]string `json:"defaultNodeSelector,omitempty"`
// DefaultPriorityClass is the priorityClassName applied to all pods of all clusters in the set
DefaultPriorityClass string `json:"defaultPriorityClass,omitempty"`
// DisableNetworkPolicy is an option that will disable the creation of a default networkpolicy for cluster isolation
DisableNetworkPolicy bool `json:"disableNetworkPolicy,omitempty"`
// +kubebuilder:default={shared}
// +kubebuilder:validation:XValidation:message="mode is immutable",rule="self == oldSelf"
// +kubebuilder:validation:MinItems=1
//
// AllowedNodeTypes are the allowed cluster provisioning modes. Defaults to [shared].
AllowedNodeTypes []ClusterMode `json:"allowedNodeTypes,omitempty"`
// PodSecurityAdmissionLevel is the policy level applied to the pods in the namespace.
PodSecurityAdmissionLevel *PodSecurityAdmissionLevel `json:"podSecurityAdmissionLevel,omitempty"`
}
// +kubebuilder:validation:Enum=privileged;baseline;restricted
//
// PodSecurityAdmissionLevel is the policy level applied to the pods in the namespace.
type PodSecurityAdmissionLevel string
const (
PrivilegedPodSecurityAdmissionLevel = PodSecurityAdmissionLevel("privileged")
BaselinePodSecurityAdmissionLevel = PodSecurityAdmissionLevel("baseline")
RestrictedPodSecurityAdmissionLevel = PodSecurityAdmissionLevel("restricted")
)
type ClusterSetStatus struct {
// ObservedGeneration was the generation at the time the status was updated.
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
// LastUpdate is the timestamp when the status was last updated
LastUpdate string `json:"lastUpdateTime,omitempty"`
// Summary is a summary of the status
Summary string `json:"summary,omitempty"`
// Conditions are the invidual conditions for the cluster set
Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:object:root=true
type ClusterSetList struct {
metav1.ListMeta `json:"metadata,omitempty"`
metav1.TypeMeta `json:",inline"`
Items []ClusterSet `json:"items"`
}

View File

@@ -10,80 +10,37 @@ import (
// +kubebuilder:object:root=true
// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// Cluster defines a virtual Kubernetes cluster managed by k3k.
// It specifies the desired state of a virtual cluster, including version, node configuration, and networking.
// k3k uses this to provision and manage these virtual clusters.
type Cluster struct {
metav1.ObjectMeta `json:"metadata,omitempty"`
metav1.TypeMeta `json:",inline"`
// Spec defines the desired state of the Cluster.
//
// +kubebuilder:default={}
// +optional
Spec ClusterSpec `json:"spec"`
Spec ClusterSpec `json:"spec"`
// Status reflects the observed state of the Cluster.
//
// +optional
Status ClusterStatus `json:"status,omitempty"`
}
// ClusterSpec defines the desired state of a virtual Kubernetes cluster.
type ClusterSpec struct {
// Version is a string representing the Kubernetes version to be used by the virtual nodes.
// Version is the K3s version to use for the virtual nodes.
// It should follow the K3s versioning convention (e.g., v1.28.2-k3s1).
// If not specified, the Kubernetes version of the host node will be used.
//
// +optional
Version string `json:"version"`
// Servers is the number of K3s pods to run in server (controlplane) mode.
//
// +kubebuilder:default=1
// +kubebuilder:validation:XValidation:message="cluster must have at least one server",rule="self >= 1"
// +optional
Servers *int32 `json:"servers"`
// Agents is the number of K3s pods to run in agent (worker) mode.
//
// +kubebuilder:default=0
// +kubebuilder:validation:XValidation:message="invalid value for agents",rule="self >= 0"
// +optional
Agents *int32 `json:"agents"`
// NodeSelector is the node selector that will be applied to all server/agent pods.
// In "shared" mode the node selector will be applied also to the workloads.
//
// +optional
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
// PriorityClass is the priorityClassName that will be applied to all server/agent pods.
// In "shared" mode the priorityClassName will be applied also to the workloads.
PriorityClass string `json:"priorityClass,omitempty"`
// Limit is the limits that apply for the server/worker nodes.
Limit *ClusterLimit `json:"clusterLimit,omitempty"`
// TokenSecretRef is Secret reference used as a token join server and worker nodes to the cluster. The controller
// assumes that the secret has a field "token" in its data, any other fields in the secret will be ignored.
// +optional
TokenSecretRef *v1.SecretReference `json:"tokenSecretRef"`
// ClusterCIDR is the CIDR range for the pods of the cluster. Defaults to 10.42.0.0/16.
// +kubebuilder:validation:XValidation:message="clusterCIDR is immutable",rule="self == oldSelf"
ClusterCIDR string `json:"clusterCIDR,omitempty"`
// ServiceCIDR is the CIDR range for the services in the cluster. Defaults to 10.43.0.0/16.
// +kubebuilder:validation:XValidation:message="serviceCIDR is immutable",rule="self == oldSelf"
ServiceCIDR string `json:"serviceCIDR,omitempty"`
// ClusterDNS is the IP address for the coredns service. Needs to be in the range provided by ServiceCIDR or CoreDNS may not deploy.
// Defaults to 10.43.0.10.
// +kubebuilder:validation:XValidation:message="clusterDNS is immutable",rule="self == oldSelf"
ClusterDNS string `json:"clusterDNS,omitempty"`
// ServerArgs are the ordered key value pairs (e.x. "testArg", "testValue") for the K3s pods running in server mode.
ServerArgs []string `json:"serverArgs,omitempty"`
// AgentArgs are the ordered key value pairs (e.x. "testArg", "testValue") for the K3s pods running in agent mode.
AgentArgs []string `json:"agentArgs,omitempty"`
// TLSSANs are the subjectAlternativeNames for the certificate the K3s server will use.
TLSSANs []string `json:"tlsSANs,omitempty"`
// Addons is a list of secrets containing raw YAML which will be deployed in the virtual K3k cluster on startup.
Addons []Addon `json:"addons,omitempty"`
// Mode is the cluster provisioning mode which can be either "shared" or "virtual". Defaults to "shared"
// Mode specifies the cluster provisioning mode: "shared" or "virtual".
// Defaults to "shared". This field is immutable.
//
// +kubebuilder:default="shared"
// +kubebuilder:validation:Enum=shared;virtual
@@ -91,49 +48,259 @@ type ClusterSpec struct {
// +optional
Mode ClusterMode `json:"mode,omitempty"`
// Persistence contains options controlling how the etcd data of the virtual cluster is persisted. By default, no data
// persistence is guaranteed, so restart of a virtual cluster pod may result in data loss without this field.
// Servers specifies the number of K3s pods to run in server (control plane) mode.
// Must be at least 1. Defaults to 1.
//
// +kubebuilder:validation:XValidation:message="cluster must have at least one server",rule="self >= 1"
// +kubebuilder:default=1
// +optional
Servers *int32 `json:"servers"`
// Agents specifies the number of K3s pods to run in agent (worker) mode.
// Must be 0 or greater. Defaults to 0.
// This field is ignored in "shared" mode.
//
// +kubebuilder:default=0
// +kubebuilder:validation:XValidation:message="invalid value for agents",rule="self >= 0"
// +optional
Agents *int32 `json:"agents"`
// ClusterCIDR is the CIDR range for pod IPs.
// Defaults to 10.42.0.0/16 in shared mode and 10.52.0.0/16 in virtual mode.
// This field is immutable.
//
// +kubebuilder:validation:XValidation:message="clusterCIDR is immutable",rule="self == oldSelf"
// +optional
ClusterCIDR string `json:"clusterCIDR,omitempty"`
// ServiceCIDR is the CIDR range for service IPs.
// Defaults to 10.43.0.0/16 in shared mode and 10.53.0.0/16 in virtual mode.
// This field is immutable.
//
// +kubebuilder:validation:XValidation:message="serviceCIDR is immutable",rule="self == oldSelf"
// +optional
ServiceCIDR string `json:"serviceCIDR,omitempty"`
// ClusterDNS is the IP address for the CoreDNS service.
// Must be within the ServiceCIDR range. Defaults to 10.43.0.10.
// This field is immutable.
//
// +kubebuilder:validation:XValidation:message="clusterDNS is immutable",rule="self == oldSelf"
// +optional
ClusterDNS string `json:"clusterDNS,omitempty"`
// Persistence specifies options for persisting etcd data.
// Defaults to dynamic persistence, which uses a PersistentVolumeClaim to provide data persistence.
// A default StorageClass is required for dynamic persistence.
//
// +kubebuilder:default={type: "dynamic"}
Persistence PersistenceConfig `json:"persistence,omitempty"`
// Expose contains options for exposing the apiserver inside/outside of the cluster. By default, this is only exposed as a
// clusterIP which is relatively secure, but difficult to access outside of the cluster.
// Expose specifies options for exposing the API server.
// By default, it's only exposed as a ClusterIP.
//
// +optional
Expose *ExposeConfig `json:"expose,omitempty"`
}
// +kubebuilder:validation:Enum=shared;virtual
// +kubebuilder:default="shared"
//
// ClusterMode is the possible provisioning mode of a Cluster.
type ClusterMode string
// NodeSelector specifies node labels to constrain where server/agent pods are scheduled.
// In "shared" mode, this also applies to workloads.
//
// +optional
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
// +kubebuilder:default="dynamic"
//
// PersistenceMode is the storage mode of a Cluster.
type PersistenceMode string
// PriorityClass specifies the priorityClassName for server/agent pods.
// In "shared" mode, this also applies to workloads.
//
// +optional
PriorityClass string `json:"priorityClass,omitempty"`
const (
SharedClusterMode = ClusterMode("shared")
VirtualClusterMode = ClusterMode("virtual")
EphemeralNodeType = PersistenceMode("ephemeral")
DynamicNodesType = PersistenceMode("dynamic")
)
// TokenSecretRef is a Secret reference containing the token used by worker nodes to join the cluster.
// The Secret must have a "token" field in its data.
//
// +optional
TokenSecretRef *v1.SecretReference `json:"tokenSecretRef"`
type ClusterLimit struct {
// ServerLimit is the limits (cpu/mem) that apply to the server nodes
// TLSSANs specifies subject alternative names for the K3s server certificate.
//
// +optional
TLSSANs []string `json:"tlsSANs,omitempty"`
// ServerArgs specifies ordered key-value pairs for K3s server pods.
// Example: ["--tls-san=example.com"]
//
// +optional
ServerArgs []string `json:"serverArgs,omitempty"`
// AgentArgs specifies ordered key-value pairs for K3s agent pods.
// Example: ["--node-name=my-agent-node"]
//
// +optional
AgentArgs []string `json:"agentArgs,omitempty"`
// Addons specifies secrets containing raw YAML to deploy on cluster startup.
//
// +optional
Addons []Addon `json:"addons,omitempty"`
// ServerLimit specifies resource limits for server nodes.
//
// +optional
ServerLimit v1.ResourceList `json:"serverLimit,omitempty"`
// WorkerLimit is the limits (cpu/mem) that apply to the agent nodes
// WorkerLimit specifies resource limits for agent nodes.
//
// +optional
WorkerLimit v1.ResourceList `json:"workerLimit,omitempty"`
}
// ClusterMode is the possible provisioning mode of a Cluster.
//
// +kubebuilder:validation:Enum=shared;virtual
// +kubebuilder:default="shared"
type ClusterMode string
const (
// SharedClusterMode represents a cluster that shares resources with the host node.
SharedClusterMode = ClusterMode("shared")
// VirtualClusterMode represents a cluster that runs in a virtual environment.
VirtualClusterMode = ClusterMode("virtual")
)
// PersistenceMode is the storage mode of a Cluster.
//
// +kubebuilder:default="dynamic"
type PersistenceMode string
const (
// EphemeralPersistenceMode represents a cluster with no data persistence.
EphemeralPersistenceMode = PersistenceMode("ephemeral")
// DynamicPersistenceMode represents a cluster with dynamic data persistence using a PVC.
DynamicPersistenceMode = PersistenceMode("dynamic")
)
// Addon specifies a Secret containing YAML to be deployed on cluster startup.
type Addon struct {
// SecretNamespace is the namespace of the Secret.
SecretNamespace string `json:"secretNamespace,omitempty"`
SecretRef string `json:"secretRef,omitempty"`
// SecretRef is the name of the Secret.
SecretRef string `json:"secretRef,omitempty"`
}
// PersistenceConfig specifies options for persisting etcd data.
type PersistenceConfig struct {
// Type specifies the persistence mode.
//
// +kubebuilder:default="dynamic"
Type PersistenceMode `json:"type"`
// StorageClassName is the name of the StorageClass to use for the PVC.
// This field is only relevant in "dynamic" mode.
//
// +optional
StorageClassName *string `json:"storageClassName,omitempty"`
// StorageRequestSize is the requested size for the PVC.
// This field is only relevant in "dynamic" mode.
//
// +optional
StorageRequestSize string `json:"storageRequestSize,omitempty"`
}
// ExposeConfig specifies options for exposing the API server.
type ExposeConfig struct {
// Ingress specifies options for exposing the API server through an Ingress.
//
// +optional
Ingress *IngressConfig `json:"ingress,omitempty"`
// LoadBalancer specifies options for exposing the API server through a LoadBalancer service.
//
// +optional
LoadBalancer *LoadBalancerConfig `json:"loadbalancer,omitempty"`
// NodePort specifies options for exposing the API server through NodePort.
//
// +optional
NodePort *NodePortConfig `json:"nodePort,omitempty"`
}
// IngressConfig specifies options for exposing the API server through an Ingress.
type IngressConfig struct {
// Annotations specifies annotations to add to the Ingress.
//
// +optional
Annotations map[string]string `json:"annotations,omitempty"`
// IngressClassName specifies the IngressClass to use for the Ingress.
//
// +optional
IngressClassName string `json:"ingressClassName,omitempty"`
}
// LoadBalancerConfig specifies options for exposing the API server through a LoadBalancer service.
type LoadBalancerConfig struct{}
// NodePortConfig specifies options for exposing the API server through NodePort.
type NodePortConfig struct {
// ServerPort is the port on each node on which the K3s server service is exposed when type is NodePort.
// If not specified, a port will be allocated (default: 30000-32767).
//
// +optional
ServerPort *int32 `json:"serverPort,omitempty"`
// ServicePort is the port on each node on which the K3s service is exposed when type is NodePort.
// If not specified, a port will be allocated (default: 30000-32767).
//
// +optional
ServicePort *int32 `json:"servicePort,omitempty"`
// ETCDPort is the port on each node on which the ETCD service is exposed when type is NodePort.
// If not specified, a port will be allocated (default: 30000-32767).
//
// +optional
ETCDPort *int32 `json:"etcdPort,omitempty"`
}
// ClusterStatus reflects the observed state of a Cluster.
type ClusterStatus struct {
// HostVersion is the Kubernetes version of the host node.
//
// +optional
HostVersion string `json:"hostVersion,omitempty"`
// ClusterCIDR is the CIDR range for pod IPs.
//
// +optional
ClusterCIDR string `json:"clusterCIDR,omitempty"`
// ServiceCIDR is the CIDR range for service IPs.
//
// +optional
ServiceCIDR string `json:"serviceCIDR,omitempty"`
// ClusterDNS is the IP address for the CoreDNS service.
//
// +optional
ClusterDNS string `json:"clusterDNS,omitempty"`
// TLSSANs specifies subject alternative names for the K3s server certificate.
//
// +optional
TLSSANs []string `json:"tlsSANs,omitempty"`
// Persistence specifies options for persisting etcd data.
//
// +optional
Persistence PersistenceConfig `json:"persistence,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:object:root=true
// ClusterList is a list of Cluster resources.
type ClusterList struct {
metav1.ListMeta `json:"metadata,omitempty"`
metav1.TypeMeta `json:",inline"`
@@ -141,53 +308,128 @@ type ClusterList struct {
Items []Cluster `json:"items"`
}
type PersistenceConfig struct {
// +kubebuilder:default="dynamic"
Type PersistenceMode `json:"type"`
StorageClassName *string `json:"storageClassName,omitempty"`
StorageRequestSize string `json:"storageRequestSize,omitempty"`
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// +kubebuilder:object:root=true
// +kubebuilder:validation:XValidation:rule="self.metadata.name == \"default\"",message="Name must match 'default'"
// +kubebuilder:printcolumn:JSONPath=".spec.displayName",name=Display Name,type=string
// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",name=Age,type=date
// ClusterSet represents a group of virtual Kubernetes clusters managed by k3k.
// It allows defining common configurations and constraints for the clusters within the set.
type ClusterSet struct {
metav1.ObjectMeta `json:"metadata,omitempty"`
metav1.TypeMeta `json:",inline"`
// Spec defines the desired state of the ClusterSet.
//
// +kubebuilder:default={}
Spec ClusterSetSpec `json:"spec"`
// Status reflects the observed state of the ClusterSet.
//
// +optional
Status ClusterSetStatus `json:"status,omitempty"`
}
type ExposeConfig struct {
// ClusterSetSpec defines the desired state of a ClusterSet.
type ClusterSetSpec struct {
// DisplayName is the human-readable name for the set.
//
// +optional
Ingress *IngressConfig `json:"ingress,omitempty"`
DisplayName string `json:"displayName,omitempty"`
// Quota specifies the resource limits for clusters within a clusterset.
//
// +optional
LoadBalancer *LoadBalancerConfig `json:"loadbalancer,omitempty"`
Quota *v1.ResourceQuotaSpec `json:"quota,omitempty"`
// Limit specifies the LimitRange that will be applied to all pods within the ClusterSet
// to set defaults and constraints (min/max)
//
// +optional
NodePort *NodePortConfig `json:"nodePort,omitempty"`
Limit *v1.LimitRangeSpec `json:"limit,omitempty"`
// DefaultNodeSelector specifies the node selector that applies to all clusters (server + agent) in the set.
//
// +optional
DefaultNodeSelector map[string]string `json:"defaultNodeSelector,omitempty"`
// DefaultPriorityClass specifies the priorityClassName applied to all pods of all clusters in the set.
//
// +optional
DefaultPriorityClass string `json:"defaultPriorityClass,omitempty"`
// AllowedModeTypes specifies the allowed cluster provisioning modes. Defaults to [shared].
//
// +kubebuilder:default={shared}
// +kubebuilder:validation:XValidation:message="mode is immutable",rule="self == oldSelf"
// +kubebuilder:validation:MinItems=1
// +optional
AllowedModeTypes []ClusterMode `json:"allowedModeTypes,omitempty"`
// DisableNetworkPolicy indicates whether to disable the creation of a default network policy for cluster isolation.
//
// +optional
DisableNetworkPolicy bool `json:"disableNetworkPolicy,omitempty"`
// PodSecurityAdmissionLevel specifies the pod security admission level applied to the pods in the namespace.
//
// +optional
PodSecurityAdmissionLevel *PodSecurityAdmissionLevel `json:"podSecurityAdmissionLevel,omitempty"`
}
type IngressConfig struct {
// Annotations is a key value map that will enrich the Ingress annotations
// PodSecurityAdmissionLevel is the policy level applied to the pods in the namespace.
//
// +kubebuilder:validation:Enum=privileged;baseline;restricted
type PodSecurityAdmissionLevel string
const (
// PrivilegedPodSecurityAdmissionLevel allows all pods to be admitted.
PrivilegedPodSecurityAdmissionLevel = PodSecurityAdmissionLevel("privileged")
// BaselinePodSecurityAdmissionLevel enforces a baseline level of security restrictions.
BaselinePodSecurityAdmissionLevel = PodSecurityAdmissionLevel("baseline")
// RestrictedPodSecurityAdmissionLevel enforces stricter security restrictions.
RestrictedPodSecurityAdmissionLevel = PodSecurityAdmissionLevel("restricted")
)
// ClusterSetStatus reflects the observed state of a ClusterSet.
type ClusterSetStatus struct {
// ObservedGeneration was the generation at the time the status was updated.
//
// +optional
Annotations map[string]string `json:"annotations,omitempty"`
IngressClassName string `json:"ingressClassName,omitempty"`
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
// LastUpdate is the timestamp when the status was last updated.
//
// +optional
LastUpdate string `json:"lastUpdateTime,omitempty"`
// Summary is a summary of the status.
//
// +optional
Summary string `json:"summary,omitempty"`
// Conditions are the individual conditions for the cluster set.
//
// +optional
// +patchMergeKey=type
// +patchStrategy=merge
Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
}
type LoadBalancerConfig struct {
Enabled bool `json:"enabled"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:object:root=true
type NodePortConfig struct {
// ServerPort is the port on each node on which the K3s server service is exposed when type is NodePort.
// If not specified, a port will be allocated (default: 30000-32767)
// +optional
ServerPort *int32 `json:"serverPort,omitempty"`
// ServicePort is the port on each node on which the K3s service is exposed when type is NodePort.
// If not specified, a port will be allocated (default: 30000-32767)
// +optional
ServicePort *int32 `json:"servicePort,omitempty"`
// ETCDPort is the port on each node on which the ETCD service is exposed when type is NodePort.
// If not specified, a port will be allocated (default: 30000-32767)
// +optional
ETCDPort *int32 `json:"etcdPort,omitempty"`
}
// ClusterSetList is a list of ClusterSet resources.
type ClusterSetList struct {
metav1.ListMeta `json:"metadata,omitempty"`
metav1.TypeMeta `json:",inline"`
type ClusterStatus struct {
HostVersion string `json:"hostVersion,omitempty"`
ClusterCIDR string `json:"clusterCIDR,omitempty"`
ServiceCIDR string `json:"serviceCIDR,omitempty"`
ClusterDNS string `json:"clusterDNS,omitempty"`
TLSSANs []string `json:"tlsSANs,omitempty"`
Persistence PersistenceConfig `json:"persistence,omitempty"`
Items []ClusterSet `json:"items"`
}

View File

@@ -55,36 +55,6 @@ func (in *Cluster) DeepCopyObject() runtime.Object {
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterLimit) DeepCopyInto(out *ClusterLimit) {
*out = *in
if in.ServerLimit != nil {
in, out := &in.ServerLimit, &out.ServerLimit
*out = make(v1.ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
if in.WorkerLimit != nil {
in, out := &in.WorkerLimit, &out.WorkerLimit
*out = make(v1.ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterLimit.
func (in *ClusterLimit) DeepCopy() *ClusterLimit {
if in == nil {
return nil
}
out := new(ClusterLimit)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterList) DeepCopyInto(out *ClusterList) {
*out = *in
@@ -182,16 +152,14 @@ func (in *ClusterSetList) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterSetSpec) DeepCopyInto(out *ClusterSetSpec) {
*out = *in
if in.MaxLimits != nil {
in, out := &in.MaxLimits, &out.MaxLimits
*out = make(v1.ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
if in.Quota != nil {
in, out := &in.Quota, &out.Quota
*out = new(v1.ResourceQuotaSpec)
(*in).DeepCopyInto(*out)
}
if in.DefaultLimits != nil {
in, out := &in.DefaultLimits, &out.DefaultLimits
*out = new(ClusterLimit)
if in.Limit != nil {
in, out := &in.Limit, &out.Limit
*out = new(v1.LimitRangeSpec)
(*in).DeepCopyInto(*out)
}
if in.DefaultNodeSelector != nil {
@@ -201,8 +169,8 @@ func (in *ClusterSetSpec) DeepCopyInto(out *ClusterSetSpec) {
(*out)[key] = val
}
}
if in.AllowedNodeTypes != nil {
in, out := &in.AllowedNodeTypes, &out.AllowedNodeTypes
if in.AllowedModeTypes != nil {
in, out := &in.AllowedModeTypes, &out.AllowedModeTypes
*out = make([]ClusterMode, len(*in))
copy(*out, *in)
}
@@ -260,6 +228,12 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
*out = new(int32)
**out = **in
}
in.Persistence.DeepCopyInto(&out.Persistence)
if in.Expose != nil {
in, out := &in.Expose, &out.Expose
*out = new(ExposeConfig)
(*in).DeepCopyInto(*out)
}
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = make(map[string]string, len(*in))
@@ -267,16 +241,16 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
(*out)[key] = val
}
}
if in.Limit != nil {
in, out := &in.Limit, &out.Limit
*out = new(ClusterLimit)
(*in).DeepCopyInto(*out)
}
if in.TokenSecretRef != nil {
in, out := &in.TokenSecretRef, &out.TokenSecretRef
*out = new(v1.SecretReference)
**out = **in
}
if in.TLSSANs != nil {
in, out := &in.TLSSANs, &out.TLSSANs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.ServerArgs != nil {
in, out := &in.ServerArgs, &out.ServerArgs
*out = make([]string, len(*in))
@@ -287,21 +261,24 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.TLSSANs != nil {
in, out := &in.TLSSANs, &out.TLSSANs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Addons != nil {
in, out := &in.Addons, &out.Addons
*out = make([]Addon, len(*in))
copy(*out, *in)
}
in.Persistence.DeepCopyInto(&out.Persistence)
if in.Expose != nil {
in, out := &in.Expose, &out.Expose
*out = new(ExposeConfig)
(*in).DeepCopyInto(*out)
if in.ServerLimit != nil {
in, out := &in.ServerLimit, &out.ServerLimit
*out = make(v1.ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
if in.WorkerLimit != nil {
in, out := &in.WorkerLimit, &out.WorkerLimit
*out = make(v1.ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
return
}

View File

@@ -40,6 +40,7 @@ func CreateClientCertKey(commonName string, organization []string, altNames *cer
if altNames != nil {
cfg.AltNames = *altNames
}
cert, err := certutil.NewSignedCert(cfg, key.(crypto.Signer), caCertPEM[0], caKeyPEM.(crypto.Signer))
if err != nil {
return nil, nil, err
@@ -59,6 +60,7 @@ func generateKey() (data []byte, err error) {
func AddSANs(sans []string) certutil.AltNames {
var altNames certutil.AltNames
for _, san := range sans {
ip := net.ParseIP(san)
if ip == nil {
@@ -67,5 +69,6 @@ func AddSANs(sans []string) certutil.AltNames {
altNames.IPs = append(altNames.IPs, ip)
}
}
return altNames
}

View File

@@ -6,9 +6,9 @@ import (
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)
@@ -42,14 +42,21 @@ func configSecretName(clusterName string) string {
func ensureObject(ctx context.Context, cfg *Config, obj ctrlruntimeclient.Object) error {
log := ctrl.LoggerFrom(ctx)
result, err := controllerutil.CreateOrUpdate(ctx, cfg.client, obj, func() error {
return controllerutil.SetControllerReference(cfg.cluster, obj, cfg.scheme)
})
key := ctrlruntimeclient.ObjectKeyFromObject(obj)
if result != controllerutil.OperationResultNone {
key := client.ObjectKeyFromObject(obj)
log.Info(fmt.Sprintf("ensuring %T", obj), "key", key, "result, result")
log.Info(fmt.Sprintf("ensuring %T", obj), "key", key)
if err := controllerutil.SetControllerReference(cfg.cluster, obj, cfg.scheme); err != nil {
return err
}
return err
if err := cfg.client.Create(ctx, obj); err != nil {
if apierrors.IsAlreadyExists(err) {
return cfg.client.Update(ctx, obj)
}
return err
}
return nil
}

View File

@@ -19,7 +19,6 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"sigs.k8s.io/controller-runtime/pkg/client"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -58,11 +57,11 @@ func (s *SharedAgent) EnsureResources(ctx context.Context) error {
s.role(ctx),
s.roleBinding(ctx),
s.service(ctx),
s.deployment(ctx),
s.daemonset(ctx),
s.dnsService(ctx),
s.webhookTLS(ctx),
); err != nil {
return fmt.Errorf("failed to ensure some resources: %w\n", err)
return fmt.Errorf("failed to ensure some resources: %w", err)
}
return nil
@@ -97,6 +96,7 @@ func sharedAgentData(cluster *v1alpha1.Cluster, serviceName, token, ip string) s
if cluster.Spec.Version == "" {
version = cluster.Status.HostVersion
}
return fmt.Sprintf(`clusterName: %s
clusterNamespace: %s
serverIP: %s
@@ -106,16 +106,16 @@ version: %s`,
cluster.Name, cluster.Namespace, ip, serviceName, token, version)
}
func (s *SharedAgent) deployment(ctx context.Context) error {
func (s *SharedAgent) daemonset(ctx context.Context) error {
labels := map[string]string{
"cluster": s.cluster.Name,
"type": "agent",
"mode": "shared",
}
deploy := &apps.Deployment{
deploy := &apps.DaemonSet{
TypeMeta: metav1.TypeMeta{
Kind: "Deployment",
Kind: "DaemonSet",
APIVersion: "apps/v1",
},
ObjectMeta: metav1.ObjectMeta{
@@ -123,7 +123,7 @@ func (s *SharedAgent) deployment(ctx context.Context) error {
Namespace: s.cluster.Namespace,
Labels: labels,
},
Spec: apps.DeploymentSpec{
Spec: apps.DaemonSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: labels,
},
@@ -140,10 +140,9 @@ func (s *SharedAgent) deployment(ctx context.Context) error {
}
func (s *SharedAgent) podSpec() v1.PodSpec {
var limit v1.ResourceList
return v1.PodSpec{
ServiceAccountName: s.Name(),
NodeSelector: s.cluster.Spec.NodeSelector,
Volumes: []v1.Volume{
{
Name: "config",
@@ -188,7 +187,7 @@ func (s *SharedAgent) podSpec() v1.PodSpec {
Image: s.image,
ImagePullPolicy: v1.PullPolicy(s.imagePullPolicy),
Resources: v1.ResourceRequirements{
Limits: limit,
Limits: v1.ResourceList{},
},
Args: []string{
"--config",
@@ -345,6 +344,11 @@ func (s *SharedAgent) role(ctx context.Context) error {
Resources: []string{"clusters"},
Verbs: []string{"get", "watch", "list"},
},
{
APIGroups: []string{"coordination.k8s.io"},
Resources: []string{"leases"},
Verbs: []string{"*"},
},
},
}
@@ -390,7 +394,7 @@ func (s *SharedAgent) webhookTLS(ctx context.Context) error {
},
}
key := client.ObjectKeyFromObject(webhookSecret)
key := ctrlruntimeclient.ObjectKeyFromObject(webhookSecret)
if err := s.client.Get(ctx, key, webhookSecret); err != nil {
if !apierrors.IsNotFound(err) {
return err
@@ -402,6 +406,7 @@ func (s *SharedAgent) webhookTLS(ctx context.Context) error {
}
altNames := []string{s.Name(), s.cluster.Name}
webhookCert, webhookKey, err := newWebhookCerts(s.Name(), altNames, caPrivateKeyPEM, caCertPEM)
if err != nil {
return err

View File

@@ -16,6 +16,7 @@ func Test_sharedAgentData(t *testing.T) {
ip string
token string
}
tests := []struct {
name string
args args
@@ -100,6 +101,7 @@ func Test_sharedAgentData(t *testing.T) {
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
config := sharedAgentData(tt.args.cluster, tt.args.serviceName, tt.args.token, tt.args.ip)

View File

@@ -41,7 +41,7 @@ func (v *VirtualAgent) EnsureResources(ctx context.Context) error {
v.config(ctx),
v.deployment(ctx),
); err != nil {
return fmt.Errorf("failed to ensure some resources: %w\n", err)
return fmt.Errorf("failed to ensure some resources: %w", err)
}
return nil
@@ -81,6 +81,7 @@ func (v *VirtualAgent) deployment(ctx context.Context) error {
image := controller.K3SImage(v.cluster)
const name = "k3k-agent"
selector := metav1.LabelSelector{
MatchLabels: map[string]string{
"cluster": v.cluster.Name,
@@ -116,7 +117,9 @@ func (v *VirtualAgent) deployment(ctx context.Context) error {
func (v *VirtualAgent) podSpec(image, name string, args []string, affinitySelector *metav1.LabelSelector) v1.PodSpec {
var limit v1.ResourceList
args = append([]string{"agent", "--config", "/opt/rancher/k3s/config.yaml"}, args...)
podSpec := v1.PodSpec{
Volumes: []v1.Volume{
{
@@ -225,5 +228,12 @@ func (v *VirtualAgent) podSpec(image, name string, args []string, affinitySelect
},
}
// specify resource limits if specified for the servers.
if v.cluster.Spec.WorkerLimit != nil {
podSpec.Containers[0].Resources = v1.ResourceRequirements{
Limits: v.cluster.Spec.WorkerLimit,
}
}
return podSpec
}

View File

@@ -12,6 +12,7 @@ func Test_virtualAgentData(t *testing.T) {
serviceIP string
token string
}
tests := []struct {
name string
args args
@@ -30,6 +31,7 @@ func Test_virtualAgentData(t *testing.T) {
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
config := virtualAgentData(tt.args.serviceIP, tt.args.token)

View File

@@ -4,17 +4,20 @@ import (
"context"
"errors"
"fmt"
"net"
"reflect"
"strings"
"time"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
"github.com/rancher/k3k/pkg/controller/cluster/server"
"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -39,8 +42,10 @@ const (
maxConcurrentReconciles = 1
defaultClusterCIDR = "10.44.0.0/16"
defaultClusterServiceCIDR = "10.45.0.0/16"
defaultVirtualClusterCIDR = "10.52.0.0/16"
defaultVirtualServiceCIDR = "10.53.0.0/16"
defaultSharedClusterCIDR = "10.42.0.0/16"
defaultSharedServiceCIDR = "10.43.0.0/16"
defaultStoragePersistentSize = "1G"
memberRemovalTimeout = time.Minute * 1
)
@@ -118,6 +123,11 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
// if there was an error during the reconciliation, return
if reconcilerErr != nil {
if errors.Is(reconcilerErr, bootstrap.ErrServerNotReady) {
log.Info("server not ready, requeueing")
return reconcile.Result{RequeueAfter: time.Second * 10}, nil
}
return reconcile.Result{}, reconcilerErr
}
@@ -170,18 +180,44 @@ func (c *ClusterReconciler) reconcileCluster(ctx context.Context, cluster *v1alp
cluster.Status.ClusterCIDR = cluster.Spec.ClusterCIDR
if cluster.Status.ClusterCIDR == "" {
cluster.Status.ClusterCIDR = defaultClusterCIDR
cluster.Status.ClusterCIDR = defaultVirtualClusterCIDR
if cluster.Spec.Mode == v1alpha1.SharedClusterMode {
cluster.Status.ClusterCIDR = defaultSharedClusterCIDR
}
}
cluster.Status.ServiceCIDR = cluster.Spec.ServiceCIDR
if cluster.Status.ServiceCIDR == "" {
cluster.Status.ServiceCIDR = defaultClusterServiceCIDR
// in shared mode try to lookup the serviceCIDR
if cluster.Spec.Mode == v1alpha1.SharedClusterMode {
log.Info("looking up Service CIDR for shared mode")
cluster.Status.ServiceCIDR, err = c.lookupServiceCIDR(ctx)
if err != nil {
log.Error(err, "error while looking up Cluster Service CIDR")
cluster.Status.ServiceCIDR = defaultSharedServiceCIDR
}
}
// in virtual mode assign a default serviceCIDR
if cluster.Spec.Mode == v1alpha1.VirtualClusterMode {
log.Info("assign default service CIDR for virtual mode")
cluster.Status.ServiceCIDR = defaultVirtualServiceCIDR
}
}
if err := c.ensureNetworkPolicy(ctx, cluster); err != nil {
return err
}
service, err := c.ensureClusterService(ctx, cluster)
if err != nil {
return err
}
serviceIP := service.Spec.ClusterIP
if err := c.createClusterConfigs(ctx, cluster, s, serviceIP); err != nil {
@@ -232,8 +268,10 @@ func (c *ClusterReconciler) ensureBootstrapSecret(ctx context.Context, cluster *
bootstrapSecret.Data = map[string][]byte{
"bootstrap": bootstrapData,
}
return nil
})
return err
}
@@ -259,9 +297,11 @@ func (c *ClusterReconciler) createClusterConfigs(ctx context.Context, cluster *v
if err != nil {
return err
}
if err := controllerutil.SetControllerReference(cluster, serverConfig, c.Scheme); err != nil {
return err
}
if err := c.Client.Create(ctx, serverConfig); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
@@ -271,6 +311,84 @@ func (c *ClusterReconciler) createClusterConfigs(ctx context.Context, cluster *v
return nil
}
func (c *ClusterReconciler) ensureNetworkPolicy(ctx context.Context, cluster *v1alpha1.Cluster) error {
log := ctrl.LoggerFrom(ctx)
log.Info("ensuring network policy")
expectedNetworkPolicy := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: k3kcontroller.SafeConcatNameWithPrefix(cluster.Name),
Namespace: cluster.Namespace,
},
TypeMeta: metav1.TypeMeta{
Kind: "NetworkPolicy",
APIVersion: "networking.k8s.io/v1",
},
Spec: networkingv1.NetworkPolicySpec{
PolicyTypes: []networkingv1.PolicyType{
networkingv1.PolicyTypeIngress,
networkingv1.PolicyTypeEgress,
},
Ingress: []networkingv1.NetworkPolicyIngressRule{
{},
},
Egress: []networkingv1.NetworkPolicyEgressRule{
{
To: []networkingv1.NetworkPolicyPeer{
{
IPBlock: &networkingv1.IPBlock{
CIDR: "0.0.0.0/0",
Except: []string{cluster.Status.ClusterCIDR},
},
},
{
NamespaceSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"kubernetes.io/metadata.name": cluster.Namespace,
},
},
},
{
NamespaceSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"kubernetes.io/metadata.name": metav1.NamespaceSystem,
},
},
PodSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"k8s-app": "kube-dns",
},
},
},
},
},
},
},
}
currentNetworkPolicy := expectedNetworkPolicy.DeepCopy()
result, err := controllerutil.CreateOrUpdate(ctx, c.Client, currentNetworkPolicy, func() error {
if err := controllerutil.SetControllerReference(cluster, currentNetworkPolicy, c.Scheme); err != nil {
return err
}
currentNetworkPolicy.Spec = expectedNetworkPolicy.Spec
return nil
})
if err != nil {
return err
}
key := client.ObjectKeyFromObject(currentNetworkPolicy)
if result != controllerutil.OperationResultNone {
log.Info("cluster network policy updated", "key", key, "result", result)
}
return nil
}
func (c *ClusterReconciler) ensureClusterService(ctx context.Context, cluster *v1alpha1.Cluster) (*v1.Service, error) {
log := ctrl.LoggerFrom(ctx)
log.Info("ensuring cluster service")
@@ -284,8 +402,10 @@ func (c *ClusterReconciler) ensureClusterService(ctx context.Context, cluster *v
}
currentService.Spec = expectedService.Spec
return nil
})
if err != nil {
return nil, err
}
@@ -321,6 +441,7 @@ func (c *ClusterReconciler) ensureIngress(ctx context.Context, cluster *v1alpha1
return nil
})
if err != nil {
return err
}
@@ -341,23 +462,31 @@ func (c *ClusterReconciler) server(ctx context.Context, cluster *v1alpha1.Cluste
if err := controllerutil.SetControllerReference(cluster, serverStatefulService, c.Scheme); err != nil {
return err
}
if err := c.Client.Create(ctx, serverStatefulService); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
}
serverStatefulSet, err := server.StatefulServer(ctx)
expectedServerStatefulSet, err := server.StatefulServer(ctx)
if err != nil {
return err
}
result, err := controllerutil.CreateOrUpdate(ctx, c.Client, serverStatefulSet, func() error {
return controllerutil.SetControllerReference(cluster, serverStatefulSet, c.Scheme)
currentServerStatefulSet := expectedServerStatefulSet.DeepCopy()
result, err := controllerutil.CreateOrUpdate(ctx, c.Client, currentServerStatefulSet, func() error {
if err := controllerutil.SetControllerReference(cluster, currentServerStatefulSet, c.Scheme); err != nil {
return err
}
currentServerStatefulSet.Spec = expectedServerStatefulSet.Spec
return nil
})
if result != controllerutil.OperationResultNone {
key := client.ObjectKeyFromObject(serverStatefulSet)
key := client.ObjectKeyFromObject(currentServerStatefulSet)
log.Info("ensuring serverStatefulSet", "key", key, "result", result)
}
@@ -373,6 +502,7 @@ func (c *ClusterReconciler) bindNodeProxyClusterRole(ctx context.Context, cluste
subjectName := controller.SafeConcatNameWithPrefix(cluster.Name, agent.SharedNodeAgentName)
found := false
for _, subject := range clusterRoleBinding.Subjects {
if subject.Name == subjectName && subject.Namespace == cluster.Namespace {
found = true
@@ -407,5 +537,83 @@ func (c *ClusterReconciler) validate(cluster *v1alpha1.Cluster) error {
if cluster.Name == ClusterInvalidName {
return errors.New("invalid cluster name " + cluster.Name + " no action will be taken")
}
return nil
}
// lookupServiceCIDR attempts to determine the cluster's service CIDR.
// It first attempts to create a failing Service (with an invalid cluster IP)and extracts the expected CIDR from the resulting error.
// If that fails, it searches the 'kube-apiserver' Pod's arguments for the --service-cluster-ip-range flag.
func (c *ClusterReconciler) lookupServiceCIDR(ctx context.Context) (string, error) {
log := ctrl.LoggerFrom(ctx)
// Try to look for the serviceCIDR creating a failing service.
// The error should contain the expected serviceCIDR
log.Info("looking up serviceCIDR from a failing service creation")
failingSvc := v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: "fail", Namespace: "default"},
Spec: v1.ServiceSpec{ClusterIP: "1.1.1.1"},
}
if err := c.Client.Create(ctx, &failingSvc); err != nil {
splittedErrMsg := strings.Split(err.Error(), "The range of valid IPs is ")
if len(splittedErrMsg) > 1 {
serviceCIDR := strings.TrimSpace(splittedErrMsg[1])
log.Info("found serviceCIDR from failing service creation: " + serviceCIDR)
// validate serviceCIDR
_, serviceCIDRAddr, err := net.ParseCIDR(serviceCIDR)
if err != nil {
return "", err
}
return serviceCIDRAddr.String(), nil
}
}
// Try to look for the the kube-apiserver Pod, and look for the '--service-cluster-ip-range' flag.
log.Info("looking up serviceCIDR from kube-apiserver pod")
matchingLabels := ctrlruntimeclient.MatchingLabels(map[string]string{
"component": "kube-apiserver",
"tier": "control-plane",
})
listOpts := &ctrlruntimeclient.ListOptions{Namespace: "kube-system"}
matchingLabels.ApplyToList(listOpts)
var podList v1.PodList
if err := c.Client.List(ctx, &podList, listOpts); err != nil {
if !apierrors.IsNotFound(err) {
return "", err
}
}
if len(podList.Items) > 0 {
apiServerPod := podList.Items[0]
apiServerArgs := apiServerPod.Spec.Containers[0].Args
for _, arg := range apiServerArgs {
if strings.HasPrefix(arg, "--service-cluster-ip-range=") {
serviceCIDR := strings.TrimPrefix(arg, "--service-cluster-ip-range=")
log.Info("found serviceCIDR from kube-apiserver pod: " + serviceCIDR)
// validate serviceCIDR
_, serviceCIDRAddr, err := net.ParseCIDR(serviceCIDR)
if err != nil {
log.Error(err, "serviceCIDR is not valid")
break
}
return serviceCIDRAddr.String(), nil
}
}
}
log.Info("cannot find serviceCIDR from lookup")
return "", nil
}

View File

@@ -34,6 +34,7 @@ func (c *ClusterReconciler) finalizeCluster(ctx context.Context, cluster v1alpha
for _, pod := range podList.Items {
if controllerutil.ContainsFinalizer(&pod, etcdPodFinalizerName) {
controllerutil.RemoveFinalizer(&pod, etcdPodFinalizerName)
if err := c.Client.Update(ctx, &pod); err != nil {
return reconcile.Result{}, err
}
@@ -47,10 +48,12 @@ func (c *ClusterReconciler) finalizeCluster(ctx context.Context, cluster v1alpha
if controllerutil.ContainsFinalizer(&cluster, clusterFinalizerName) {
// remove finalizer from the cluster and update it.
controllerutil.RemoveFinalizer(&cluster, clusterFinalizerName)
if err := c.Client.Update(ctx, &cluster); err != nil {
return reconcile.Result{}, err
}
}
return reconcile.Result{}, nil
}
@@ -63,6 +66,7 @@ func (c *ClusterReconciler) unbindNodeProxyClusterRole(ctx context.Context, clus
subjectName := controller.SafeConcatNameWithPrefix(cluster.Name, agent.SharedNodeAgentName)
var cleanedSubjects []rbacv1.Subject
for _, subject := range clusterRoleBinding.Subjects {
if subject.Name != subjectName || subject.Namespace != cluster.Namespace {
cleanedSubjects = append(cleanedSubjects, subject)
@@ -75,5 +79,6 @@ func (c *ClusterReconciler) unbindNodeProxyClusterRole(ctx context.Context, clus
}
clusterRoleBinding.Subjects = cleanedSubjects
return c.Client.Update(ctx, clusterRoleBinding)
}

View File

@@ -6,11 +6,13 @@ import (
"time"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/cluster/server"
"sigs.k8s.io/controller-runtime/pkg/client"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"
@@ -18,7 +20,7 @@ import (
. "github.com/onsi/gomega"
)
var _ = Describe("Cluster Controller", func() {
var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), func() {
Context("creating a Cluster", func() {
@@ -55,7 +57,7 @@ var _ = Describe("Cluster Controller", func() {
Expect(cluster.Spec.Servers).To(Equal(ptr.To[int32](1)))
Expect(cluster.Spec.Version).To(BeEmpty())
// TOFIX
//Expect(cluster.Spec.Persistence.Type).To(Equal(v1alpha1.DynamicNodesType))
// Expect(cluster.Spec.Persistence.Type).To(Equal(v1alpha1.DynamicPersistenceMode))
serverVersion, err := k8s.DiscoveryClient.ServerVersion()
Expect(err).To(Not(HaveOccurred()))
@@ -70,9 +72,27 @@ var _ = Describe("Cluster Controller", func() {
WithTimeout(time.Second * 30).
WithPolling(time.Second).
Should(Equal(expectedHostVersion))
// check NetworkPolicy
expectedNetworkPolicy := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: k3kcontroller.SafeConcatNameWithPrefix(cluster.Name),
Namespace: cluster.Namespace,
},
}
err = k8sClient.Get(ctx, client.ObjectKeyFromObject(expectedNetworkPolicy), expectedNetworkPolicy)
Expect(err).To(Not(HaveOccurred()))
spec := expectedNetworkPolicy.Spec
Expect(spec.PolicyTypes).To(HaveLen(2))
Expect(spec.PolicyTypes).To(ContainElement(networkingv1.PolicyTypeEgress))
Expect(spec.PolicyTypes).To(ContainElement(networkingv1.PolicyTypeIngress))
Expect(spec.Ingress).To(Equal([]networkingv1.NetworkPolicyIngressRule{{}}))
})
When("exposing the cluster with nodePort and custom posrts", func() {
When("exposing the cluster with nodePort and custom ports", func() {
It("will have a NodePort service with the specified port exposed", func() {
cluster.Spec.Expose = &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{

View File

@@ -66,16 +66,20 @@ func (p *PodReconciler) Reconcile(ctx context.Context, req reconcile.Request) (r
if len(s) < 1 {
return reconcile.Result{}, nil
}
if s[0] != "k3k" {
return reconcile.Result{}, nil
}
clusterName := s[1]
var cluster v1alpha1.Cluster
if err := p.Client.Get(ctx, types.NamespacedName{Name: clusterName, Namespace: req.Namespace}, &cluster); err != nil {
if !apierrors.IsNotFound(err) {
return reconcile.Result{}, err
}
}
matchingLabels := ctrlruntimeclient.MatchingLabels(map[string]string{"role": "server"})
listOpts := &ctrlruntimeclient.ListOptions{Namespace: req.Namespace}
matchingLabels.ApplyToList(listOpts)
@@ -84,14 +88,17 @@ func (p *PodReconciler) Reconcile(ctx context.Context, req reconcile.Request) (r
if err := p.Client.List(ctx, &podList, listOpts); err != nil {
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
if len(podList.Items) == 1 {
return reconcile.Result{}, nil
}
for _, pod := range podList.Items {
if err := p.handleServerPod(ctx, cluster, &pod); err != nil {
return reconcile.Result{}, err
}
}
return reconcile.Result{}, nil
}
@@ -115,16 +122,20 @@ func (p *PodReconciler) handleServerPod(ctx context.Context, cluster v1alpha1.Cl
if cluster.Name == "" {
if controllerutil.ContainsFinalizer(pod, etcdPodFinalizerName) {
controllerutil.RemoveFinalizer(pod, etcdPodFinalizerName)
if err := p.Client.Update(ctx, pod); err != nil {
return err
}
}
return nil
}
tlsConfig, err := p.getETCDTLS(ctx, &cluster)
if err != nil {
return err
}
// remove server from etcd
client, err := clientv3.New(clientv3.Config{
Endpoints: []string{
@@ -143,11 +154,13 @@ func (p *PodReconciler) handleServerPod(ctx context.Context, cluster v1alpha1.Cl
// remove our finalizer from the list and update it.
if controllerutil.ContainsFinalizer(pod, etcdPodFinalizerName) {
controllerutil.RemoveFinalizer(pod, etcdPodFinalizerName)
if err := p.Client.Update(ctx, pod); err != nil {
return err
}
}
}
if !controllerutil.ContainsFinalizer(pod, etcdPodFinalizerName) {
controllerutil.AddFinalizer(pod, etcdPodFinalizerName)
return p.Client.Update(ctx, pod)
@@ -164,9 +177,11 @@ func (p *PodReconciler) getETCDTLS(ctx context.Context, cluster *v1alpha1.Cluste
if err != nil {
return nil, err
}
endpoint := server.ServiceName(cluster.Name) + "." + cluster.Namespace
var b *bootstrap.ControlRuntimeBootstrap
if err := retry.OnError(k3kcontroller.Backoff, func(err error) bool {
return true
}, func() error {
@@ -181,6 +196,7 @@ func (p *PodReconciler) getETCDTLS(ctx context.Context, cluster *v1alpha1.Cluste
if err != nil {
return nil, err
}
clientCert, err := tls.X509KeyPair(etcdCert, etcdKey)
if err != nil {
return nil, err
@@ -190,6 +206,7 @@ func (p *PodReconciler) getETCDTLS(ctx context.Context, cluster *v1alpha1.Cluste
if err != nil {
return nil, err
}
pool := x509.NewCertPool()
pool.AddCert(cert[0])
@@ -206,6 +223,7 @@ func removePeer(ctx context.Context, client *clientv3.Client, name, address stri
ctx, cancel := context.WithTimeout(ctx, memberRemovalTimeout)
defer cancel()
members, err := client.MemberList(ctx)
if err != nil {
return err
@@ -215,6 +233,7 @@ func removePeer(ctx context.Context, client *clientv3.Client, name, address stri
if !strings.Contains(member.Name, name) {
continue
}
for _, peerURL := range member.PeerURLs {
u, err := url.Parse(peerURL)
if err != nil {
@@ -224,9 +243,11 @@ func removePeer(ctx context.Context, client *clientv3.Client, name, address stri
if u.Hostname() == address {
log.Info("removing member from etcd", "name", member.Name, "id", member.ID, "address", address)
_, err := client.MemberRemove(ctx, member.ID)
if errors.Is(err, rpctypes.ErrGRPCMemberNotFound) {
return nil
}
return err
}
}
@@ -237,18 +258,23 @@ func removePeer(ctx context.Context, client *clientv3.Client, name, address stri
func (p *PodReconciler) clusterToken(ctx context.Context, cluster *v1alpha1.Cluster) (string, error) {
var tokenSecret v1.Secret
nn := types.NamespacedName{
Name: TokenSecretName(cluster.Name),
Namespace: cluster.Namespace,
}
if cluster.Spec.TokenSecretRef != nil {
nn.Name = TokenSecretName(cluster.Name)
}
if err := p.Client.Get(ctx, nn, &tokenSecret); err != nil {
return "", err
}
if _, ok := tokenSecret.Data["token"]; !ok {
return "", fmt.Errorf("no token field in secret %s/%s", nn.Namespace, nn.Name)
}
return string(tokenSecret.Data["token"]), nil
}

View File

@@ -8,6 +8,7 @@ import (
"errors"
"fmt"
"net/http"
"syscall"
"time"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
@@ -17,6 +18,8 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
)
var ErrServerNotReady = errors.New("server not ready")
type ControlRuntimeBootstrap struct {
ServerCA content `json:"serverCA"`
ServerCAKey content `json:"serverCAKey"`
@@ -45,7 +48,6 @@ func GenerateBootstrapData(ctx context.Context, cluster *v1alpha1.Cluster, ip, t
}
return json.Marshal(bootstrap)
}
func requestBootstrap(token, serverIP string) (*ControlRuntimeBootstrap, error) {
@@ -64,10 +66,15 @@ func requestBootstrap(token, serverIP string) (*ControlRuntimeBootstrap, error)
if err != nil {
return nil, err
}
req.Header.Add("Authorization", "Basic "+basicAuth("server", token))
resp, err := client.Do(req)
if err != nil {
if errors.Is(err, syscall.ECONNREFUSED) {
return nil, ErrServerNotReady
}
return nil, err
}
defer resp.Body.Close()
@@ -91,6 +98,7 @@ func decodeBootstrap(bootstrap *ControlRuntimeBootstrap) error {
if err != nil {
return err
}
bootstrap.ClientCA.Content = string(decoded)
//client-ca-key
@@ -98,6 +106,7 @@ func decodeBootstrap(bootstrap *ControlRuntimeBootstrap) error {
if err != nil {
return err
}
bootstrap.ClientCAKey.Content = string(decoded)
//server-ca
@@ -105,6 +114,7 @@ func decodeBootstrap(bootstrap *ControlRuntimeBootstrap) error {
if err != nil {
return err
}
bootstrap.ServerCA.Content = string(decoded)
//server-ca-key
@@ -112,6 +122,7 @@ func decodeBootstrap(bootstrap *ControlRuntimeBootstrap) error {
if err != nil {
return err
}
bootstrap.ServerCAKey.Content = string(decoded)
//etcd-ca
@@ -119,6 +130,7 @@ func decodeBootstrap(bootstrap *ControlRuntimeBootstrap) error {
if err != nil {
return err
}
bootstrap.ETCDServerCA.Content = string(decoded)
//etcd-ca-key
@@ -126,6 +138,7 @@ func decodeBootstrap(bootstrap *ControlRuntimeBootstrap) error {
if err != nil {
return err
}
bootstrap.ETCDServerCAKey.Content = string(decoded)
return nil
@@ -162,5 +175,6 @@ func GetFromSecret(ctx context.Context, client client.Client, cluster *v1alpha1.
var bootstrap ControlRuntimeBootstrap
err := json.Unmarshal(bootstrapData, &bootstrap)
return &bootstrap, err
}

View File

@@ -22,6 +22,7 @@ func (s *Server) Config(init bool, serviceIP string) (*v1.Secret, error) {
if init {
config = initConfigData(s.cluster, s.token)
}
return &v1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
@@ -52,21 +53,26 @@ func serverOptions(cluster *v1alpha1.Cluster, token string) string {
if token != "" {
opts = "token: " + token + "\n"
}
if cluster.Status.ClusterCIDR != "" {
opts = opts + "cluster-cidr: " + cluster.Status.ClusterCIDR + "\n"
}
if cluster.Status.ServiceCIDR != "" {
opts = opts + "service-cidr: " + cluster.Status.ServiceCIDR + "\n"
}
if cluster.Spec.ClusterDNS != "" {
opts = opts + "cluster-dns: " + cluster.Spec.ClusterDNS + "\n"
}
if len(cluster.Status.TLSSANs) > 0 {
opts = opts + "tls-san:\n"
for _, addr := range cluster.Status.TLSSANs {
opts = opts + "- " + addr + "\n"
}
}
if cluster.Spec.Mode != agent.VirtualNodeMode {
opts = opts + "disable-agent: true\negress-selector-mode: disabled\ndisable:\n- servicelb\n- traefik\n- metrics-server\n- local-storage"
}
@@ -79,5 +85,6 @@ func configSecretName(clusterName string, init bool) string {
if !init {
return controller.SafeConcatNameWithPrefix(clusterName, configName)
}
return controller.SafeConcatNameWithPrefix(clusterName, initConfigName)
}

View File

@@ -46,10 +46,6 @@ func New(cluster *v1alpha1.Cluster, client client.Client, token, mode string) *S
}
func (s *Server) podSpec(image, name string, persistent bool, startupCmd string) v1.PodSpec {
var limit v1.ResourceList
if s.cluster.Spec.Limit != nil && s.cluster.Spec.Limit.ServerLimit != nil {
limit = s.cluster.Spec.Limit.ServerLimit
}
podSpec := v1.PodSpec{
NodeSelector: s.cluster.Spec.NodeSelector,
PriorityClassName: s.cluster.Spec.PriorityClass,
@@ -117,9 +113,6 @@ func (s *Server) podSpec(image, name string, persistent bool, startupCmd string)
{
Name: name,
Image: image,
Resources: v1.ResourceRequirements{
Limits: limit,
},
Env: []v1.EnvVar{
{
Name: "POD_NAME",
@@ -218,6 +211,14 @@ func (s *Server) podSpec(image, name string, persistent bool, startupCmd string)
Privileged: ptr.To(true),
}
}
// specify resource limits if specified for the servers.
if s.cluster.Spec.ServerLimit != nil {
podSpec.Containers[0].Resources = v1.ResourceRequirements{
Limits: s.cluster.Spec.ServerLimit,
}
}
return podSpec
}
@@ -228,18 +229,22 @@ func (s *Server) StatefulServer(ctx context.Context) (*apps.StatefulSet, error)
err error
persistent bool
)
image := controller.K3SImage(s.cluster)
name := controller.SafeConcatNameWithPrefix(s.cluster.Name, serverName)
replicas = *s.cluster.Spec.Servers
if s.cluster.Spec.Persistence.Type == v1alpha1.DynamicNodesType {
if s.cluster.Spec.Persistence.Type == v1alpha1.DynamicPersistenceMode {
persistent = true
pvClaim = s.setupDynamicPersistence()
}
var volumes []v1.Volume
var volumeMounts []v1.VolumeMount
var (
volumes []v1.Volume
volumeMounts []v1.VolumeMount
)
for _, addon := range s.cluster.Spec.Addons {
namespace := k3kSystemNamespace
if addon.SecretNamespace != "" {
@@ -306,6 +311,7 @@ func (s *Server) StatefulServer(ctx context.Context) (*apps.StatefulSet, error)
if err != nil {
return nil, err
}
podSpec := s.podSpec(image, name, persistent, startupCommand)
podSpec.Volumes = append(podSpec.Volumes, volumes...)
podSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts, volumeMounts...)
@@ -332,7 +338,7 @@ func (s *Server) StatefulServer(ctx context.Context) (*apps.StatefulSet, error)
},
},
}
if s.cluster.Spec.Persistence.Type == v1alpha1.DynamicNodesType {
if s.cluster.Spec.Persistence.Type == v1alpha1.DynamicPersistenceMode {
ss.Spec.VolumeClaimTemplates = []v1.PersistentVolumeClaim{pvClaim}
}
@@ -359,7 +365,6 @@ func (s *Server) setupDynamicPersistence() v1.PersistentVolumeClaim {
},
},
}
}
func (s *Server) setupStartCommand() (string, error) {
@@ -369,10 +374,12 @@ func (s *Server) setupStartCommand() (string, error) {
if *s.cluster.Spec.Servers > 1 {
tmpl = HAServerTemplate
}
tmplCmd, err := template.New("").Parse(tmpl)
if err != nil {
return "", err
}
if err := tmplCmd.Execute(&output, map[string]string{
"ETCD_DIR": "/var/lib/rancher/k3s/server/db/etcd",
"INIT_CONFIG": "/opt/rancher/k3s/init/config.yaml",
@@ -381,5 +388,6 @@ func (s *Server) setupStartCommand() (string, error) {
}); err != nil {
return "", err
}
return output.String(), nil
}

View File

@@ -54,9 +54,11 @@ func Service(cluster *v1alpha1.Cluster) *v1.Service {
if nodePortConfig.ServerPort != nil {
k3sServerPort.NodePort = *nodePortConfig.ServerPort
}
if nodePortConfig.ServicePort != nil {
k3sServicePort.NodePort = *nodePortConfig.ServicePort
}
if nodePortConfig.ETCDPort != nil {
etcdPort.NodePort = *nodePortConfig.ETCDPort
}

View File

@@ -26,13 +26,17 @@ func (c *ClusterReconciler) token(ctx context.Context, cluster *v1alpha1.Cluster
Name: cluster.Spec.TokenSecretRef.Name,
Namespace: cluster.Spec.TokenSecretRef.Namespace,
}
var tokenSecret v1.Secret
if err := c.Client.Get(ctx, nn, &tokenSecret); err != nil {
return "", err
}
if _, ok := tokenSecret.Data["token"]; !ok {
return "", fmt.Errorf("no token field in secret %s/%s", nn.Namespace, nn.Name)
}
return string(tokenSecret.Data["token"]), nil
}
@@ -75,15 +79,16 @@ func (c *ClusterReconciler) ensureTokenSecret(ctx context.Context, cluster *v1al
}
return token, err
}
func random(size int) (string, error) {
token := make([]byte, size)
_, err := rand.Read(token)
if err != nil {
return "", err
}
return hex.EncodeToString(token), err
}

View File

@@ -16,7 +16,6 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
@@ -32,7 +31,7 @@ const (
)
type ClusterSetReconciler struct {
Client ctrlruntimeclient.Client
Client client.Client
Scheme *runtime.Scheme
ClusterCIDR string
}
@@ -49,6 +48,7 @@ func Add(ctx context.Context, mgr manager.Manager, clusterCIDR string) error {
return ctrl.NewControllerManagedBy(mgr).
For(&v1alpha1.ClusterSet{}).
Owns(&networkingv1.NetworkPolicy{}).
Owns(&v1.ResourceQuota{}).
WithOptions(controller.Options{
MaxConcurrentReconciles: maxConcurrentReconciles,
}).
@@ -59,48 +59,33 @@ func Add(ctx context.Context, mgr manager.Manager, clusterCIDR string) error {
).
Watches(
&v1alpha1.Cluster{},
handler.EnqueueRequestsFromMapFunc(sameNamespaceEventHandler(reconciler)),
handler.EnqueueRequestsFromMapFunc(namespaceEventHandler(reconciler)),
).
Complete(&reconciler)
}
// namespaceEventHandler will enqueue reconciling requests for all the ClusterSets in the changed namespace
// namespaceEventHandler will enqueue a reconcile request for the ClusterSet in the given namespace
func namespaceEventHandler(reconciler ClusterSetReconciler) handler.MapFunc {
return func(ctx context.Context, obj client.Object) []reconcile.Request {
var requests []reconcile.Request
var set v1alpha1.ClusterSetList
// if the object is a Namespace, use the name as the namespace
namespace := obj.GetName()
_ = reconciler.Client.List(ctx, &set, client.InNamespace(obj.GetName()))
for _, clusterSet := range set.Items {
requests = append(requests, reconcile.Request{
NamespacedName: types.NamespacedName{
Name: clusterSet.Name,
Namespace: obj.GetName(),
},
})
// if the object is a namespaced resource, use the namespace
if obj.GetNamespace() != "" {
namespace = obj.GetNamespace()
}
return requests
}
}
// sameNamespaceEventHandler will enqueue reconciling requests for all the ClusterSets in the changed namespace
func sameNamespaceEventHandler(reconciler ClusterSetReconciler) handler.MapFunc {
return func(ctx context.Context, obj client.Object) []reconcile.Request {
var requests []reconcile.Request
var set v1alpha1.ClusterSetList
_ = reconciler.Client.List(ctx, &set, client.InNamespace(obj.GetNamespace()))
for _, clusterSet := range set.Items {
requests = append(requests, reconcile.Request{
NamespacedName: types.NamespacedName{
Name: clusterSet.Name,
Namespace: obj.GetNamespace(),
},
})
key := types.NamespacedName{
Name: "default",
Namespace: namespace,
}
return requests
var clusterSet v1alpha1.ClusterSet
if err := reconciler.Client.Get(ctx, key, &clusterSet); err != nil {
return nil
}
return []reconcile.Request{{NamespacedName: key}}
}
}
@@ -125,42 +110,56 @@ func (c *ClusterSetReconciler) Reconcile(ctx context.Context, req reconcile.Requ
return reconcile.Result{}, client.IgnoreNotFound(err)
}
if err := c.reconcileNetworkPolicy(ctx, &clusterSet); err != nil {
return reconcile.Result{}, err
orig := clusterSet.DeepCopy()
reconcilerErr := c.reconcileClusterSet(ctx, &clusterSet)
// update Status if needed
if !reflect.DeepEqual(orig.Status, clusterSet.Status) {
if err := c.Client.Status().Update(ctx, &clusterSet); err != nil {
return reconcile.Result{}, err
}
}
if err := c.reconcileNamespacePodSecurityLabels(ctx, &clusterSet); err != nil {
return reconcile.Result{}, err
// if there was an error during the reconciliation, return
if reconcilerErr != nil {
return reconcile.Result{}, reconcilerErr
}
if err := c.reconcileClusters(ctx, &clusterSet); err != nil {
return reconcile.Result{}, err
// update ClusterSet if needed
if !reflect.DeepEqual(orig.Spec, clusterSet.Spec) {
if err := c.Client.Update(ctx, &clusterSet); err != nil {
return reconcile.Result{}, err
}
}
// TODO: Add resource quota for clustersets
// if clusterSet.Spec.MaxLimits != nil {
// quota := v1.ResourceQuota{
// ObjectMeta: metav1.ObjectMeta{
// Name: "clusterset-quota",
// Namespace: clusterSet.Namespace,
// OwnerReferences: []metav1.OwnerReference{
// {
// UID: clusterSet.UID,
// Name: clusterSet.Name,
// APIVersion: clusterSet.APIVersion,
// Kind: clusterSet.Kind,
// },
// },
// },
// }
// quota.Spec.Hard = clusterSet.Spec.MaxLimits
// if err := c.Client.Create(ctx, &quota); err != nil {
// return reconcile.Result{}, fmt.Errorf("unable to create resource quota from cluster set: %w", err)
// }
// }
return reconcile.Result{}, nil
}
// reconcileClusterSet runs every per-aspect reconciler for the given
// ClusterSet, stopping at the first error. The order matters: namespace-level
// policies and quotas are applied before the member Clusters are reconciled.
func (c *ClusterSetReconciler) reconcileClusterSet(ctx context.Context, clusterSet *v1alpha1.ClusterSet) error {
	steps := []func(context.Context, *v1alpha1.ClusterSet) error{
		c.reconcileNetworkPolicy,
		c.reconcileNamespacePodSecurityLabels,
		c.reconcileLimit,
		c.reconcileQuota,
		c.reconcileClusters,
	}

	for _, step := range steps {
		if err := step(ctx, clusterSet); err != nil {
			return err
		}
	}

	return nil
}
func (c *ClusterSetReconciler) reconcileNetworkPolicy(ctx context.Context, clusterSet *v1alpha1.ClusterSet) error {
log := ctrl.LoggerFrom(ctx)
log.Info("reconciling NetworkPolicy")
@@ -199,6 +198,7 @@ func netpol(ctx context.Context, clusterCIDR string, clusterSet *v1alpha1.Cluste
if err := client.List(ctx, &nodeList); err != nil {
return nil, err
}
for _, node := range nodeList.Items {
cidrList = append(cidrList, node.Spec.PodCIDRs...)
}
@@ -261,6 +261,7 @@ func (c *ClusterSetReconciler) reconcileNamespacePodSecurityLabels(ctx context.C
log.Info("reconciling Namespace")
var ns v1.Namespace
key := types.NamespacedName{Name: clusterSet.Namespace}
if err := c.Client.Get(ctx, key, &ns); err != nil {
return err
@@ -295,8 +296,10 @@ func (c *ClusterSetReconciler) reconcileNamespacePodSecurityLabels(ctx context.C
log.V(1).Info("labels changed, updating namespace")
ns.Labels = newLabels
return c.Client.Update(ctx, &ns)
}
return nil
}
@@ -305,7 +308,7 @@ func (c *ClusterSetReconciler) reconcileClusters(ctx context.Context, clusterSet
log.Info("reconciling Clusters")
var clusters v1alpha1.ClusterList
if err := c.Client.List(ctx, &clusters, ctrlruntimeclient.InNamespace(clusterSet.Namespace)); err != nil {
if err := c.Client.List(ctx, &clusters, client.InNamespace(clusterSet.Namespace)); err != nil {
return err
}
@@ -330,3 +333,98 @@ func (c *ClusterSetReconciler) reconcileClusters(ctx context.Context, clusterSet
return err
}
// reconcileQuota ensures the namespace ResourceQuota derived from the
// ClusterSet spec exists and is up to date, or deletes it when the spec no
// longer requests a quota.
func (c *ClusterSetReconciler) reconcileQuota(ctx context.Context, clusterSet *v1alpha1.ClusterSet) error {
	log := ctrl.LoggerFrom(ctx)
	log.Info("reconciling ClusterSet Quota")

	// No quota requested: check if a ResourceQuota object exists and delete it.
	if clusterSet.Spec.Quota == nil {
		var toDeleteResourceQuota v1.ResourceQuota

		key := types.NamespacedName{
			Name:      k3kcontroller.SafeConcatNameWithPrefix(clusterSet.Name),
			Namespace: clusterSet.Namespace,
		}

		if err := c.Client.Get(ctx, key, &toDeleteResourceQuota); err != nil {
			// nothing to delete if it does not exist
			return client.IgnoreNotFound(err)
		}

		return c.Client.Delete(ctx, &toDeleteResourceQuota)
	}

	// create/update the ResourceQuota, owned by the ClusterSet so it is
	// garbage-collected together with it
	resourceQuota := resourceQuota(clusterSet)
	if err := ctrl.SetControllerReference(clusterSet, &resourceQuota, c.Scheme); err != nil {
		return err
	}

	if err := c.Client.Create(ctx, &resourceQuota); err != nil {
		if apierrors.IsAlreadyExists(err) {
			return c.Client.Update(ctx, &resourceQuota)
		}

		// bug fix: previously any non-AlreadyExists create error was swallowed
		return err
	}

	return nil
}
// resourceQuota builds the ResourceQuota object for a ClusterSet, named with
// the k3k safe-name prefix and carrying the spec's quota verbatim.
func resourceQuota(clusterSet *v1alpha1.ClusterSet) v1.ResourceQuota {
	quota := v1.ResourceQuota{
		TypeMeta: metav1.TypeMeta{
			Kind:       "ResourceQuota",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      k3kcontroller.SafeConcatNameWithPrefix(clusterSet.Name),
			Namespace: clusterSet.Namespace,
		},
	}
	quota.Spec = *clusterSet.Spec.Quota

	return quota
}
// reconcileLimit ensures the namespace LimitRange derived from the ClusterSet
// spec exists and is up to date, or deletes it when the spec no longer
// requests a limit.
func (c *ClusterSetReconciler) reconcileLimit(ctx context.Context, clusterSet *v1alpha1.ClusterSet) error {
	log := ctrl.LoggerFrom(ctx)
	log.Info("Reconciling ClusterSet Limit")

	// delete the LimitRange if spec.Limit isn't specified
	if clusterSet.Spec.Limit == nil {
		var toDeleteLimitRange v1.LimitRange

		key := types.NamespacedName{
			Name:      k3kcontroller.SafeConcatNameWithPrefix(clusterSet.Name),
			Namespace: clusterSet.Namespace,
		}

		if err := c.Client.Get(ctx, key, &toDeleteLimitRange); err != nil {
			// nothing to delete if it does not exist
			return client.IgnoreNotFound(err)
		}

		return c.Client.Delete(ctx, &toDeleteLimitRange)
	}

	// create/update the LimitRange, owned by the ClusterSet so it is
	// garbage-collected together with it
	limitRange := limitRange(clusterSet)
	if err := ctrl.SetControllerReference(clusterSet, &limitRange, c.Scheme); err != nil {
		return err
	}

	if err := c.Client.Create(ctx, &limitRange); err != nil {
		if apierrors.IsAlreadyExists(err) {
			return c.Client.Update(ctx, &limitRange)
		}

		// bug fix: previously any non-AlreadyExists create error was swallowed
		return err
	}

	return nil
}
// limitRange builds the LimitRange object for a ClusterSet, named with the
// k3k safe-name prefix and carrying the spec's limit verbatim.
func limitRange(clusterSet *v1alpha1.ClusterSet) v1.LimitRange {
	lr := v1.LimitRange{
		TypeMeta: metav1.TypeMeta{
			Kind:       "LimitRange",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      k3kcontroller.SafeConcatNameWithPrefix(clusterSet.Name),
			Namespace: clusterSet.Namespace,
		},
	}
	lr.Spec = *clusterSet.Spec.Limit

	return lr
}

View File

@@ -8,11 +8,11 @@ import (
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/ptr"
@@ -20,7 +20,7 @@ import (
. "github.com/onsi/gomega"
)
var _ = Describe("ClusterSet Controller", func() {
var _ = Describe("ClusterSet Controller", Label("controller"), Label("ClusterSet"), func() {
Context("creating a ClusterSet", func() {
@@ -29,34 +29,62 @@ var _ = Describe("ClusterSet Controller", func() {
)
BeforeEach(func() {
createdNS := &corev1.Namespace{ObjectMeta: v1.ObjectMeta{GenerateName: "ns-"}}
createdNS := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-"}}
err := k8sClient.Create(context.Background(), createdNS)
Expect(err).To(Not(HaveOccurred()))
namespace = createdNS.Name
})
When("created with a default spec", func() {
It("should have only the 'shared' allowedNodeTypes", func() {
It("should have only the 'shared' allowedModeTypes", func() {
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: namespace,
},
}
err := k8sClient.Create(ctx, clusterSet)
Expect(err).To(Not(HaveOccurred()))
allowedModeTypes := clusterSet.Spec.AllowedNodeTypes
allowedModeTypes := clusterSet.Spec.AllowedModeTypes
Expect(allowedModeTypes).To(HaveLen(1))
Expect(allowedModeTypes).To(ContainElement(v1alpha1.SharedClusterMode))
})
It("should not be able to create a cluster with a non 'default' name", func() {
err := k8sClient.Create(ctx, &v1alpha1.ClusterSet{
ObjectMeta: metav1.ObjectMeta{
Name: "another-name",
Namespace: namespace,
},
})
Expect(err).To(HaveOccurred())
})
It("should not be able to create two ClusterSets in the same namespace", func() {
err := k8sClient.Create(ctx, &v1alpha1.ClusterSet{
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: namespace,
},
})
Expect(err).To(Not(HaveOccurred()))
err = k8sClient.Create(ctx, &v1alpha1.ClusterSet{
ObjectMeta: metav1.ObjectMeta{
Name: "default-2",
Namespace: namespace,
},
})
Expect(err).To(HaveOccurred())
})
It("should create a NetworkPolicy", func() {
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: namespace,
},
}
@@ -118,9 +146,9 @@ var _ = Describe("ClusterSet Controller", func() {
When("created with DisableNetworkPolicy", func() {
It("should not create a NetworkPolicy if true", func() {
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSetSpec{
DisableNetworkPolicy: true,
@@ -147,9 +175,9 @@ var _ = Describe("ClusterSet Controller", func() {
It("should delete the NetworkPolicy if changed to false", func() {
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: namespace,
},
}
@@ -191,9 +219,9 @@ var _ = Describe("ClusterSet Controller", func() {
It("should recreate the NetworkPolicy if deleted", func() {
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: namespace,
},
}
@@ -242,12 +270,12 @@ var _ = Describe("ClusterSet Controller", func() {
When("created specifying the mode", func() {
It("should have the 'virtual' mode if specified", func() {
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSetSpec{
AllowedNodeTypes: []v1alpha1.ClusterMode{
AllowedModeTypes: []v1alpha1.ClusterMode{
v1alpha1.VirtualClusterMode,
},
},
@@ -256,19 +284,19 @@ var _ = Describe("ClusterSet Controller", func() {
err := k8sClient.Create(ctx, clusterSet)
Expect(err).To(Not(HaveOccurred()))
allowedModeTypes := clusterSet.Spec.AllowedNodeTypes
allowedModeTypes := clusterSet.Spec.AllowedModeTypes
Expect(allowedModeTypes).To(HaveLen(1))
Expect(allowedModeTypes).To(ContainElement(v1alpha1.VirtualClusterMode))
})
It("should have both modes if specified", func() {
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSetSpec{
AllowedNodeTypes: []v1alpha1.ClusterMode{
AllowedModeTypes: []v1alpha1.ClusterMode{
v1alpha1.SharedClusterMode,
v1alpha1.VirtualClusterMode,
},
@@ -278,7 +306,7 @@ var _ = Describe("ClusterSet Controller", func() {
err := k8sClient.Create(ctx, clusterSet)
Expect(err).To(Not(HaveOccurred()))
allowedModeTypes := clusterSet.Spec.AllowedNodeTypes
allowedModeTypes := clusterSet.Spec.AllowedModeTypes
Expect(allowedModeTypes).To(HaveLen(2))
Expect(allowedModeTypes).To(ContainElements(
v1alpha1.SharedClusterMode,
@@ -288,12 +316,12 @@ var _ = Describe("ClusterSet Controller", func() {
It("should fail for a non-existing mode", func() {
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSetSpec{
AllowedNodeTypes: []v1alpha1.ClusterMode{
AllowedModeTypes: []v1alpha1.ClusterMode{
v1alpha1.SharedClusterMode,
v1alpha1.VirtualClusterMode,
v1alpha1.ClusterMode("non-existing"),
@@ -315,9 +343,9 @@ var _ = Describe("ClusterSet Controller", func() {
)
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSetSpec{
PodSecurityAdmissionLevel: &privileged,
@@ -327,7 +355,7 @@ var _ = Describe("ClusterSet Controller", func() {
err := k8sClient.Create(ctx, clusterSet)
Expect(err).To(Not(HaveOccurred()))
var ns corev1.Namespace
var ns v1.Namespace
// Check privileged
@@ -418,9 +446,9 @@ var _ = Describe("ClusterSet Controller", func() {
privileged := v1alpha1.PrivilegedPodSecurityAdmissionLevel
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSetSpec{
PodSecurityAdmissionLevel: &privileged,
@@ -430,7 +458,7 @@ var _ = Describe("ClusterSet Controller", func() {
err := k8sClient.Create(ctx, clusterSet)
Expect(err).To(Not(HaveOccurred()))
var ns corev1.Namespace
var ns v1.Namespace
// wait a bit for the namespace to be updated
Eventually(func() bool {
@@ -469,9 +497,9 @@ var _ = Describe("ClusterSet Controller", func() {
When("a cluster in the same namespace is present", func() {
It("should update it if needed", func() {
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSetSpec{
DefaultPriorityClass: "foobar",
@@ -482,7 +510,7 @@ var _ = Describe("ClusterSet Controller", func() {
Expect(err).To(Not(HaveOccurred()))
cluster := &v1alpha1.Cluster{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
@@ -510,9 +538,9 @@ var _ = Describe("ClusterSet Controller", func() {
It("should update the nodeSelector", func() {
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSetSpec{
DefaultNodeSelector: map[string]string{"label-1": "value-1"},
@@ -523,7 +551,7 @@ var _ = Describe("ClusterSet Controller", func() {
Expect(err).To(Not(HaveOccurred()))
cluster := &v1alpha1.Cluster{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
@@ -551,9 +579,9 @@ var _ = Describe("ClusterSet Controller", func() {
It("should update the nodeSelector if changed", func() {
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSetSpec{
DefaultNodeSelector: map[string]string{"label-1": "value-1"},
@@ -564,7 +592,7 @@ var _ = Describe("ClusterSet Controller", func() {
Expect(err).To(Not(HaveOccurred()))
cluster := &v1alpha1.Cluster{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
@@ -622,9 +650,9 @@ var _ = Describe("ClusterSet Controller", func() {
When("a cluster in a different namespace is present", func() {
It("should not be update", func() {
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSetSpec{
DefaultPriorityClass: "foobar",
@@ -634,12 +662,12 @@ var _ = Describe("ClusterSet Controller", func() {
err := k8sClient.Create(ctx, clusterSet)
Expect(err).To(Not(HaveOccurred()))
namespace2 := &corev1.Namespace{ObjectMeta: v1.ObjectMeta{GenerateName: "ns-"}}
namespace2 := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-"}}
err = k8sClient.Create(ctx, namespace2)
Expect(err).To(Not(HaveOccurred()))
cluster := &v1alpha1.Cluster{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace2.Name,
},
@@ -666,5 +694,134 @@ var _ = Describe("ClusterSet Controller", func() {
Should(BeTrue())
})
})
When("created with ResourceQuota", func() {
It("should create resourceQuota if Quota is enabled", func() {
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSetSpec{
Quota: &v1.ResourceQuotaSpec{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("800m"),
v1.ResourceMemory: resource.MustParse("1Gi"),
},
},
},
}
err := k8sClient.Create(ctx, clusterSet)
Expect(err).To(Not(HaveOccurred()))
var resourceQuota v1.ResourceQuota
Eventually(func() error {
key := types.NamespacedName{
Name: k3kcontroller.SafeConcatNameWithPrefix(clusterSet.Name),
Namespace: namespace,
}
return k8sClient.Get(ctx, key, &resourceQuota)
}).
WithTimeout(time.Second * 10).
WithPolling(time.Second).
Should(BeNil())
Expect(resourceQuota.Spec.Hard.Cpu().String()).To(BeEquivalentTo("800m"))
Expect(resourceQuota.Spec.Hard.Memory().String()).To(BeEquivalentTo("1Gi"))
})
It("should delete the ResourceQuota if Quota is deleted", func() {
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSetSpec{
Quota: &v1.ResourceQuotaSpec{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("800m"),
v1.ResourceMemory: resource.MustParse("1Gi"),
},
},
},
}
err := k8sClient.Create(ctx, clusterSet)
Expect(err).To(Not(HaveOccurred()))
var resourceQuota v1.ResourceQuota
Eventually(func() error {
key := types.NamespacedName{
Name: k3kcontroller.SafeConcatNameWithPrefix(clusterSet.Name),
Namespace: namespace,
}
return k8sClient.Get(ctx, key, &resourceQuota)
}).
WithTimeout(time.Minute).
WithPolling(time.Second).
Should(BeNil())
clusterSet.Spec.Quota = nil
err = k8sClient.Update(ctx, clusterSet)
Expect(err).To(Not(HaveOccurred()))
// wait for a bit for the resourceQuota to be deleted
Eventually(func() bool {
key := types.NamespacedName{
Name: k3kcontroller.SafeConcatNameWithPrefix(clusterSet.Name),
Namespace: namespace,
}
err := k8sClient.Get(ctx, key, &resourceQuota)
return apierrors.IsNotFound(err)
}).
WithTimeout(time.Second * 10).
WithPolling(time.Second).
Should(BeTrue())
})
It("should create resourceQuota if Quota is enabled", func() {
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSetSpec{
Limit: &v1.LimitRangeSpec{
Limits: []v1.LimitRangeItem{
{
Type: v1.LimitTypeContainer,
DefaultRequest: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("500m"),
},
},
},
},
},
}
err := k8sClient.Create(ctx, clusterSet)
Expect(err).To(Not(HaveOccurred()))
var limitRange v1.LimitRange
Eventually(func() error {
key := types.NamespacedName{
Name: k3kcontroller.SafeConcatNameWithPrefix(clusterSet.Name),
Namespace: namespace,
}
return k8sClient.Get(ctx, key, &limitRange)
}).
WithTimeout(time.Minute).
WithPolling(time.Second).
Should(BeNil())
// make sure that the default limit range has the default request values.
Expect(limitRange.Spec.Limits).ShouldNot(BeEmpty())
cpu := limitRange.Spec.Limits[0].DefaultRequest.Cpu().String()
Expect(cpu).To(BeEquivalentTo("500m"))
})
})
})
})

View File

@@ -68,6 +68,7 @@ func (n *NodeReconciler) ensureNetworkPolicies(ctx context.Context, clusterSetLi
log.Info("ensuring network policies")
var setNetworkPolicy *networkingv1.NetworkPolicy
for _, cs := range clusterSetList.Items {
if cs.Spec.DisableNetworkPolicy {
continue
@@ -78,14 +79,17 @@ func (n *NodeReconciler) ensureNetworkPolicies(ctx context.Context, clusterSetLi
var err error
setNetworkPolicy, err = netpol(ctx, "", &cs, n.Client)
if err != nil {
return err
}
log.Info("new NetworkPolicy for clusterset")
if err := n.Client.Update(ctx, setNetworkPolicy); err != nil {
return err
}
}
return nil
}

View File

@@ -51,7 +51,9 @@ func SafeConcatName(name ...string) string {
if len(fullPath) < 64 {
return fullPath
}
digest := sha256.Sum256([]byte(fullPath))
// since we cut the string in the middle, the last char may not be compatible with what is expected in k8s
// we are checking and if necessary removing the last char
c := fullPath[56]

View File

@@ -40,6 +40,7 @@ func (k *KubeConfig) Extract(ctx context.Context, client client.Client, cluster
if err != nil {
return nil, err
}
serverCACert := []byte(bootstrapData.ServerCA.Content)
adminCert, adminKey, err := certs.CreateClientCertKey(
@@ -110,6 +111,7 @@ func getURLFromService(ctx context.Context, client client.Client, cluster *v1alp
expose := cluster.Spec.Expose
if expose != nil && expose.Ingress != nil {
var k3kIngress networkingv1.Ingress
ingressKey := types.NamespacedName{
Name: server.IngressName(cluster.Name),
Namespace: cluster.Namespace,
@@ -118,6 +120,7 @@ func getURLFromService(ctx context.Context, client client.Client, cluster *v1alp
if err := client.Get(ctx, ingressKey, &k3kIngress); err != nil {
return "", err
}
url = fmt.Sprintf("https://%s", k3kIngress.Spec.Rules[0].Host)
}

View File

@@ -1,17 +1,11 @@
#!/bin/bash
set -e pipefail
set -eou pipefail
TAG=$(git describe --tag --always --match="v[0-9]*")
LDFLAGS="-X \"github.com/rancher/k3k/pkg/buildinfo.Version=${VERSION}\""
if [ -n "$(git status --porcelain --untracked-files=no)" ]; then
TAG="${TAG}-dirty"
fi
LDFLAGS="-X \"github.com/rancher/k3k/pkg/buildinfo.Version=${TAG}\""
echo "Building k3k..."
echo "Current TAG: ${TAG}"
echo "Building k3k... [cli os/arch: $(go env GOOS)/$(go env GOARCH)]"
echo "Current TAG: ${VERSION} "
export CGO_ENABLED=0
GOOS=linux GOARCH=amd64 go build -ldflags="${LDFLAGS}" -o bin/k3k
@@ -19,6 +13,3 @@ GOOS=linux GOARCH=amd64 go build -ldflags="${LDFLAGS}" -o bin/k3k-kubelet ./k3k-
# build the cli for the local OS and ARCH
go build -ldflags="${LDFLAGS}" -o bin/k3kcli ./cli
docker build -f package/Dockerfile -t rancher/k3k:dev -t rancher/k3k:${TAG} .
docker build -f package/Dockerfile.kubelet -t rancher/k3k-kubelet:dev -t rancher/k3k-kubelet:${TAG} .

View File

@@ -0,0 +1,96 @@
package k3k_test
import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = When("two virtual clusters are installed", Label("e2e"), func() {
var (
cluster1 *VirtualCluster
cluster2 *VirtualCluster
)
BeforeEach(func() {
clusters := NewVirtualClusters(2)
cluster1 = clusters[0]
cluster2 = clusters[1]
})
AfterEach(func() {
DeleteNamespaces(cluster1.Cluster.Namespace, cluster2.Cluster.Namespace)
})
It("can create pods in each of them that are isolated", func() {
pod1Cluster1, pod1Cluster1IP := cluster1.NewNginxPod("")
pod2Cluster1, pod2Cluster1IP := cluster1.NewNginxPod("")
pod1Cluster2, pod1Cluster2IP := cluster2.NewNginxPod("")
var (
stdout string
stderr string
curlCmd string
err error
)
By("Checking that Pods can reach themselves")
curlCmd = "curl --no-progress-meter " + pod1Cluster1IP
stdout, _, err = cluster1.ExecCmd(pod1Cluster1, curlCmd)
Expect(err).To(Not(HaveOccurred()))
Expect(stdout).To(ContainSubstring("Welcome to nginx!"))
curlCmd = "curl --no-progress-meter " + pod2Cluster1IP
stdout, _, err = cluster1.ExecCmd(pod2Cluster1, curlCmd)
Expect(err).To(Not(HaveOccurred()))
Expect(stdout).To(ContainSubstring("Welcome to nginx!"))
curlCmd = "curl --no-progress-meter " + pod1Cluster2IP
stdout, _, err = cluster2.ExecCmd(pod1Cluster2, curlCmd)
Expect(err).To(Not(HaveOccurred()))
Expect(stdout).To(ContainSubstring("Welcome to nginx!"))
// Pods in the same Virtual Cluster should be able to reach each other
// Pod1 should be able to call Pod2, and viceversa
By("Checking that Pods in the same virtual clusters can reach each other")
curlCmd = "curl --no-progress-meter " + pod2Cluster1IP
stdout, _, err = cluster1.ExecCmd(pod1Cluster1, curlCmd)
Expect(err).To(Not(HaveOccurred()))
Expect(stdout).To(ContainSubstring("Welcome to nginx!"))
curlCmd = "curl --no-progress-meter " + pod1Cluster1IP
stdout, _, err = cluster1.ExecCmd(pod2Cluster1, curlCmd)
Expect(err).To(Not(HaveOccurred()))
Expect(stdout).To(ContainSubstring("Welcome to nginx!"))
By("Checking that Pods in the different virtual clusters cannot reach each other")
// Pods in Cluster 1 should not be able to reach the Pod in Cluster 2
curlCmd = "curl --no-progress-meter " + pod1Cluster2IP
_, stderr, err = cluster1.ExecCmd(pod1Cluster1, curlCmd)
Expect(err).Should(HaveOccurred())
Expect(stderr).To(ContainSubstring("Failed to connect"))
curlCmd = "curl --no-progress-meter " + pod1Cluster2IP
_, stderr, err = cluster1.ExecCmd(pod2Cluster1, curlCmd)
Expect(err).To(HaveOccurred())
Expect(stderr).To(ContainSubstring("Failed to connect"))
// Pod in Cluster 2 should not be able to reach Pods in Cluster 1
curlCmd = "curl --no-progress-meter " + pod1Cluster1IP
_, stderr, err = cluster2.ExecCmd(pod1Cluster2, curlCmd)
Expect(err).To(HaveOccurred())
Expect(stderr).To(ContainSubstring("Failed to connect"))
curlCmd = "curl --no-progress-meter " + pod2Cluster1IP
_, stderr, err = cluster2.ExecCmd(pod1Cluster2, curlCmd)
Expect(err).To(HaveOccurred())
Expect(stderr).To(ContainSubstring("Failed to connect"))
})
})

View File

@@ -9,13 +9,12 @@ import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
var _ = When("k3k is installed", func() {
var _ = When("k3k is installed", Label("e2e"), func() {
It("is in Running status", func() {
// check that the controller is running
@@ -42,128 +41,44 @@ var _ = When("k3k is installed", func() {
})
})
var _ = When("a ephemeral cluster is installed", func() {
var _ = When("a ephemeral cluster is installed", Label("e2e"), func() {
var namespace string
var virtualCluster *VirtualCluster
BeforeEach(func() {
createdNS := &corev1.Namespace{ObjectMeta: v1.ObjectMeta{GenerateName: "ns-"}}
createdNS, err := k8s.CoreV1().Namespaces().Create(context.Background(), createdNS, v1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
namespace = createdNS.Name
virtualCluster = NewVirtualCluster()
})
AfterEach(func() {
DeleteNamespaces(virtualCluster.Cluster.Namespace)
})
It("can create a nginx pod", func() {
ctx := context.Background()
cluster := v1alpha1.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: "mycluster",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
TLSSANs: []string{hostIP},
Expose: &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{},
},
Persistence: v1alpha1.PersistenceConfig{
Type: v1alpha1.EphemeralNodeType,
},
},
}
By(fmt.Sprintf("Creating virtual cluster %s/%s", cluster.Namespace, cluster.Name))
NewVirtualCluster(cluster)
By("Waiting to get a kubernetes client for the virtual cluster")
virtualK8sClient := NewVirtualK8sClient(cluster)
nginxPod := &corev1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "nginx",
Namespace: "default",
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{{
Name: "nginx",
Image: "nginx",
}},
},
}
nginxPod, err := virtualK8sClient.CoreV1().Pods(nginxPod.Namespace).Create(ctx, nginxPod, v1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
// check that the nginx Pod is up and running in the host cluster
Eventually(func() bool {
//labelSelector := fmt.Sprintf("%s=%s", translate.ClusterNameLabel, cluster.Namespace)
podList, err := k8s.CoreV1().Pods(namespace).List(ctx, v1.ListOptions{})
Expect(err).To(Not(HaveOccurred()))
for _, pod := range podList.Items {
resourceName := pod.Annotations[translate.ResourceNameAnnotation]
resourceNamespace := pod.Annotations[translate.ResourceNamespaceAnnotation]
if resourceName == nginxPod.Name && resourceNamespace == nginxPod.Namespace {
fmt.Fprintf(GinkgoWriter,
"pod=%s resource=%s/%s status=%s\n",
pod.Name, resourceNamespace, resourceName, pod.Status.Phase,
)
return pod.Status.Phase == corev1.PodRunning
}
}
return false
}).
WithTimeout(time.Minute).
WithPolling(time.Second * 5).
Should(BeTrue())
_, _ = virtualCluster.NewNginxPod("")
})
It("regenerates the bootstrap secret after a restart", func() {
ctx := context.Background()
cluster := v1alpha1.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: "mycluster",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
TLSSANs: []string{hostIP},
Expose: &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{},
},
Persistence: v1alpha1.PersistenceConfig{
Type: v1alpha1.EphemeralNodeType,
},
},
}
By(fmt.Sprintf("Creating virtual cluster %s/%s", cluster.Namespace, cluster.Name))
NewVirtualCluster(cluster)
By("Waiting to get a kubernetes client for the virtual cluster")
virtualK8sClient := NewVirtualK8sClient(cluster)
_, err := virtualK8sClient.DiscoveryClient.ServerVersion()
_, err := virtualCluster.Client.DiscoveryClient.ServerVersion()
Expect(err).To(Not(HaveOccurred()))
labelSelector := "cluster=" + cluster.Name + ",role=server"
serverPods, err := k8s.CoreV1().Pods(namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server"
serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))
Expect(len(serverPods.Items)).To(Equal(1))
serverPod := serverPods.Items[0]
fmt.Fprintf(GinkgoWriter, "deleting pod %s/%s\n", serverPod.Namespace, serverPod.Name)
err = k8s.CoreV1().Pods(namespace).Delete(ctx, serverPod.Name, v1.DeleteOptions{})
err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).Delete(ctx, serverPod.Name, v1.DeleteOptions{})
Expect(err).To(Not(HaveOccurred()))
By("Deleting server pod")
// check that the server pods restarted
Eventually(func() any {
serverPods, err = k8s.CoreV1().Pods(namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
serverPods, err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))
Expect(len(serverPods.Items)).To(Equal(1))
return serverPods.Items[0].DeletionTimestamp
@@ -177,7 +92,7 @@ var _ = When("a ephemeral cluster is installed", func() {
By("Using old k8s client configuration should fail")
Eventually(func() bool {
_, err = virtualK8sClient.DiscoveryClient.ServerVersion()
_, err = virtualCluster.Client.DiscoveryClient.ServerVersion()
var unknownAuthorityErr x509.UnknownAuthorityError
return errors.As(err, &unknownAuthorityErr)
}).
@@ -188,8 +103,8 @@ var _ = When("a ephemeral cluster is installed", func() {
By("Recover new config should succeed")
Eventually(func() error {
virtualK8sClient = NewVirtualK8sClient(cluster)
_, err = virtualK8sClient.DiscoveryClient.ServerVersion()
virtualCluster.Client, virtualCluster.RestConfig = NewVirtualK8sClientAndConfig(virtualCluster.Cluster)
_, err = virtualCluster.Client.DiscoveryClient.ServerVersion()
return err
}).
WithTimeout(time.Minute * 2).
@@ -200,126 +115,52 @@ var _ = When("a ephemeral cluster is installed", func() {
var _ = When("a dynamic cluster is installed", func() {
var namespace string
var virtualCluster *VirtualCluster
BeforeEach(func() {
createdNS := &corev1.Namespace{ObjectMeta: v1.ObjectMeta{GenerateName: "ns-"}}
createdNS, err := k8s.CoreV1().Namespaces().Create(context.Background(), createdNS, v1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
namespace = createdNS.Name
namespace := NewNamespace()
cluster := NewCluster(namespace.Name)
cluster.Spec.Persistence.Type = v1alpha1.DynamicPersistenceMode
CreateCluster(cluster)
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
virtualCluster = &VirtualCluster{
Cluster: cluster,
RestConfig: restConfig,
Client: client,
}
})
AfterEach(func() {
DeleteNamespaces(virtualCluster.Cluster.Namespace)
})
It("can create a nginx pod", func() {
ctx := context.Background()
cluster := v1alpha1.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: "mycluster",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
TLSSANs: []string{hostIP},
Expose: &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{},
},
Persistence: v1alpha1.PersistenceConfig{
Type: v1alpha1.DynamicNodesType,
},
},
}
By(fmt.Sprintf("Creating virtual cluster %s/%s", cluster.Namespace, cluster.Name))
NewVirtualCluster(cluster)
By("Waiting to get a kubernetes client for the virtual cluster")
virtualK8sClient := NewVirtualK8sClient(cluster)
nginxPod := &corev1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "nginx",
Namespace: "default",
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{{
Name: "nginx",
Image: "nginx",
}},
},
}
nginxPod, err := virtualK8sClient.CoreV1().Pods(nginxPod.Namespace).Create(ctx, nginxPod, v1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
// check that the nginx Pod is up and running in the host cluster
Eventually(func() bool {
//labelSelector := fmt.Sprintf("%s=%s", translate.ClusterNameLabel, cluster.Namespace)
podList, err := k8s.CoreV1().Pods(namespace).List(ctx, v1.ListOptions{})
Expect(err).To(Not(HaveOccurred()))
for _, pod := range podList.Items {
resourceName := pod.Annotations[translate.ResourceNameAnnotation]
resourceNamespace := pod.Annotations[translate.ResourceNamespaceAnnotation]
if resourceName == nginxPod.Name && resourceNamespace == nginxPod.Namespace {
fmt.Fprintf(GinkgoWriter,
"pod=%s resource=%s/%s status=%s\n",
pod.Name, resourceNamespace, resourceName, pod.Status.Phase,
)
return pod.Status.Phase == corev1.PodRunning
}
}
return false
}).
WithTimeout(time.Minute).
WithPolling(time.Second * 5).
Should(BeTrue())
_, _ = virtualCluster.NewNginxPod("")
})
It("use the same bootstrap secret after a restart", func() {
ctx := context.Background()
cluster := v1alpha1.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: "mycluster",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
TLSSANs: []string{hostIP},
Expose: &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{},
},
Persistence: v1alpha1.PersistenceConfig{
Type: v1alpha1.DynamicNodesType,
},
},
}
By(fmt.Sprintf("Creating virtual cluster %s/%s", cluster.Namespace, cluster.Name))
NewVirtualCluster(cluster)
By("Waiting to get a kubernetes client for the virtual cluster")
virtualK8sClient := NewVirtualK8sClient(cluster)
_, err := virtualK8sClient.DiscoveryClient.ServerVersion()
_, err := virtualCluster.Client.DiscoveryClient.ServerVersion()
Expect(err).To(Not(HaveOccurred()))
labelSelector := "cluster=" + cluster.Name + ",role=server"
serverPods, err := k8s.CoreV1().Pods(namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server"
serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))
Expect(len(serverPods.Items)).To(Equal(1))
serverPod := serverPods.Items[0]
fmt.Fprintf(GinkgoWriter, "deleting pod %s/%s\n", serverPod.Namespace, serverPod.Name)
err = k8s.CoreV1().Pods(namespace).Delete(ctx, serverPod.Name, v1.DeleteOptions{})
err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).Delete(ctx, serverPod.Name, v1.DeleteOptions{})
Expect(err).To(Not(HaveOccurred()))
By("Deleting server pod")
// check that the server pods restarted
Eventually(func() any {
serverPods, err = k8s.CoreV1().Pods(namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
serverPods, err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))
Expect(len(serverPods.Items)).To(Equal(1))
return serverPods.Items[0].DeletionTimestamp
@@ -333,8 +174,7 @@ var _ = When("a dynamic cluster is installed", func() {
By("Using old k8s client configuration should succeed")
Eventually(func() error {
virtualK8sClient = NewVirtualK8sClient(cluster)
_, err = virtualK8sClient.DiscoveryClient.ServerVersion()
_, err = virtualCluster.Client.DiscoveryClient.ServerVersion()
return err
}).
WithTimeout(2 * time.Minute).

View File

@@ -1,29 +1,142 @@
package k3k_test
import (
"bytes"
"context"
"fmt"
"strings"
"sync"
"time"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/certs"
"github.com/rancher/k3k/pkg/controller/kubeconfig"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/client-go/tools/remotecommand"
"k8s.io/kubectl/pkg/scheme"
"k8s.io/utils/ptr"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
func NewVirtualCluster(cluster v1alpha1.Cluster) {
type VirtualCluster struct {
Cluster *v1alpha1.Cluster
RestConfig *rest.Config
Client *kubernetes.Clientset
}
func NewVirtualCluster() *VirtualCluster {
GinkgoHelper()
namespace := NewNamespace()
By(fmt.Sprintf("Creating new virtual cluster in namespace %s", namespace.Name))
cluster := NewCluster(namespace.Name)
CreateCluster(cluster)
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
By(fmt.Sprintf("Created virtual cluster %s/%s", cluster.Namespace, cluster.Name))
return &VirtualCluster{
Cluster: cluster,
RestConfig: restConfig,
Client: client,
}
}
// NewVirtualClusters will create multiple Virtual Clusters asynchronously
func NewVirtualClusters(n int) []*VirtualCluster {
GinkgoHelper()
var clusters []*VirtualCluster
wg := sync.WaitGroup{}
wg.Add(n)
for range n {
go func() {
defer wg.Done()
defer GinkgoRecover()
clusters = append(clusters, NewVirtualCluster())
}()
}
wg.Wait()
return clusters
}
func NewNamespace() *corev1.Namespace {
GinkgoHelper()
namespace := &corev1.Namespace{ObjectMeta: v1.ObjectMeta{GenerateName: "ns-"}}
namespace, err := k8s.CoreV1().Namespaces().Create(context.Background(), namespace, v1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
return namespace
}
func DeleteNamespaces(names ...string) {
GinkgoHelper()
wg := sync.WaitGroup{}
wg.Add(len(names))
for _, name := range names {
go func() {
defer wg.Done()
defer GinkgoRecover()
deleteNamespace(name)
}()
}
wg.Wait()
}
func deleteNamespace(name string) {
GinkgoHelper()
By(fmt.Sprintf("Deleting namespace %s", name))
err := k8s.CoreV1().Namespaces().Delete(context.Background(), name, v1.DeleteOptions{
GracePeriodSeconds: ptr.To[int64](0),
})
Expect(err).To(Not(HaveOccurred()))
}
func NewCluster(namespace string) *v1alpha1.Cluster {
return &v1alpha1.Cluster{
ObjectMeta: v1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
TLSSANs: []string{hostIP},
Expose: &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{},
},
Persistence: v1alpha1.PersistenceConfig{
Type: v1alpha1.EphemeralPersistenceMode,
},
},
}
}
func CreateCluster(cluster *v1alpha1.Cluster) {
GinkgoHelper()
ctx := context.Background()
err := k8sClient.Create(ctx, &cluster)
err := k8sClient.Create(ctx, cluster)
Expect(err).To(Not(HaveOccurred()))
// check that the server Pod and the Kubelet are in Ready state
@@ -55,22 +168,30 @@ func NewVirtualCluster(cluster v1alpha1.Cluster) {
WithTimeout(time.Minute * 2).
WithPolling(time.Second * 5).
Should(BeTrue())
}
// NewVirtualK8sClient returns a Kubernetes ClientSet for the virtual cluster
func NewVirtualK8sClient(cluster v1alpha1.Cluster) *kubernetes.Clientset {
func NewVirtualK8sClient(cluster *v1alpha1.Cluster) *kubernetes.Clientset {
virtualK8sClient, _ := NewVirtualK8sClientAndConfig(cluster)
return virtualK8sClient
}
// NewVirtualK8sClient returns a Kubernetes ClientSet for the virtual cluster
func NewVirtualK8sClientAndConfig(cluster *v1alpha1.Cluster) (*kubernetes.Clientset, *rest.Config) {
GinkgoHelper()
var err error
var (
err error
config *clientcmdapi.Config
)
ctx := context.Background()
var config *clientcmdapi.Config
Eventually(func() error {
vKubeconfig := kubeconfig.New()
kubeletAltName := fmt.Sprintf("k3k-%s-kubelet", cluster.Name)
vKubeconfig.AltNames = certs.AddSANs([]string{hostIP, kubeletAltName})
config, err = vKubeconfig.Extract(ctx, k8sClient, &cluster, hostIP)
config, err = vKubeconfig.Extract(ctx, k8sClient, cluster, hostIP)
return err
}).
WithTimeout(time.Minute * 2).
@@ -85,5 +206,97 @@ func NewVirtualK8sClient(cluster v1alpha1.Cluster) *kubernetes.Clientset {
virtualK8sClient, err := kubernetes.NewForConfig(restcfg)
Expect(err).To(Not(HaveOccurred()))
return virtualK8sClient
return virtualK8sClient, restcfg
}
func (c *VirtualCluster) NewNginxPod(namespace string) (*corev1.Pod, string) {
GinkgoHelper()
if namespace == "" {
namespace = "default"
}
nginxPod := &corev1.Pod{
ObjectMeta: v1.ObjectMeta{
GenerateName: "nginx-",
Namespace: namespace,
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{{
Name: "nginx",
Image: "nginx",
}},
},
}
By("Creating Pod")
ctx := context.Background()
nginxPod, err := c.Client.CoreV1().Pods(nginxPod.Namespace).Create(ctx, nginxPod, v1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
var podIP string
// check that the nginx Pod is up and running in the host cluster
Eventually(func() bool {
podList, err := k8s.CoreV1().Pods(c.Cluster.Namespace).List(ctx, v1.ListOptions{})
Expect(err).To(Not(HaveOccurred()))
for _, pod := range podList.Items {
resourceName := pod.Annotations[translate.ResourceNameAnnotation]
resourceNamespace := pod.Annotations[translate.ResourceNamespaceAnnotation]
if resourceName == nginxPod.Name && resourceNamespace == nginxPod.Namespace {
podIP = pod.Status.PodIP
fmt.Fprintf(GinkgoWriter,
"pod=%s resource=%s/%s status=%s podIP=%s\n",
pod.Name, resourceNamespace, resourceName, pod.Status.Phase, podIP,
)
return pod.Status.Phase == corev1.PodRunning && podIP != ""
}
}
return false
}).
WithTimeout(time.Minute).
WithPolling(time.Second * 5).
Should(BeTrue())
// get the running pod from the virtual cluster
nginxPod, err = c.Client.CoreV1().Pods(nginxPod.Namespace).Get(ctx, nginxPod.Name, v1.GetOptions{})
Expect(err).To(Not(HaveOccurred()))
return nginxPod, podIP
}
// ExecCmd exec command on specific pod and wait the command's output.
func (c *VirtualCluster) ExecCmd(pod *corev1.Pod, command string) (string, string, error) {
option := &corev1.PodExecOptions{
Command: []string{"sh", "-c", command},
Stdout: true,
Stderr: true,
}
req := c.Client.CoreV1().RESTClient().Post().Resource("pods").Name(pod.Name).Namespace(pod.Namespace).SubResource("exec")
req.VersionedParams(option, scheme.ParameterCodec)
exec, err := remotecommand.NewSPDYExecutor(c.RestConfig, "POST", req.URL())
if err != nil {
return "", "", err
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
defer cancel()
stdout := &bytes.Buffer{}
stderr := &bytes.Buffer{}
err = exec.StreamWithContext(ctx, remotecommand.StreamOptions{
Stdout: stdout,
Stderr: stderr,
})
return stdout.String(), stderr.String(), err
}

View File

@@ -161,8 +161,10 @@ func buildScheme() *runtime.Scheme {
}
func writeK3kLogs() {
var err error
var podList v1.PodList
var (
err error
podList v1.PodList
)
ctx := context.Background()
err = k8sClient.List(ctx, &podList, &client.ListOptions{Namespace: "k3k-system"})
@@ -176,11 +178,14 @@ func writeK3kLogs() {
}
func writeLogs(filename string, logs io.ReadCloser) {
defer logs.Close()
logsStr, err := io.ReadAll(logs)
Expect(err).To(Not(HaveOccurred()))
defer logs.Close()
tempfile := path.Join(os.TempDir(), filename)
err = os.WriteFile(tempfile, []byte(logsStr), 0644)
Expect(err).To(Not(HaveOccurred()))
fmt.Fprintln(GinkgoWriter, "logs written to: "+filename)
}