Mirror of https://github.com/rancher/k3k.git (synced 2026-03-02 01:30:27 +00:00)

Compare commits: chart-0.1. ... chart-0.3. (30 commits)
| Author | SHA1 | Date |
| --- | --- | --- |
| | 6105402bf2 | |
| | 6031eeb09b | |
| | 2f582a473a | |
| | 26d3d29ba1 | |
| | e97a3f5966 | |
| | 07b9cdcc86 | |
| | a3cbe42782 | |
| | 3cf8c0a744 | |
| | ddc367516b | |
| | 7b83b9fd36 | |
| | bf8fdd9071 | |
| | 4ca5203df1 | |
| | 5e8bc0d3cd | |
| | 430e18bf30 | |
| | ec0e5a4a87 | |
| | 29438121ba | |
| | c2cde0c9ba | |
| | 1be43e0564 | |
| | 8913772240 | |
| | dbbe03ca96 | |
| | e52a682cca | |
| | 26a0bb6583 | |
| | dee20455ee | |
| | f5c9a4b3a1 | |
| | 65fe7a678f | |
| | 8811ba74de | |
| | 127b5fc848 | |
| | d95e3fd33a | |
| | 1f4b3c4835 | |
| | 0056e4a3f7 | |
.github/workflows/build.yml (vendored, 3 lines changed)

@@ -21,6 +21,9 @@ jobs:
uses: actions/setup-go@v5
with:
go-version-file: go.mod

- name: Set up QEMU
uses: docker/setup-qemu-action@v3

- name: Run GoReleaser
uses: goreleaser/goreleaser-action@v6

.github/workflows/release.yml (vendored, 3 lines changed)

@@ -35,6 +35,9 @@ jobs:
with:
go-version-file: go.mod

- name: Set up QEMU
uses: docker/setup-qemu-action@v3

- name: "Read secrets"
uses: rancher-eio/read-vault-secrets@main
if: github.repository_owner == 'rancher'

.github/workflows/test.yaml (vendored, 38 lines changed)

@@ -37,24 +37,11 @@ jobs:
with:
go-version-file: go.mod

- name: Check go modules
run: |
go mod tidy
- name: Validate
run: make validate

git --no-pager diff go.mod go.sum
test -z "$(git status --porcelain)"

- name: Install tools
run: |
go install github.com/onsi/ginkgo/v2/ginkgo
go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest

ENVTEST_BIN=$(setup-envtest use -p path)
sudo mkdir -p /usr/local/kubebuilder/bin
sudo cp $ENVTEST_BIN/* /usr/local/kubebuilder/bin

- name: Run tests
run: ginkgo -v -r --skip-file=tests
- name: Run unit tests
run: make test-unit

tests-e2e:
runs-on: ubuntu-latest
@@ -70,19 +57,16 @@ jobs:
with:
go-version-file: go.mod

- name: Check go modules
run: |
go mod tidy

git --no-pager diff go.mod go.sum
test -z "$(git status --porcelain)"
- name: Validate
run: make validate

- name: Install Ginkgo
run: go install github.com/onsi/ginkgo/v2/ginkgo

- name: Build
- name: Build and package
run: |
./scripts/build
make build
make package

# add k3kcli to $PATH
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
@@ -90,8 +74,8 @@ jobs:
- name: Check k3kcli
run: k3kcli -v

- name: Run tests
run: ginkgo -v ./tests
- name: Run e2e tests
run: make test-e2e

- name: Archive k3s logs
uses: actions/upload-artifact@v4
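The updated workflows delegate to Makefile targets instead of calling ginkgo directly; a rough local equivalent of these CI steps, assuming the Makefile targets introduced further down in this diff, might be:

```bash
# Sketch of running the same checks locally (not part of the repository).
make validate     # regenerate CRDs/docs and verify go.mod/go.sum stay clean
make build        # build k3k, k3k-kubelet and k3kcli via ./scripts/build
make package      # build the k3k and k3k-kubelet images
make test-unit    # ginkgo, skipping the e2e specs under ./tests
make test-e2e     # ginkgo against ./tests
```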
.gitignore (vendored, 1 line changed)

@@ -7,3 +7,4 @@
.vscode/
__debug*
*-kubeconfig.yaml
.envtest
@@ -10,3 +10,4 @@ linters:

# extra
- misspell
- wsl
@@ -67,29 +67,78 @@ archives:
# REGISTRY=ghcr.io -> ghcr.io/rancher/k3k:latest:vX.Y.Z
#
dockers:
- id: k3k
use: docker
# k3k amd64
- use: buildx
goarch: amd64
ids:
- k3k
- k3kcli
dockerfile: "package/Dockerfile"
dockerfile: "package/Dockerfile.k3k"
skip_push: false
image_templates:
- "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}:{{ .Tag }}"
- "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}:{{ .Tag }}-amd64"
build_flag_templates:
- "--build-arg=BIN_K3K=k3k"
- "--build-arg=BIN_K3KCLI=k3kcli"

- id: k3k-kubelet
use: docker
- "--pull"
- "--platform=linux/amd64"

# k3k arm64
- use: buildx
goarch: arm64
ids:
- k3k-kubelet
dockerfile: "package/Dockerfile.kubelet"
- k3k
- k3kcli
dockerfile: "package/Dockerfile.k3k"
skip_push: false
image_templates:
- "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}-kubelet:{{ .Tag }}"
- "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}:{{ .Tag }}-arm64"
build_flag_templates:
- "--build-arg=BIN_K3K=k3k"
- "--build-arg=BIN_K3KCLI=k3kcli"
- "--pull"
- "--platform=linux/arm64"

# k3k-kubelet amd64
- use: buildx
goarch: amd64
ids:
- k3k-kubelet
dockerfile: "package/Dockerfile.k3k-kubelet"
skip_push: false
image_templates:
- "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}-kubelet:{{ .Tag }}-amd64"
build_flag_templates:
- "--build-arg=BIN_K3K_KUBELET=k3k-kubelet"
- "--pull"
- "--platform=linux/amd64"

# k3k-kubelet arm64
- use: buildx
goarch: arm64
ids:
- k3k-kubelet
dockerfile: "package/Dockerfile.k3k-kubelet"
skip_push: false
image_templates:
- "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}-kubelet:{{ .Tag }}-arm64"
build_flag_templates:
- "--build-arg=BIN_K3K_KUBELET=k3k-kubelet"
- "--pull"
- "--platform=linux/arm64"

docker_manifests:
# k3k
- name_template: "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}:{{ .Tag }}"
image_templates:
- "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}:{{ .Tag }}-amd64"
- "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}:{{ .Tag }}-arm64"

# k3k-kubelet arm64
- name_template: "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}-kubelet:{{ .Tag }}"
image_templates:
- "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}-kubelet:{{ .Tag }}-amd64"
- "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}-kubelet:{{ .Tag }}-arm64"

changelog:
sort: asc
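Each binary now gets a per-architecture image plus a combined manifest; a hedged sketch of how the templates resolve during a local snapshot build (the GoReleaser invocation is an assumption, not taken from this repo's scripts):

```bash
# Illustrative only: resolve the image templates with REGISTRY/REPO set as in the comment above.
export REGISTRY=ghcr.io
export REPO=rancher/k3k
goreleaser release --snapshot --clean
# Expected per-arch images:  ghcr.io/rancher/k3k:<tag>-amd64, ghcr.io/rancher/k3k:<tag>-arm64,
#                            ghcr.io/rancher/k3k-kubelet:<tag>-amd64, ghcr.io/rancher/k3k-kubelet:<tag>-arm64
# Multi-arch manifests:      ghcr.io/rancher/k3k:<tag>, ghcr.io/rancher/k3k-kubelet:<tag>
```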
@@ -1,34 +0,0 @@
ARG GOLANG=rancher/hardened-build-base:v1.23.4b1
FROM ${GOLANG}

ARG DAPPER_HOST_ARCH
ENV ARCH $DAPPER_HOST_ARCH

RUN apk -U add bash git gcc musl-dev docker vim less file curl wget ca-certificates
RUN if [ "${ARCH}" == "amd64" ]; then \
curl -sL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.59.0; \
fi

RUN curl -sL https://github.com/helm/chart-releaser/releases/download/v1.5.0/chart-releaser_1.5.0_linux_${ARCH}.tar.gz | tar -xz cr \
&& mv cr /bin/

# Tool for CRD generation.
ENV CONTROLLER_GEN_VERSION v0.14.0
RUN go install sigs.k8s.io/controller-tools/cmd/controller-gen@${CONTROLLER_GEN_VERSION}

# Tool to setup the envtest framework to run the controllers integration tests
RUN go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest && \
ENVTEST_BIN=$(setup-envtest use -p path) && \
mkdir -p /usr/local/kubebuilder/bin && \
cp $ENVTEST_BIN/* /usr/local/kubebuilder/bin

ENV GO111MODULE on
ENV DAPPER_ENV REPO TAG DRONE_TAG CROSS GITHUB_TOKEN SKIP_TESTS GIT_TAG
ENV DAPPER_SOURCE /go/src/github.com/rancher/k3k/
ENV DAPPER_OUTPUT ./bin ./dist ./deploy ./charts
ENV DAPPER_DOCKER_SOCKET true
ENV HOME ${DAPPER_SOURCE}
WORKDIR ${DAPPER_SOURCE}

ENTRYPOINT ["./ops/entry"]
CMD ["ci"]
Makefile (114 lines changed)

@@ -1,14 +1,106 @@
TARGETS := $(shell ls ops)
.dapper:
@echo Downloading dapper
@curl -sL https://releases.rancher.com/dapper/latest/dapper-$$(uname -s)-$$(uname -m) > .dapper.tmp
@@chmod +x .dapper.tmp
@./.dapper.tmp -v
@mv .dapper.tmp .dapper

$(TARGETS): .dapper
./.dapper $@
REPO ?= rancher
VERSION ?= $(shell git describe --tags --always --dirty --match="v[0-9]*")

.DEFAULT_GOAL := default
## Dependencies

.PHONY: $(TARGETS)
GOLANGCI_LINT_VERSION := v1.63.4
CONTROLLER_TOOLS_VERSION ?= v0.14.0
GINKGO_VERSION ?= v2.21.0
ENVTEST_VERSION ?= latest
ENVTEST_K8S_VERSION := 1.31.0
CRD_REF_DOCS_VER ?= v0.1.0

GOLANGCI_LINT ?= go run github.com/golangci/golangci-lint/cmd/golangci-lint@$(GOLANGCI_LINT_VERSION)
CONTROLLER_GEN ?= go run sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_TOOLS_VERSION)
GINKGO ?= go run github.com/onsi/ginkgo/v2/ginkgo@$(GINKGO_VERSION)
CRD_REF_DOCS := go run github.com/elastic/crd-ref-docs@$(CRD_REF_DOCS_VER)

ENVTEST ?= go run sigs.k8s.io/controller-runtime/tools/setup-envtest@$(ENVTEST_VERSION)
ENVTEST_DIR ?= $(shell pwd)/.envtest
export KUBEBUILDER_ASSETS ?= $(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(ENVTEST_DIR) -p path)

.PHONY: all
all: version build-crds build package ## Run 'make' or 'make all' to run 'version', 'build-crds', 'build' and 'package'

.PHONY: version
version: ## Print the current version
@echo $(VERSION)

.PHONY: build
build: ## Build the the K3k binaries (k3k, k3k-kubelet and k3kcli)
@VERSION=$(VERSION) ./scripts/build

.PHONY: package
package: package-k3k package-k3k-kubelet ## Package the k3k and k3k-kubelet Docker images

.PHONY: package-%
package-%:
docker build -f package/Dockerfile.$* \
-t $(REPO)/$*:$(VERSION) \
-t $(REPO)/$*:latest \
-t $(REPO)/$*:dev .

.PHONY: push
push: push-k3k push-k3k-kubelet ## Push the K3k images to the registry

.PHONY: push-%
push-%:
docker push $(REPO)/$*:$(VERSION)
docker push $(REPO)/$*:latest
docker push $(REPO)/$*:dev

.PHONY: test
test: ## Run all the tests
$(GINKGO) -v -r

.PHONY: test-unit
test-unit: ## Run the unit tests (skips the e2e)
$(GINKGO) -v -r --skip-file=tests/*

.PHONY: test-controller
test-controller: ## Run the controller tests (pkg/controller)
$(GINKGO) -v -r pkg/controller

.PHONY: test-e2e
test-e2e: ## Run the e2e tests
$(GINKGO) -v -r tests

.PHONY: build-crds
build-crds: ## Build the CRDs specs
@# This will return non-zero until all of our objects in ./pkg/apis can generate valid crds.
@# allowDangerousTypes is needed for struct that use floats
$(CONTROLLER_GEN) crd:generateEmbeddedObjectMeta=true,allowDangerousTypes=false \
paths=./pkg/apis/... \
output:crd:dir=./charts/k3k/crds

.PHONY: docs
docs: ## Build the CRDs and CLI docs
$(CRD_REF_DOCS) --config=./docs/crds/config.yaml --renderer=markdown --source-path=./pkg/apis/k3k.io/v1alpha1 --output-path=./docs/crds/crd-docs.md
@go run ./docs/cli/genclidoc.go

.PHONY: lint
lint: ## Find any linting issues in the project
$(GOLANGCI_LINT) run --timeout=5m

.PHONY: validate
validate: build-crds docs ## Validate the project checking for any dependency or doc mismatch
go mod tidy
git --no-pager diff go.mod go.sum
test -z "$(shell git status --porcelain)"

.PHONY: install
install: ## Install K3k with Helm on the targeted Kubernetes cluster
helm upgrade --install --namespace k3k-system --create-namespace \
--set image.repository=$(REPO)/k3k \
--set image.tag=$(VERSION) \
--set sharedAgent.image.repository=$(REPO)/k3k-kubelet \
--set sharedAgent.image.tag=$(VERSION) \
k3k ./charts/k3k/

.PHONY: help
help: ## Show this help.
@egrep -h '\s##\s' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m %-30s\033[0m %s\n", $$1, $$2}'
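A few typical invocations of the new Makefile, as a sketch (the REPO/VERSION overrides are illustrative placeholders):

```bash
make version                           # print the git-describe based version
make build                             # build k3k, k3k-kubelet and k3kcli via ./scripts/build
make package REPO=myrepo               # build myrepo/k3k and myrepo/k3k-kubelet images
make install REPO=myrepo VERSION=dev   # helm upgrade --install into the k3k-system namespace
make help                              # list all documented targets
```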
README.md (17 lines changed)

@@ -39,7 +39,10 @@ This section provides instructions on how to install K3k and the `k3kcli`.
### Prerequisites

* [Helm](https://helm.sh) must be installed to use the charts. Please refer to Helm's [documentation](https://helm.sh/docs) to get started.
* An existing [RKE2](https://docs.rke2.io/install/quickstart) Kubernetes cluster (recommended).
* A configured storage provider with a default storage class.

**Note:** If you do not have a storage provider, you can configure the cluster to use ephemeral or static storage. Please consult the [k3kcli advance usage](./docs/advanced-usage.md#using-the-cli) for instructions on using these options.

### Install the K3k controller

@@ -68,7 +71,7 @@ To install it, simply download the latest available version for your architectur
For example, you can download the Linux amd64 version with:

```
wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v0.2.2-rc4/k3kcli-linux-amd64 && \
wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v0.3.0/k3kcli-linux-amd64 && \
chmod +x k3kcli && \
sudo mv k3kcli /usr/local/bin
```
@@ -76,7 +79,7 @@ wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v0.2.2-rc4/k3kc
You should now be able to run:
```bash
-> % k3kcli --version
k3kcli Version: v0.2.2-rc4
k3kcli Version: v0.3.0
```

@@ -94,6 +97,16 @@ To create a new K3k cluster, use the following command:
```bash
k3kcli cluster create mycluster
```
> [!NOTE]
> **Creating a K3k Cluster on a Rancher-Managed Host Cluster**
>
> If your *host* Kubernetes cluster is managed by Rancher (e.g., your kubeconfig's `server` address includes a Rancher URL), use the `--kubeconfig-server` flag when creating your K3k cluster:
>
>```bash
>k3kcli cluster create --kubeconfig-server <host_node_IP_or_load_balancer_IP> mycluster
>```
>
> This ensures the generated kubeconfig connects to the correct endpoint.

When the K3s server is ready, `k3kcli` will generate the necessary kubeconfig file and print instructions on how to use it.
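A minimal follow-up once the cluster is ready, assuming the kubeconfig lands in the working directory with the `<name>-kubeconfig.yaml` pattern implied by the .gitignore change above:

```bash
# Hypothetical session after `k3kcli cluster create mycluster` has finished.
export KUBECONFIG="$PWD/mycluster-kubeconfig.yaml"
kubectl get nodes      # nodes of the virtual cluster
kubectl get pods -A    # workloads running inside the virtual cluster
```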
@@ -2,5 +2,5 @@ apiVersion: v2
name: k3k
description: A Helm chart for K3K
type: application
version: 0.1.6-r1
appVersion: v0.2.2-rc5
version: 0.3.1-r1
appVersion: v0.3.1-rc1
@@ -17,6 +17,10 @@ spec:
- name: v1alpha1
schema:
openAPIV3Schema:
description: |-
Cluster defines a virtual Kubernetes cluster managed by k3k.
It specifies the desired state of a virtual cluster, including version, node configuration, and networking.
k3k uses this to provision and manage these virtual clusters.
properties:
apiVersion:
description: |-
@@ -37,51 +41,61 @@ spec:
type: object
spec:
default: {}
description: Spec defines the desired state of the Cluster.
properties:
addons:
description: Addons is a list of secrets containing raw YAML which
will be deployed in the virtual K3k cluster on startup.
description: Addons specifies secrets containing raw YAML to deploy
on cluster startup.
items:
description: Addon specifies a Secret containing YAML to be deployed
on cluster startup.
properties:
secretNamespace:
description: SecretNamespace is the namespace of the Secret.
type: string
secretRef:
description: SecretRef is the name of the Secret.
type: string
type: object
type: array
agentArgs:
description: AgentArgs are the ordered key value pairs (e.x. "testArg",
"testValue") for the K3s pods running in agent mode.
description: |-
AgentArgs specifies ordered key-value pairs for K3s agent pods.
Example: ["--node-name=my-agent-node"]
items:
type: string
type: array
agents:
default: 0
description: Agents is the number of K3s pods to run in agent (worker)
mode.
description: |-
Agents specifies the number of K3s pods to run in agent (worker) mode.
Must be 0 or greater. Defaults to 0.
This field is ignored in "shared" mode.
format: int32
type: integer
x-kubernetes-validations:
- message: invalid value for agents
rule: self >= 0
clusterCIDR:
description: ClusterCIDR is the CIDR range for the pods of the cluster.
Defaults to 10.42.0.0/16.
description: |-
ClusterCIDR is the CIDR range for pod IPs.
Defaults to 10.42.0.0/16 in shared mode and 10.52.0.0/16 in virtual mode.
This field is immutable.
type: string
x-kubernetes-validations:
- message: clusterCIDR is immutable
rule: self == oldSelf
clusterDNS:
description: |-
ClusterDNS is the IP address for the coredns service. Needs to be in the range provided by ServiceCIDR or CoreDNS may not deploy.
Defaults to 10.43.0.10.
ClusterDNS is the IP address for the CoreDNS service.
Must be within the ServiceCIDR range. Defaults to 10.43.0.10.
This field is immutable.
type: string
x-kubernetes-validations:
- message: clusterDNS is immutable
rule: self == oldSelf
clusterLimit:
description: Limit is the limits that apply for the server/worker
nodes.
description: Limit defines resource limits for server/agent nodes.
properties:
serverLimit:
additionalProperties:
@@ -90,8 +104,8 @@ spec:
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: ServerLimit is the limits (cpu/mem) that apply to
the server nodes
description: ServerLimit specifies resource limits for server
nodes.
type: object
workerLimit:
additionalProperties:
@@ -100,51 +114,53 @@ spec:
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: WorkerLimit is the limits (cpu/mem) that apply to
the agent nodes
description: WorkerLimit specifies resource limits for agent nodes.
type: object
type: object
expose:
description: |-
Expose contains options for exposing the apiserver inside/outside of the cluster. By default, this is only exposed as a
clusterIP which is relatively secure, but difficult to access outside of the cluster.
Expose specifies options for exposing the API server.
By default, it's only exposed as a ClusterIP.
properties:
ingress:
description: Ingress specifies options for exposing the API server
through an Ingress.
properties:
annotations:
additionalProperties:
type: string
description: Annotations is a key value map that will enrich
the Ingress annotations
description: Annotations specifies annotations to add to the
Ingress.
type: object
ingressClassName:
description: IngressClassName specifies the IngressClass to
use for the Ingress.
type: string
type: object
loadbalancer:
properties:
enabled:
type: boolean
required:
- enabled
description: LoadBalancer specifies options for exposing the API
server through a LoadBalancer service.
type: object
nodePort:
description: NodePort specifies options for exposing the API server
through NodePort.
properties:
etcdPort:
description: |-
ETCDPort is the port on each node on which the ETCD service is exposed when type is NodePort.
If not specified, a port will be allocated (default: 30000-32767)
If not specified, a port will be allocated (default: 30000-32767).
format: int32
type: integer
serverPort:
description: |-
ServerPort is the port on each node on which the K3s server service is exposed when type is NodePort.
If not specified, a port will be allocated (default: 30000-32767)
If not specified, a port will be allocated (default: 30000-32767).
format: int32
type: integer
servicePort:
description: |-
ServicePort is the port on each node on which the K3s service is exposed when type is NodePort.
If not specified, a port will be allocated (default: 30000-32767)
If not specified, a port will be allocated (default: 30000-32767).
format: int32
type: integer
type: object
@@ -158,8 +174,9 @@ spec:
- shared
- virtual
default: shared
description: Mode is the cluster provisioning mode which can be either
"shared" or "virtual". Defaults to "shared"
description: |-
Mode specifies the cluster provisioning mode: "shared" or "virtual".
Defaults to "shared". This field is immutable.
type: string
x-kubernetes-validations:
- message: mode is immutable
@@ -168,64 +185,75 @@ spec:
additionalProperties:
type: string
description: |-
NodeSelector is the node selector that will be applied to all server/agent pods.
In "shared" mode the node selector will be applied also to the workloads.
NodeSelector specifies node labels to constrain where server/agent pods are scheduled.
In "shared" mode, this also applies to workloads.
type: object
persistence:
default:
type: dynamic
description: |-
Persistence contains options controlling how the etcd data of the virtual cluster is persisted. By default, no data
persistence is guaranteed, so restart of a virtual cluster pod may result in data loss without this field.
Persistence specifies options for persisting etcd data.
Defaults to dynamic persistence, which uses a PersistentVolumeClaim to provide data persistence.
A default StorageClass is required for dynamic persistence.
properties:
storageClassName:
description: |-
StorageClassName is the name of the StorageClass to use for the PVC.
This field is only relevant in "dynamic" mode.
type: string
storageRequestSize:
description: |-
StorageRequestSize is the requested size for the PVC.
This field is only relevant in "dynamic" mode.
type: string
type:
default: dynamic
description: PersistenceMode is the storage mode of a Cluster.
description: Type specifies the persistence mode.
type: string
required:
- type
type: object
priorityClass:
description: |-
PriorityClass is the priorityClassName that will be applied to all server/agent pods.
In "shared" mode the priorityClassName will be applied also to the workloads.
PriorityClass specifies the priorityClassName for server/agent pods.
In "shared" mode, this also applies to workloads.
type: string
serverArgs:
description: ServerArgs are the ordered key value pairs (e.x. "testArg",
"testValue") for the K3s pods running in server mode.
description: |-
ServerArgs specifies ordered key-value pairs for K3s server pods.
Example: ["--tls-san=example.com"]
items:
type: string
type: array
servers:
default: 1
description: Servers is the number of K3s pods to run in server (controlplane)
mode.
description: |-
Servers specifies the number of K3s pods to run in server (control plane) mode.
Must be at least 1. Defaults to 1.
format: int32
type: integer
x-kubernetes-validations:
- message: cluster must have at least one server
rule: self >= 1
serviceCIDR:
description: ServiceCIDR is the CIDR range for the services in the
cluster. Defaults to 10.43.0.0/16.
description: |-
ServiceCIDR is the CIDR range for service IPs.
Defaults to 10.43.0.0/16 in shared mode and 10.53.0.0/16 in virtual mode.
This field is immutable.
type: string
x-kubernetes-validations:
- message: serviceCIDR is immutable
rule: self == oldSelf
tlsSANs:
description: TLSSANs are the subjectAlternativeNames for the certificate
the K3s server will use.
description: TLSSANs specifies subject alternative names for the K3s
server certificate.
items:
type: string
type: array
tokenSecretRef:
description: |-
TokenSecretRef is Secret reference used as a token join server and worker nodes to the cluster. The controller
assumes that the secret has a field "token" in its data, any other fields in the secret will be ignored.
TokenSecretRef is a Secret reference containing the token used by worker nodes to join the cluster.
The Secret must have a "token" field in its data.
properties:
name:
description: name is unique within a namespace to reference a
@@ -238,34 +266,50 @@ spec:
type: object
x-kubernetes-map-type: atomic
version:
description: Version is a string representing the Kubernetes version
to be used by the virtual nodes.
description: |-
Version is the K3s version to use for the virtual nodes.
It should follow the K3s versioning convention (e.g., v1.28.2-k3s1).
If not specified, the Kubernetes version of the host node will be used.
type: string
type: object
status:
description: Status reflects the observed state of the Cluster.
properties:
clusterCIDR:
description: ClusterCIDR is the CIDR range for pod IPs.
type: string
clusterDNS:
description: ClusterDNS is the IP address for the CoreDNS service.
type: string
hostVersion:
description: HostVersion is the Kubernetes version of the host node.
type: string
persistence:
description: Persistence specifies options for persisting etcd data.
properties:
storageClassName:
description: |-
StorageClassName is the name of the StorageClass to use for the PVC.
This field is only relevant in "dynamic" mode.
type: string
storageRequestSize:
description: |-
StorageRequestSize is the requested size for the PVC.
This field is only relevant in "dynamic" mode.
type: string
type:
default: dynamic
description: PersistenceMode is the storage mode of a Cluster.
description: Type specifies the persistence mode.
type: string
required:
- type
type: object
serviceCIDR:
description: ServiceCIDR is the CIDR range for service IPs.
type: string
tlsSANs:
description: TLSSANs specifies subject alternative names for the K3s
server certificate.
items:
type: string
type: array
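For reference, a minimal Cluster manifest exercising the fields described in this CRD; this is a sketch assembled from the schema above, not an example shipped with the chart:

```bash
# Apply a small shared-mode cluster; field names and defaults come from the CRD schema above.
kubectl apply -f - <<'EOF'
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
  name: mycluster
spec:
  mode: shared        # immutable, "shared" or "virtual"
  servers: 1          # must be >= 1
  agents: 0           # ignored in "shared" mode
  persistence:
    type: dynamic     # requires a default StorageClass
EOF
```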
@@ -17,6 +17,9 @@ spec:
- name: v1alpha1
schema:
openAPIV3Schema:
description: |-
ClusterSet represents a group of virtual Kubernetes clusters managed by k3k.
It allows defining common configurations and constraints for the clusters within the set.
properties:
apiVersion:
description: |-
@@ -37,12 +40,12 @@ spec:
type: object
spec:
default: {}
description: Spec is the spec of the ClusterSet
description: Spec defines the desired state of the ClusterSet.
properties:
allowedNodeTypes:
default:
- shared
description: AllowedNodeTypes are the allowed cluster provisioning
description: AllowedNodeTypes specifies the allowed cluster provisioning
modes. Defaults to [shared].
items:
description: ClusterMode is the possible provisioning mode of a
@@ -57,8 +60,8 @@ spec:
- message: mode is immutable
rule: self == oldSelf
defaultLimits:
description: DefaultLimits are the limits used for servers/agents
when a cluster in the set doesn't provide any
description: DefaultLimits specifies the default resource limits for
servers/agents when a cluster in the set doesn't provide any.
properties:
serverLimit:
additionalProperties:
@@ -67,8 +70,8 @@ spec:
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: ServerLimit is the limits (cpu/mem) that apply to
the server nodes
description: ServerLimit specifies resource limits for server
nodes.
type: object
workerLimit:
additionalProperties:
@@ -77,23 +80,22 @@ spec:
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: WorkerLimit is the limits (cpu/mem) that apply to
the agent nodes
description: WorkerLimit specifies resource limits for agent nodes.
type: object
type: object
defaultNodeSelector:
additionalProperties:
type: string
description: DefaultNodeSelector is the node selector that applies
to all clusters (server + agent) in the set
description: DefaultNodeSelector specifies the node selector that
applies to all clusters (server + agent) in the set.
type: object
defaultPriorityClass:
description: DefaultPriorityClass is the priorityClassName applied
to all pods of all clusters in the set
description: DefaultPriorityClass specifies the priorityClassName
applied to all pods of all clusters in the set.
type: string
disableNetworkPolicy:
description: DisableNetworkPolicy is an option that will disable the
creation of a default networkpolicy for cluster isolation
description: DisableNetworkPolicy indicates whether to disable the
creation of a default network policy for cluster isolation.
type: boolean
maxLimits:
additionalProperties:
@@ -102,12 +104,12 @@ spec:
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: MaxLimits are the limits that apply to all clusters (server
+ agent) in the set
description: MaxLimits specifies the maximum resource limits that
apply to all clusters (server + agent) in the set.
type: object
podSecurityAdmissionLevel:
description: PodSecurityAdmissionLevel is the policy level applied
to the pods in the namespace.
description: PodSecurityAdmissionLevel specifies the pod security
admission level applied to the pods in the namespace.
enum:
- privileged
- baseline
@@ -115,11 +117,11 @@ spec:
type: string
type: object
status:
description: Status is the status of the ClusterSet
description: Status reflects the observed state of the ClusterSet.
properties:
conditions:
description: Conditions are the invidual conditions for the cluster
set
description: Conditions are the individual conditions for the cluster
set.
items:
description: "Condition contains details for one aspect of the current
state of this API Resource.\n---\nThis struct is intended for
@@ -190,7 +192,7 @@ spec:
type: array
lastUpdateTime:
description: LastUpdate is the timestamp when the status was last
updated
updated.
type: string
observedGeneration:
description: ObservedGeneration was the generation at the time the
@@ -198,7 +200,7 @@ spec:
format: int64
type: integer
summary:
description: Summary is a summary of the status
description: Summary is a summary of the status.
type: string
type: object
required:
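Similarly, a hedged sketch of a ClusterSet using the fields above (all values are illustrative):

```bash
kubectl apply -f - <<'EOF'
apiVersion: k3k.io/v1alpha1
kind: ClusterSet
metadata:
  name: myset
spec:
  allowedNodeTypes:
    - shared                        # default; add "virtual" to allow virtual-mode clusters
  podSecurityAdmissionLevel: baseline
  defaultLimits:
    serverLimit:
      cpu: "1"
      memory: 1Gi
  disableNetworkPolicy: false
EOF
```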
@@ -1,16 +1,16 @@
package cluster
package cmds

import (
"github.com/urfave/cli/v2"
)

func NewCommand() *cli.Command {
func NewClusterCommand() *cli.Command {
return &cli.Command{
Name: "cluster",
Usage: "cluster command",
Subcommands: []*cli.Command{
NewCreateCmd(),
NewDeleteCmd(),
NewClusterCreateCmd(),
NewClusterDeleteCmd(),
},
}
}
@@ -1,4 +1,4 @@
package cluster
package cmds

import (
"context"
@@ -9,7 +9,6 @@ import (
"strings"
"time"

"github.com/rancher/k3k/cli/cmds"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/controller/kubeconfig"
@@ -18,9 +17,7 @@ import (
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/wait"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/client-go/util/retry"
@@ -28,13 +25,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
)

var Scheme = runtime.NewScheme()

func init() {
_ = clientgoscheme.AddToScheme(Scheme)
_ = v1alpha1.AddToScheme(Scheme)
}

type CreateConfig struct {
token string
clusterCIDR string
@@ -50,17 +40,17 @@ type CreateConfig struct {
kubeconfigServerHost string
}

func NewCreateCmd() *cli.Command {
func NewClusterCreateCmd() *cli.Command {
createConfig := &CreateConfig{}
createFlags := NewCreateFlags(createConfig)

return &cli.Command{
Name: "create",
Usage: "Create new cluster",
Action: createAction(createConfig),
Flags: append(cmds.CommonFlags, createFlags...),
Args: false,
ArgsUsage: "NAME",
Name: "create",
Usage: "Create new cluster",
UsageText: "k3kcli cluster create [command options] NAME",
Action: createAction(createConfig),
Flags: append(CommonFlags, createFlags...),
HideHelpCommand: true,
}
}

@@ -68,14 +58,16 @@ func createAction(config *CreateConfig) cli.ActionFunc {
return func(clx *cli.Context) error {
ctx := context.Background()

if clx.NArg() != 1 {
return cli.ShowSubcommandHelp(clx)
}

name := clx.Args().First()
if name == "" {
return errors.New("empty cluster name")
} else if name == k3kcluster.ClusterInvalidName {
if name == k3kcluster.ClusterInvalidName {
return errors.New("invalid cluster name")
}

restConfig, err := clientcmd.BuildConfigFromFlags("", cmds.Kubeconfig)
restConfig, err := clientcmd.BuildConfigFromFlags("", Kubeconfig)
if err != nil {
return err
}
@@ -87,9 +79,17 @@ func createAction(config *CreateConfig) cli.ActionFunc {
return err
}

if strings.Contains(config.version, "+") {
orig := config.version
config.version = strings.Replace(config.version, "+", "-", -1)
logrus.Warnf("Invalid K3s docker reference version: '%s'. Using '%s' instead", orig, config.version)
}

if config.token != "" {
logrus.Infof("Creating cluster token secret")
obj := k3kcluster.TokenSecretObj(config.token, name, cmds.Namespace())

obj := k3kcluster.TokenSecretObj(config.token, name, Namespace())

if err := ctrlClient.Create(ctx, &obj); err != nil {
return err
}
@@ -97,7 +97,7 @@ func createAction(config *CreateConfig) cli.ActionFunc {

logrus.Infof("Creating a new cluster [%s]", name)

cluster := newCluster(name, cmds.Namespace(), config)
cluster := newCluster(name, Namespace(), config)

cluster.Spec.Expose = &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{},
@@ -108,10 +108,12 @@ func createAction(config *CreateConfig) cli.ActionFunc {
if err != nil {
return err
}

host := strings.Split(url.Host, ":")
if config.kubeconfigServerHost != "" {
host = []string{config.kubeconfigServerHost}
}

cluster.Spec.TLSSANs = []string{host[0]}

if err := ctrlClient.Create(ctx, cluster); err != nil {
@@ -136,6 +138,7 @@ func createAction(config *CreateConfig) cli.ActionFunc {
cfg := kubeconfig.New()

var kubeconfig *clientcmdapi.Config

if err := retry.OnError(availableBackoff, apierrors.IsNotFound, func() error {
kubeconfig, err = cfg.Extract(ctx, ctrlClient, cluster, host[0])
return err
@@ -191,6 +194,7 @@ func newCluster(name, namespace string, config *CreateConfig) *v1alpha1.Cluster
if config.storageClassName == "" {
cluster.Spec.Persistence.StorageClassName = nil
}

if config.token != "" {
cluster.Spec.TokenSecretRef = &v1.SecretReference{
Name: k3kcluster.TokenSecretName(name),
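The version handling added above silently normalizes K3s versions containing `+`, which is not a valid character in an image tag; a hedged example of the observable behavior (the version value is illustrative):

```bash
k3kcli cluster create mycluster --version v1.30.4+k3s1
# logs a warning: Invalid K3s docker reference version: 'v1.30.4+k3s1'. Using 'v1.30.4-k3s1' instead
```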
@@ -1,4 +1,4 @@
package cluster
package cmds

import (
"errors"
@@ -43,15 +43,15 @@ func NewCreateFlags(config *CreateConfig) []cli.Flag {
},
&cli.StringFlag{
Name: "persistence-type",
Usage: "persistence mode for the nodes (ephemeral, static, dynamic)",
Value: string(v1alpha1.DynamicNodesType),
Usage: "persistence mode for the nodes (dynamic, ephemeral, static)",
Value: string(v1alpha1.DynamicPersistenceMode),
Destination: &config.persistenceType,
Action: func(ctx *cli.Context, value string) error {
switch v1alpha1.PersistenceMode(value) {
case v1alpha1.EphemeralNodeType, v1alpha1.DynamicNodesType:
case v1alpha1.EphemeralPersistenceMode, v1alpha1.DynamicPersistenceMode:
return nil
default:
return errors.New(`persistence-type should be one of "ephemeral", "static" or "dynamic"`)
return errors.New(`persistence-type should be one of "dynamic", "ephemeral" or "static"`)
}
},
},
@@ -61,14 +61,14 @@ func NewCreateFlags(config *CreateConfig) []cli.Flag {
Destination: &config.storageClassName,
},
&cli.StringSliceFlag{
Name: "server-args",
Usage: "servers extra arguments",
Value: &config.serverArgs,
Name: "server-args",
Usage: "servers extra arguments",
Destination: &config.serverArgs,
},
&cli.StringSliceFlag{
Name: "agent-args",
Usage: "agents extra arguments",
Value: &config.agentArgs,
Name: "agent-args",
Usage: "agents extra arguments",
Destination: &config.agentArgs,
},
&cli.StringFlag{
Name: "version",
@@ -77,7 +77,7 @@ func NewCreateFlags(config *CreateConfig) []cli.Flag {
},
&cli.StringFlag{
Name: "mode",
Usage: "k3k mode type",
Usage: "k3k mode type (shared, virtual)",
Destination: &config.mode,
Value: "shared",
Action: func(ctx *cli.Context, value string) error {
@@ -1,10 +1,9 @@
package cluster
package cmds

import (
"context"
"errors"

"github.com/rancher/k3k/cli/cmds"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
"github.com/sirupsen/logrus"
@@ -14,26 +13,30 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
)

func NewDeleteCmd() *cli.Command {
func NewClusterDeleteCmd() *cli.Command {
return &cli.Command{
Name: "delete",
Usage: "Delete an existing cluster",
Action: delete,
Flags: cmds.CommonFlags,
Name: "delete",
Usage: "Delete an existing cluster",
UsageText: "k3kcli cluster delete [command options] NAME",
Action: delete,
Flags: CommonFlags,
HideHelpCommand: true,
}
}

func delete(clx *cli.Context) error {
ctx := context.Background()

if clx.NArg() != 1 {
return cli.ShowSubcommandHelp(clx)
}

name := clx.Args().First()
if name == "" {
return errors.New("empty cluster name")
} else if name == k3kcluster.ClusterInvalidName {
if name == k3kcluster.ClusterInvalidName {
return errors.New("invalid cluster name")
}

restConfig, err := clientcmd.BuildConfigFromFlags("", cmds.Kubeconfig)
restConfig, err := clientcmd.BuildConfigFromFlags("", Kubeconfig)
if err != nil {
return err
}
@@ -50,8 +53,9 @@ func delete(clx *cli.Context) error {
cluster := v1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: cmds.Namespace(),
Namespace: Namespace(),
},
}

return ctrlClient.Delete(ctx, &cluster)
}
@@ -1,4 +1,4 @@
package kubeconfig
package cmds

import (
"context"
@@ -8,7 +8,6 @@ import (
"strings"
"time"

"github.com/rancher/k3k/cli/cmds"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/certs"
@@ -16,23 +15,15 @@ import (
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apiserver/pkg/authentication/user"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/client"
)

func init() {
_ = clientgoscheme.AddToScheme(Scheme)
_ = v1alpha1.AddToScheme(Scheme)
}

var (
Scheme = runtime.NewScheme()
name string
cn string
org cli.StringSlice
@@ -88,11 +79,11 @@ var subcommands = []*cli.Command{
Usage: "Generate kubeconfig for clusters",
SkipFlagParsing: false,
Action: generate,
Flags: append(cmds.CommonFlags, generateKubeconfigFlags...),
Flags: append(CommonFlags, generateKubeconfigFlags...),
},
}

func NewCommand() *cli.Command {
func NewKubeconfigCommand() *cli.Command {
return &cli.Command{
Name: "kubeconfig",
Usage: "Manage kubeconfig for clusters",
@@ -102,9 +93,10 @@ func NewCommand() *cli.Command {

func generate(clx *cli.Context) error {
var cluster v1alpha1.Cluster

ctx := context.Background()

restConfig, err := clientcmd.BuildConfigFromFlags("", cmds.Kubeconfig)
restConfig, err := clientcmd.BuildConfigFromFlags("", Kubeconfig)
if err != nil {
return err
}
@@ -115,9 +107,10 @@ func generate(clx *cli.Context) error {
if err != nil {
return err
}

clusterKey := types.NamespacedName{
Name: name,
Namespace: cmds.Namespace(),
Namespace: Namespace(),
}

if err := ctrlClient.Get(ctx, clusterKey, &cluster); err != nil {
@@ -128,11 +121,12 @@ func generate(clx *cli.Context) error {
if err != nil {
return err
}

host := strings.Split(url.Host, ":")
if kubeconfigServerHost != "" {
host = []string{kubeconfigServerHost}
err := altNames.Set(kubeconfigServerHost)
if err != nil {

if err := altNames.Set(kubeconfigServerHost); err != nil {
return err
}
}
@@ -154,6 +148,7 @@ func generate(clx *cli.Context) error {
logrus.Infof("waiting for cluster to be available..")

var kubeconfig *clientcmdapi.Config

if err := retry.OnError(controller.Backoff, apierrors.IsNotFound, func() error {
kubeconfig, err = cfg.Extract(ctx, ctrlClient, &cluster, host[0])
return err
@@ -1,10 +1,14 @@
package cmds

import (
"os"
"fmt"

"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/buildinfo"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
"k8s.io/apimachinery/pkg/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
)

const (
@@ -12,16 +16,19 @@ const (
)

var (
debug bool
Kubeconfig string
namespace string
Scheme = runtime.NewScheme()

debug bool
Kubeconfig string
namespace string

CommonFlags = []cli.Flag{
&cli.StringFlag{
Name: "kubeconfig",
EnvVars: []string{"KUBECONFIG"},
Usage: "kubeconfig path",
Destination: &Kubeconfig,
Value: os.Getenv("HOME") + "/.kube/config",
Value: "$HOME/.kube/config",
},
&cli.StringFlag{
Name: "namespace",
@@ -31,6 +38,11 @@ var (
}
)

func init() {
_ = clientgoscheme.AddToScheme(Scheme)
_ = v1alpha1.AddToScheme(Scheme)
}

func NewApp() *cli.App {
app := cli.NewApp()
app.Name = "k3kcli"
@@ -48,9 +60,20 @@ func NewApp() *cli.App {
if debug {
logrus.SetLevel(logrus.DebugLevel)
}

return nil
}

app.Version = buildinfo.Version
cli.VersionPrinter = func(cCtx *cli.Context) {
fmt.Println("k3kcli Version: " + buildinfo.Version)
}

app.Commands = []*cli.Command{
NewClusterCommand(),
NewKubeconfigCommand(),
}

return app
}

@@ -58,5 +81,6 @@ func Namespace() string {
if namespace == "" {
return defaultNamespace
}

return namespace
}
cli/main.go (15 lines changed)

@@ -1,29 +1,14 @@
package main

import (
"fmt"
"os"

"github.com/rancher/k3k/cli/cmds"
"github.com/rancher/k3k/cli/cmds/cluster"
"github.com/rancher/k3k/cli/cmds/kubeconfig"
"github.com/rancher/k3k/pkg/buildinfo"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)

func main() {
app := cmds.NewApp()
app.Version = buildinfo.Version
cli.VersionPrinter = func(cCtx *cli.Context) {
fmt.Println("k3kcli Version: " + buildinfo.Version)
}

app.Commands = []*cli.Command{
cluster.NewCommand(),
kubeconfig.NewCommand(),
}

if err := app.Run(os.Args); err != nil {
logrus.Fatal(err)
}
@@ -6,7 +6,7 @@ This document provides advanced usage information for k3k, including detailed us

The `Cluster` resource provides a variety of fields for customizing the behavior of your virtual clusters. You can check the [CRD documentation](./crds/crd-docs.md) for the full specs.

**Note:** Most of these customization options can also be configured using the `k3kcli` tool. Refer to the `k3kcli` documentation for more details.
**Note:** Most of these customization options can also be configured using the `k3kcli` tool. Refer to the [k3kcli](./cli/cli-docs.md) documentation for more details.

@@ -94,14 +94,14 @@ In this example we are exposing the Cluster with a Nginx ingress-controller, tha

### `clusterCIDR`

The `clusterCIDR` field specifies the CIDR range for the pods of the cluster. The default value is `10.42.0.0/16`.
The `clusterCIDR` field specifies the CIDR range for the pods of the cluster. The default value is `10.42.0.0/16` in shared mode, and `10.52.0.0/16` in virtual mode.

### `serviceCIDR`

The `serviceCIDR` field specifies the CIDR range for the services in the cluster. The default value is `10.43.0.0/16`.
The `serviceCIDR` field specifies the CIDR range for the services in the cluster. The default value is `10.43.0.0/16` in shared mode, and `10.53.0.0/16` in virtual mode.

**Note:** In `shared` mode, the `serviceCIDR` should match the host cluster's `serviceCIDR` to prevent conflicts.
**Note:** In `shared` mode, the `serviceCIDR` should match the host cluster's `serviceCIDR` to prevent conflicts and in `virtual` mode both `serviceCIDR` and `clusterCIDR` should be different than the host cluster.

### `clusterDNS`
@@ -112,3 +112,21 @@ The `clusterDNS` field specifies the IP address for the CoreDNS service. It need

### `serverArgs`

The `serverArgs` field allows you to specify additional arguments to be passed to the K3s server pods.
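The same fields can also be set from the CLI; a hedged example combining the flags documented in docs/cli/cli-docs.md later in this diff:

```bash
# Virtual-mode cluster with non-default CIDRs and an extra TLS SAN for the server.
k3kcli cluster create mycluster \
  --mode virtual \
  --cluster-cidr 10.52.0.0/16 \
  --service-cidr 10.53.0.0/16 \
  --server-args "--tls-san=k3k.example.com"
```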
## Using the cli

You can check the [k3kcli documentation](./cli/cli-docs.md) for the full specs.

### No storage provider:

* Ephemeral Storage:

```bash
k3kcli cluster create my-cluster --persistence-type ephemeral
```

*Important Notes:*

* Using `--persistence-type ephemeral` will result in data loss if the nodes are restarted.

* It is highly recommended to use `--persistence-type dynamic` with a configured storage class.
docs/cli/cli-docs.md (new file, 98 lines)

@@ -0,0 +1,98 @@
# NAME

k3kcli - CLI for K3K

# SYNOPSIS

k3kcli

```
[--debug]
```

**Usage**:

```
k3kcli [GLOBAL OPTIONS] command [COMMAND OPTIONS] [ARGUMENTS...]
```

# GLOBAL OPTIONS

**--debug**: Turn on debug logs

# COMMANDS

## cluster

cluster command

### create

Create new cluster

>k3kcli cluster create [command options] NAME

**--agent-args**="": agents extra arguments

**--agents**="": number of agents (default: 0)

**--cluster-cidr**="": cluster CIDR

**--kubeconfig**="": kubeconfig path (default: "$HOME/.kube/config")

**--kubeconfig-server**="": override the kubeconfig server host

**--mode**="": k3k mode type (shared, virtual) (default: "shared")

**--namespace**="": namespace to create the k3k cluster in

**--persistence-type**="": persistence mode for the nodes (dynamic, ephemeral, static) (default: "dynamic")

**--server-args**="": servers extra arguments

**--servers**="": number of servers (default: 1)

**--service-cidr**="": service CIDR

**--storage-class-name**="": storage class name for dynamic persistence type

**--token**="": token of the cluster

**--version**="": k3s version

### delete

Delete an existing cluster

>k3kcli cluster delete [command options] NAME

**--kubeconfig**="": kubeconfig path (default: "$HOME/.kube/config")

**--namespace**="": namespace to create the k3k cluster in

## kubeconfig

Manage kubeconfig for clusters

### generate

Generate kubeconfig for clusters

**--altNames**="": altNames of the generated certificates for the kubeconfig

**--cn**="": Common name (CN) of the generated certificates for the kubeconfig (default: "system:admin")

**--config-name**="": the name of the generated kubeconfig file

**--expiration-days**="": Expiration date of the certificates used for the kubeconfig (default: 356)

**--kubeconfig**="": kubeconfig path (default: "$HOME/.kube/config")

**--kubeconfig-server**="": override the kubeconfig server host

**--name**="": cluster name

**--namespace**="": namespace to create the k3k cluster in

**--org**="": Organization name (ORG) of the generated certificates for the kubeconfig
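A short usage sketch for the generate subcommand, combining the options listed above (the host address is a placeholder):

```bash
# Generate an admin kubeconfig for an existing virtual cluster, overriding the
# server host so the file works from outside the host cluster.
k3kcli kubeconfig generate \
  --name mycluster \
  --kubeconfig-server 203.0.113.10 \
  --expiration-days 30
```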
docs/cli/genclidoc.go (new file, 37 lines)

@@ -0,0 +1,37 @@
package main

import (
"fmt"
"os"
"path"

"github.com/rancher/k3k/cli/cmds"
)

func main() {
// Instantiate the CLI application
app := cmds.NewApp()

// Generate the Markdown documentation
md, err := app.ToMarkdown()
if err != nil {
fmt.Println("Error generating documentation:", err)
os.Exit(1)
}

wd, err := os.Getwd()
if err != nil {
fmt.Println(err)
os.Exit(1)
}

outputFile := path.Join(wd, "docs/cli/cli-docs.md")

err = os.WriteFile(outputFile, []byte(md), 0644)
if err != nil {
fmt.Println("Error generating documentation:", err)
os.Exit(1)
}

fmt.Println("Documentation generated at " + outputFile)
}
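This generator is what the `make docs` target shown earlier invokes; run on its own it is simply:

```bash
# Regenerate docs/cli/cli-docs.md from the urfave/cli app definition.
go run ./docs/cli/genclidoc.go
```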
@@ -1,6 +0,0 @@
CRD_REF_DOCS_VER := v0.1.0
CRD_REF_DOCS := go run github.com/elastic/crd-ref-docs@$(CRD_REF_DOCS_VER)

.PHONY: generate
generate:
$(CRD_REF_DOCS) --config=config.yaml --renderer=markdown --source-path=../../pkg/apis/k3k.io/v1alpha1 --output-path=crd-docs.md
@@ -17,7 +17,7 @@

Addon specifies a Secret containing YAML to be deployed on cluster startup.

@@ -26,15 +26,17 @@ _Appears in:_

| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `secretNamespace` _string_ | | | |
| `secretRef` _string_ | | | |
| `secretNamespace` _string_ | SecretNamespace is the namespace of the Secret. | | |
| `secretRef` _string_ | SecretRef is the name of the Secret. | | |

#### Cluster

Cluster defines a virtual Kubernetes cluster managed by k3k.
It specifies the desired state of a virtual cluster, including version, node configuration, and networking.
k3k uses this to provision and manage these virtual clusters.

@@ -46,14 +48,14 @@ _Appears in:_
| `apiVersion` _string_ | `k3k.io/v1alpha1` | | |
| `kind` _string_ | `Cluster` | | |
| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
| `spec` _[ClusterSpec](#clusterspec)_ | | \{ \} | |
| `spec` _[ClusterSpec](#clusterspec)_ | Spec defines the desired state of the Cluster. | \{ \} | |

#### ClusterLimit

ClusterLimit defines resource limits for server and agent nodes.

@@ -62,15 +64,15 @@ _Appears in:_

| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `serverLimit` _[ResourceList](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core)_ | ServerLimit is the limits (cpu/mem) that apply to the server nodes | | |
| `workerLimit` _[ResourceList](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core)_ | WorkerLimit is the limits (cpu/mem) that apply to the agent nodes | | |
| `serverLimit` _[ResourceList](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core)_ | ServerLimit specifies resource limits for server nodes. | | |
| `workerLimit` _[ResourceList](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core)_ | WorkerLimit specifies resource limits for agent nodes. | | |

#### ClusterList

ClusterList is a list of Cluster resources.

@@ -102,7 +104,7 @@ _Appears in:_

ClusterSpec defines the desired state of a virtual Kubernetes cluster.

@@ -111,23 +113,23 @@ _Appears in:_

| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `version` _string_ | Version is a string representing the Kubernetes version to be used by the virtual nodes. | | |
| `servers` _integer_ | Servers is the number of K3s pods to run in server (controlplane) mode. | 1 | |
| `agents` _integer_ | Agents is the number of K3s pods to run in agent (worker) mode. | 0 | |
| `nodeSelector` _object (keys:string, values:string)_ | NodeSelector is the node selector that will be applied to all server/agent pods.<br />In "shared" mode the node selector will be applied also to the workloads. | | |
| `priorityClass` _string_ | PriorityClass is the priorityClassName that will be applied to all server/agent pods.<br />In "shared" mode the priorityClassName will be applied also to the workloads. | | |
| `clusterLimit` _[ClusterLimit](#clusterlimit)_ | Limit is the limits that apply for the server/worker nodes. | | |
| `tokenSecretRef` _[SecretReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#secretreference-v1-core)_ | TokenSecretRef is Secret reference used as a token join server and worker nodes to the cluster. The controller<br />assumes that the secret has a field "token" in its data, any other fields in the secret will be ignored. | | |
| `clusterCIDR` _string_ | ClusterCIDR is the CIDR range for the pods of the cluster. Defaults to 10.42.0.0/16. | | |
| `serviceCIDR` _string_ | ServiceCIDR is the CIDR range for the services in the cluster. Defaults to 10.43.0.0/16. | | |
| `clusterDNS` _string_ | ClusterDNS is the IP address for the coredns service. Needs to be in the range provided by ServiceCIDR or CoreDNS may not deploy.<br />Defaults to 10.43.0.10. | | |
| `serverArgs` _string array_ | ServerArgs are the ordered key value pairs (e.x. "testArg", "testValue") for the K3s pods running in server mode. | | |
| `agentArgs` _string array_ | AgentArgs are the ordered key value pairs (e.x. "testArg", "testValue") for the K3s pods running in agent mode. |
|
||||
| `tlsSANs` _string array_ | TLSSANs are the subjectAlternativeNames for the certificate the K3s server will use. | | |
|
||||
| `addons` _[Addon](#addon) array_ | Addons is a list of secrets containing raw YAML which will be deployed in the virtual K3k cluster on startup. | | |
|
||||
| `mode` _[ClusterMode](#clustermode)_ | Mode is the cluster provisioning mode which can be either "shared" or "virtual". Defaults to "shared" | shared | Enum: [shared virtual] <br /> |
|
||||
| `persistence` _[PersistenceConfig](#persistenceconfig)_ | Persistence contains options controlling how the etcd data of the virtual cluster is persisted. By default, no data<br />persistence is guaranteed, so restart of a virtual cluster pod may result in data loss without this field. | \{ type:dynamic \} | |
|
||||
| `expose` _[ExposeConfig](#exposeconfig)_ | Expose contains options for exposing the apiserver inside/outside of the cluster. By default, this is only exposed as a<br />clusterIP which is relatively secure, but difficult to access outside of the cluster. | | |
|
||||
| `version` _string_ | Version is the K3s version to use for the virtual nodes.<br />It should follow the K3s versioning convention (e.g., v1.28.2-k3s1).<br />If not specified, the Kubernetes version of the host node will be used. | | |
|
||||
| `mode` _[ClusterMode](#clustermode)_ | Mode specifies the cluster provisioning mode: "shared" or "virtual".<br />Defaults to "shared". This field is immutable. | shared | Enum: [shared virtual] <br /> |
|
||||
| `servers` _integer_ | Servers specifies the number of K3s pods to run in server (control plane) mode.<br />Must be at least 1. Defaults to 1. | 1 | |
|
||||
| `agents` _integer_ | Agents specifies the number of K3s pods to run in agent (worker) mode.<br />Must be 0 or greater. Defaults to 0.<br />This field is ignored in "shared" mode. | 0 | |
|
||||
| `clusterCIDR` _string_ | ClusterCIDR is the CIDR range for pod IPs.<br />Defaults to 10.42.0.0/16 in shared mode and 10.52.0.0/16 in virtual mode.<br />This field is immutable. | | |
|
||||
| `serviceCIDR` _string_ | ServiceCIDR is the CIDR range for service IPs.<br />Defaults to 10.43.0.0/16 in shared mode and 10.53.0.0/16 in virtual mode.<br />This field is immutable. | | |
|
||||
| `clusterDNS` _string_ | ClusterDNS is the IP address for the CoreDNS service.<br />Must be within the ServiceCIDR range. Defaults to 10.43.0.10.<br />This field is immutable. | | |
|
||||
| `persistence` _[PersistenceConfig](#persistenceconfig)_ | Persistence specifies options for persisting etcd data.<br />Defaults to dynamic persistence, which uses a PersistentVolumeClaim to provide data persistence.<br />A default StorageClass is required for dynamic persistence. | \{ type:dynamic \} | |
|
||||
| `expose` _[ExposeConfig](#exposeconfig)_ | Expose specifies options for exposing the API server.<br />By default, it's only exposed as a ClusterIP. | | |
|
||||
| `nodeSelector` _object (keys:string, values:string)_ | NodeSelector specifies node labels to constrain where server/agent pods are scheduled.<br />In "shared" mode, this also applies to workloads. | | |
|
||||
| `priorityClass` _string_ | PriorityClass specifies the priorityClassName for server/agent pods.<br />In "shared" mode, this also applies to workloads. | | |
|
||||
| `clusterLimit` _[ClusterLimit](#clusterlimit)_ | Limit defines resource limits for server/agent nodes. | | |
|
||||
| `tokenSecretRef` _[SecretReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#secretreference-v1-core)_ | TokenSecretRef is a Secret reference containing the token used by worker nodes to join the cluster.<br />The Secret must have a "token" field in its data. | | |
|
||||
| `tlsSANs` _string array_ | TLSSANs specifies subject alternative names for the K3s server certificate. | | |
|
||||
| `serverArgs` _string array_ | ServerArgs specifies ordered key-value pairs for K3s server pods.<br />Example: ["--tls-san=example.com"] | | |
|
||||
| `agentArgs` _string array_ | AgentArgs specifies ordered key-value pairs for K3s agent pods.<br />Example: ["--node-name=my-agent-node"] | | |
|
||||
| `addons` _[Addon](#addon) array_ | Addons specifies secrets containing raw YAML to deploy on cluster startup. | | |
|
||||
|
||||
|
||||
|
||||
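To make the fields above concrete, here is a minimal sketch of a Cluster manifest; the name and namespace are placeholders, and only a few of the optional fields are shown.

```shell
# Hypothetical example assembled from the ClusterSpec fields documented above.
kubectl apply -f - <<EOF
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
  name: mycluster
  namespace: k3k-mycluster
spec:
  mode: shared        # "shared" or "virtual"; immutable after creation
  servers: 1          # number of K3s server (control plane) pods
  agents: 0           # ignored in "shared" mode
  persistence:
    type: dynamic     # requires a default StorageClass
EOF
```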
@@ -136,7 +138,7 @@ _Appears in:_
|
||||
|
||||
|
||||
|
||||
|
||||
ExposeConfig specifies options for exposing the API server.
|
||||
|
||||
|
||||
|
||||
@@ -145,16 +147,16 @@ _Appears in:_
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `ingress` _[IngressConfig](#ingressconfig)_ | | | |
|
||||
| `loadbalancer` _[LoadBalancerConfig](#loadbalancerconfig)_ | | | |
|
||||
| `nodePort` _[NodePortConfig](#nodeportconfig)_ | | | |
|
||||
| `ingress` _[IngressConfig](#ingressconfig)_ | Ingress specifies options for exposing the API server through an Ingress. | | |
|
||||
| `loadbalancer` _[LoadBalancerConfig](#loadbalancerconfig)_ | LoadBalancer specifies options for exposing the API server through a LoadBalancer service. | | |
|
||||
| `nodePort` _[NodePortConfig](#nodeportconfig)_ | NodePort specifies options for exposing the API server through NodePort. | | |
|
||||
|
||||
|
||||
#### IngressConfig
|
||||
|
||||
|
||||
|
||||
|
||||
IngressConfig specifies options for exposing the API server through an Ingress.
|
||||
|
||||
|
||||
|
||||
@@ -163,31 +165,28 @@ _Appears in:_
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `annotations` _object (keys:string, values:string)_ | Annotations is a key value map that will enrich the Ingress annotations | | |
|
||||
| `ingressClassName` _string_ | | | |
|
||||
| `annotations` _object (keys:string, values:string)_ | Annotations specifies annotations to add to the Ingress. | | |
|
||||
| `ingressClassName` _string_ | IngressClassName specifies the IngressClass to use for the Ingress. | | |
|
||||
|
||||
|
||||
#### LoadBalancerConfig
|
||||
|
||||
|
||||
|
||||
|
||||
LoadBalancerConfig specifies options for exposing the API server through a LoadBalancer service.
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [ExposeConfig](#exposeconfig)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `enabled` _boolean_ | | | |
|
||||
|
||||
|
||||
#### NodePortConfig
|
||||
|
||||
|
||||
|
||||
|
||||
NodePortConfig specifies options for exposing the API server through NodePort.
|
||||
|
||||
|
||||
|
||||
@@ -196,16 +195,16 @@ _Appears in:_
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `serverPort` _integer_ | ServerPort is the port on each node on which the K3s server service is exposed when type is NodePort.<br />If not specified, a port will be allocated (default: 30000-32767) | | |
|
||||
| `servicePort` _integer_ | ServicePort is the port on each node on which the K3s service is exposed when type is NodePort.<br />If not specified, a port will be allocated (default: 30000-32767) | | |
|
||||
| `etcdPort` _integer_ | ETCDPort is the port on each node on which the ETCD service is exposed when type is NodePort.<br />If not specified, a port will be allocated (default: 30000-32767) | | |
|
||||
| `serverPort` _integer_ | ServerPort is the port on each node on which the K3s server service is exposed when type is NodePort.<br />If not specified, a port will be allocated (default: 30000-32767). | | |
|
||||
| `servicePort` _integer_ | ServicePort is the port on each node on which the K3s service is exposed when type is NodePort.<br />If not specified, a port will be allocated (default: 30000-32767). | | |
|
||||
| `etcdPort` _integer_ | ETCDPort is the port on each node on which the ETCD service is exposed when type is NodePort.<br />If not specified, a port will be allocated (default: 30000-32767). | | |
|
||||
|
||||
|
||||
#### PersistenceConfig
|
||||
|
||||
|
||||
|
||||
|
||||
PersistenceConfig specifies options for persisting etcd data.
|
||||
|
||||
|
||||
|
||||
@@ -215,9 +214,9 @@ _Appears in:_
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `type` _[PersistenceMode](#persistencemode)_ | | dynamic | |
|
||||
| `storageClassName` _string_ | | | |
|
||||
| `storageRequestSize` _string_ | | | |
|
||||
| `type` _[PersistenceMode](#persistencemode)_ | Type specifies the persistence mode. | dynamic | |
|
||||
| `storageClassName` _string_ | StorageClassName is the name of the StorageClass to use for the PVC.<br />This field is only relevant in "dynamic" mode. | | |
|
||||
| `storageRequestSize` _string_ | StorageRequestSize is the requested size for the PVC.<br />This field is only relevant in "dynamic" mode. | | |
|
||||
|
||||
|
||||
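For illustration, a hedged example of setting the dynamic persistence options on an existing Cluster; the plural resource name `clusters.k3k.io`, the StorageClass `local-path`, and the `2Gi` size are assumptions/placeholders.

```shell
# Hypothetical merge patch using the PersistenceConfig fields documented above.
kubectl patch clusters.k3k.io mycluster -n k3k-mycluster --type merge \
  -p '{"spec":{"persistence":{"type":"dynamic","storageClassName":"local-path","storageRequestSize":"2Gi"}}}'
```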
#### PersistenceMode
|
||||
|
||||
@@ -1,15 +1,93 @@
|
||||
# Development
|
||||
|
||||
|
||||
## Prerequisites
|
||||
|
||||
To start developing K3k you will need:
|
||||
|
||||
- Go
|
||||
- Docker
|
||||
- Helm
|
||||
- A running Kubernetes cluster
|
||||
|
||||
|
||||
### TLDR
|
||||
|
||||
```shell
|
||||
#!/bin/bash
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# These environment variables configure the image repository and tag.
|
||||
export REPO=ghcr.io/myuser
|
||||
export VERSION=dev-$(date -u '+%Y%m%d%H%M')
|
||||
|
||||
make
|
||||
make push
|
||||
make install
|
||||
```
|
||||
|
||||
### Makefile
|
||||
|
||||
To see all the available Make commands, you can run `make help`, e.g.:
|
||||
|
||||
```
|
||||
-> % make help
|
||||
all Run 'make' or 'make all' to run 'version', 'build-crds', 'build' and 'package'
|
||||
version Print the current version
|
||||
build Build the K3k binaries (k3k, k3k-kubelet and k3kcli)
|
||||
package Package the k3k and k3k-kubelet Docker images
|
||||
push Push the K3k images to the registry
|
||||
test Run all the tests
|
||||
test-unit Run the unit tests (skips the e2e)
|
||||
test-controller Run the controller tests (pkg/controller)
|
||||
test-e2e Run the e2e tests
|
||||
build-crds Build the CRDs specs
|
||||
docs Build the CRDs docs
|
||||
lint Find any linting issues in the project
|
||||
validate Validate the project checking for any dependency or doc mismatch
|
||||
install Install K3k with Helm on the targeted Kubernetes cluster
|
||||
help Show this help.
|
||||
```
|
||||
|
||||
### Build
|
||||
|
||||
To build the needed binaries (`k3k`, `k3k-kubelet` and `k3kcli`) and package the images, you can simply run `make`.
|
||||
|
||||
By default the `rancher` repository will be used, but you can customize this to your registry with the `REPO` env var:
|
||||
|
||||
```
|
||||
REPO=ghcr.io/userorg make
|
||||
```
|
||||
|
||||
To customize the tag you can also explicitly set the VERSION:
|
||||
|
||||
```
|
||||
VERSION=dev-$(date -u '+%Y%m%d%H%M') make
|
||||
```
|
||||
|
||||
|
||||
### Push
|
||||
|
||||
You will need to push the built images to your registry, and you can use the `make push` command to do this.
|
||||
|
||||
|
||||
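For example, pushing with the same repository used for the build (a sketch; `ghcr.io/userorg` is a placeholder):

```
REPO=ghcr.io/userorg make push
```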
### Install
|
||||
|
||||
Once you have your images available, you can install K3k with the `make install` command. This will use `helm` to install the release.
|
||||
|
||||
|
||||
## Tests
|
||||
|
||||
To run the tests we use [Ginkgo](https://onsi.github.io/ginkgo/), and [`envtest`](https://book.kubebuilder.io/reference/envtest) for testing the controllers.
|
||||
To run the tests you can just run `make test`, or one of the other available "sub-test" targets (`test-unit`, `test-controller`, `test-e2e`).
|
||||
|
||||
Install the required binaries from `envtest` with [`setup-envtest`](https://pkg.go.dev/sigs.k8s.io/controller-runtime/tools/setup-envtest), and then put them in the default path `/usr/local/kubebuilder/bin`:
|
||||
We use [Ginkgo](https://onsi.github.io/ginkgo/) and [`envtest`](https://book.kubebuilder.io/reference/envtest) for testing the controllers.
|
||||
|
||||
```
|
||||
ENVTEST_BIN=$(setup-envtest use -p path)
|
||||
sudo mkdir -p /usr/local/kubebuilder/bin
|
||||
sudo cp $ENVTEST_BIN/* /usr/local/kubebuilder/bin
|
||||
```
|
||||
The required binaries for `envtest` are installed with [`setup-envtest`](https://pkg.go.dev/sigs.k8s.io/controller-runtime/tools/setup-envtest) into the `.envtest` folder.
|
||||
|
||||
then run `ginkgo run ./...`.
|
||||
|
||||
## CRDs and Docs
|
||||
|
||||
We are using Kubebuilder and `controller-gen` to build the needed CRDs. To generate the specs you can run `make build-crds`.
|
||||
|
||||
Remember also to update the CRD documentation by running the `make docs` command.
|
||||
|
||||
1
go.mod
1
go.mod
@@ -32,6 +32,7 @@ require (
|
||||
k8s.io/apiserver v0.29.11
|
||||
k8s.io/client-go v0.29.11
|
||||
k8s.io/component-base v0.29.11
|
||||
k8s.io/component-helpers v0.29.11
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
|
||||
sigs.k8s.io/controller-runtime v0.17.5
|
||||
)
|
||||
|
||||
2
go.sum
2
go.sum
@@ -2019,6 +2019,8 @@ k8s.io/client-go v0.29.11 h1:mBX7Ub0uqpLMwWz3J/AGS/xKOZsjr349qZ1vxVoL1l8=
|
||||
k8s.io/client-go v0.29.11/go.mod h1:WOEoi/eLg2YEg3/yEd7YK3CNScYkM8AEScQadxUnaTE=
|
||||
k8s.io/component-base v0.29.11 h1:H3GJIyDNPrscvXGP6wx+9gApcwwmrUd0YtCGp5BcHBA=
|
||||
k8s.io/component-base v0.29.11/go.mod h1:0qu1WStER4wu5o8RMRndZUWPVcPH1XBy/QQiDcD6lew=
|
||||
k8s.io/component-helpers v0.29.11 h1:GdZaSLBLlCa+EzjAnpZ4fGB75rA3qqPLLZKk+CsqNyo=
|
||||
k8s.io/component-helpers v0.29.11/go.mod h1:gloyih9IiE4Qy/7iLUXqAmxYSUduuIpMCiNYuHfYvD4=
|
||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/kms v0.29.11 h1:pylaiDJhgfqczvcjMDPI89+VH0OVoGQhscPH1VbBzQE=
|
||||
|
||||
@@ -31,33 +31,43 @@ func (c *config) unmarshalYAML(data []byte) error {
|
||||
if c.ClusterName == "" {
|
||||
c.ClusterName = conf.ClusterName
|
||||
}
|
||||
|
||||
if c.ClusterNamespace == "" {
|
||||
c.ClusterNamespace = conf.ClusterNamespace
|
||||
}
|
||||
|
||||
if c.HostConfigPath == "" {
|
||||
c.HostConfigPath = conf.HostConfigPath
|
||||
}
|
||||
|
||||
if c.VirtualConfigPath == "" {
|
||||
c.VirtualConfigPath = conf.VirtualConfigPath
|
||||
}
|
||||
|
||||
if c.KubeletPort == "" {
|
||||
c.KubeletPort = conf.KubeletPort
|
||||
}
|
||||
|
||||
if c.AgentHostname == "" {
|
||||
c.AgentHostname = conf.AgentHostname
|
||||
}
|
||||
|
||||
if c.ServiceName == "" {
|
||||
c.ServiceName = conf.ServiceName
|
||||
}
|
||||
|
||||
if c.Token == "" {
|
||||
c.Token = conf.Token
|
||||
}
|
||||
|
||||
if c.ServerIP == "" {
|
||||
c.ServerIP = conf.ServerIP
|
||||
}
|
||||
|
||||
if c.Version == "" {
|
||||
c.Version = conf.Version
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -65,12 +75,15 @@ func (c *config) validate() error {
|
||||
if c.ClusterName == "" {
|
||||
return errors.New("cluster name is not provided")
|
||||
}
|
||||
|
||||
if c.ClusterNamespace == "" {
|
||||
return errors.New("cluster namespace is not provided")
|
||||
}
|
||||
|
||||
if c.AgentHostname == "" {
|
||||
return errors.New("agent Hostname is not provided")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -83,5 +96,6 @@ func (c *config) parse(path string) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return c.unmarshalYAML(b)
|
||||
}
|
||||
|
||||
@@ -38,6 +38,7 @@ func (c *ConfigMapSyncer) Reconcile(ctx context.Context, req reconcile.Request)
|
||||
// return immediately without re-enqueueing. We aren't watching this resource
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
var virtual corev1.ConfigMap
|
||||
|
||||
if err := c.VirtualClient.Get(ctx, req.NamespacedName, &virtual); err != nil {
|
||||
@@ -45,16 +46,19 @@ func (c *ConfigMapSyncer) Reconcile(ctx context.Context, req reconcile.Request)
|
||||
Requeue: true,
|
||||
}, fmt.Errorf("unable to get configmap %s/%s from virtual cluster: %w", req.Namespace, req.Name, err)
|
||||
}
|
||||
|
||||
translated, err := c.TranslateFunc(&virtual)
|
||||
if err != nil {
|
||||
return reconcile.Result{
|
||||
Requeue: true,
|
||||
}, fmt.Errorf("unable to translate configmap %s/%s from virtual cluster: %w", req.Namespace, req.Name, err)
|
||||
}
|
||||
|
||||
translatedKey := types.NamespacedName{
|
||||
Namespace: translated.Namespace,
|
||||
Name: translated.Name,
|
||||
}
|
||||
|
||||
var host corev1.ConfigMap
|
||||
if err = c.HostClient.Get(ctx, translatedKey, &host); err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
@@ -66,6 +70,7 @@ func (c *ConfigMapSyncer) Reconcile(ctx context.Context, req reconcile.Request)
|
||||
}, fmt.Errorf("unable to create host configmap %s/%s for virtual configmap %s/%s: %w",
|
||||
translated.Namespace, translated.Name, req.Namespace, req.Name, err)
|
||||
}
|
||||
|
||||
return reconcile.Result{Requeue: true}, fmt.Errorf("unable to get host configmap %s/%s: %w", translated.Namespace, translated.Name, err)
|
||||
}
|
||||
// we are going to use the host in order to avoid conflicts on update
|
||||
@@ -79,13 +84,14 @@ func (c *ConfigMapSyncer) Reconcile(ctx context.Context, req reconcile.Request)
|
||||
for key, value := range translated.Labels {
|
||||
host.Labels[key] = value
|
||||
}
|
||||
|
||||
if err = c.HostClient.Update(ctx, &host); err != nil {
|
||||
return reconcile.Result{
|
||||
Requeue: true,
|
||||
}, fmt.Errorf("unable to update host configmap %s/%s for virtual configmap %s/%s: %w",
|
||||
translated.Namespace, translated.Name, req.Namespace, req.Name, err)
|
||||
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
@@ -94,6 +100,7 @@ func (c *ConfigMapSyncer) Reconcile(ctx context.Context, req reconcile.Request)
|
||||
func (c *ConfigMapSyncer) isWatching(key types.NamespacedName) bool {
|
||||
c.mutex.RLock()
|
||||
defer c.mutex.RUnlock()
|
||||
|
||||
return c.objs.Has(key)
|
||||
}
|
||||
|
||||
@@ -104,23 +111,29 @@ func (c *ConfigMapSyncer) AddResource(ctx context.Context, namespace, name strin
|
||||
Namespace: namespace,
|
||||
Name: name,
|
||||
}
|
||||
|
||||
// if we already sync this object, no need to writelock/add it
|
||||
if c.isWatching(objKey) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// lock in write mode since we are now adding the key
|
||||
c.mutex.Lock()
|
||||
if c.objs == nil {
|
||||
c.objs = sets.Set[types.NamespacedName]{}
|
||||
}
|
||||
|
||||
c.objs = c.objs.Insert(objKey)
|
||||
c.mutex.Unlock()
|
||||
|
||||
_, err := c.Reconcile(ctx, reconcile.Request{
|
||||
NamespacedName: objKey,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to reconcile new object %s/%s: %w", objKey.Namespace, objKey.Name, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -143,24 +156,34 @@ func (c *ConfigMapSyncer) RemoveResource(ctx context.Context, namespace, name st
|
||||
}); err != nil {
|
||||
return fmt.Errorf("unable to remove configmap: %w", err)
|
||||
}
|
||||
|
||||
c.mutex.Lock()
|
||||
if c.objs == nil {
|
||||
c.objs = sets.Set[types.NamespacedName]{}
|
||||
}
|
||||
|
||||
c.objs = c.objs.Delete(objKey)
|
||||
c.mutex.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *ConfigMapSyncer) removeHostConfigMap(ctx context.Context, virtualNamespace, virtualName string) error {
|
||||
var vConfigMap corev1.ConfigMap
|
||||
err := c.VirtualClient.Get(ctx, types.NamespacedName{Namespace: virtualNamespace, Name: virtualName}, &vConfigMap)
|
||||
if err != nil {
|
||||
|
||||
key := types.NamespacedName{
|
||||
Namespace: virtualNamespace,
|
||||
Name: virtualName,
|
||||
}
|
||||
|
||||
if err := c.VirtualClient.Get(ctx, key, &vConfigMap); err != nil {
|
||||
return fmt.Errorf("unable to get virtual configmap %s/%s: %w", virtualNamespace, virtualName, err)
|
||||
}
|
||||
|
||||
translated, err := c.TranslateFunc(&vConfigMap)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to translate virtual secret: %s/%s: %w", virtualNamespace, virtualName, err)
|
||||
}
|
||||
|
||||
return c.HostClient.Delete(ctx, translated)
|
||||
}
|
||||
|
||||
@@ -45,17 +45,22 @@ type updateableReconciler interface {
|
||||
|
||||
func (c *ControllerHandler) AddResource(ctx context.Context, obj client.Object) error {
|
||||
c.RLock()
|
||||
|
||||
controllers := c.controllers
|
||||
if controllers != nil {
|
||||
if r, ok := c.controllers[obj.GetObjectKind().GroupVersionKind()]; ok {
|
||||
err := r.AddResource(ctx, obj.GetNamespace(), obj.GetName())
|
||||
c.RUnlock()
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// we need to manually lock/unlock since we intend to write-lock when adding a new controller
|
||||
c.RUnlock()
|
||||
|
||||
var r updateableReconciler
|
||||
|
||||
switch obj.(type) {
|
||||
case *v1.Secret:
|
||||
r = &SecretSyncer{
|
||||
@@ -89,19 +94,23 @@ func (c *ControllerHandler) AddResource(ctx context.Context, obj client.Object)
|
||||
// TODO: Technically, the configmap/secret syncers are relatively generic, and this
|
||||
// logic could be used for other types.
|
||||
return fmt.Errorf("unrecognized type: %T", obj)
|
||||
|
||||
}
|
||||
|
||||
err := ctrl.NewControllerManagedBy(c.Mgr).
|
||||
For(&v1.ConfigMap{}).
|
||||
Complete(r)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to start configmap controller: %w", err)
|
||||
}
|
||||
|
||||
c.Lock()
|
||||
if c.controllers == nil {
|
||||
c.controllers = map[schema.GroupVersionKind]updateableReconciler{}
|
||||
}
|
||||
|
||||
c.controllers[obj.GetObjectKind().GroupVersionKind()] = r
|
||||
|
||||
c.Unlock()
|
||||
|
||||
return r.AddResource(ctx, obj.GetNamespace(), obj.GetName())
|
||||
@@ -112,8 +121,10 @@ func (c *ControllerHandler) RemoveResource(ctx context.Context, obj client.Objec
|
||||
c.RLock()
|
||||
ctrl, ok := c.controllers[obj.GetObjectKind().GroupVersionKind()]
|
||||
c.RUnlock()
|
||||
|
||||
if !ok {
|
||||
return fmt.Errorf("no controller found for gvk %s", obj.GetObjectKind().GroupVersionKind())
|
||||
}
|
||||
|
||||
return ctrl.RemoveResource(ctx, obj.GetNamespace(), obj.GetName())
|
||||
}
|
||||
|
||||
@@ -51,6 +51,7 @@ func AddPVCSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, cluster
|
||||
clusterName: clusterName,
|
||||
clusterNamespace: clusterNamespace,
|
||||
}
|
||||
|
||||
return ctrl.NewControllerManagedBy(virtMgr).
|
||||
For(&v1.PersistentVolumeClaim{}).
|
||||
WithOptions(controller.Options{
|
||||
@@ -61,11 +62,12 @@ func AddPVCSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, cluster
|
||||
|
||||
func (r *PVCReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
log := r.logger.With("Cluster", r.clusterName, "PersistentVolumeClaim", req.NamespacedName)
|
||||
|
||||
var (
|
||||
virtPVC v1.PersistentVolumeClaim
|
||||
hostPVC v1.PersistentVolumeClaim
|
||||
cluster v1alpha1.Cluster
|
||||
)
|
||||
|
||||
if err := r.hostClient.Get(ctx, types.NamespacedName{Name: r.clusterName, Namespace: r.clusterNamespace}, &cluster); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
@@ -74,10 +76,12 @@ func (r *PVCReconciler) Reconcile(ctx context.Context, req reconcile.Request) (r
|
||||
if err := r.virtualClient.Get(ctx, req.NamespacedName, &virtPVC); err != nil {
|
||||
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
syncedPVC := r.pvc(&virtPVC)
|
||||
if err := controllerutil.SetControllerReference(&cluster, syncedPVC, r.HostScheme); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
// handle deletion
|
||||
if !virtPVC.DeletionTimestamp.IsZero() {
|
||||
// delete the synced PVC if it exists
|
||||
@@ -90,32 +94,28 @@ func (r *PVCReconciler) Reconcile(ctx context.Context, req reconcile.Request) (r
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
// getting the cluster for setting the controller reference
|
||||
|
||||
// Add finalizer if it does not exist
|
||||
if controllerutil.AddFinalizer(&virtPVC, pvcFinalizerName) {
|
||||
if err := r.virtualClient.Update(ctx, &virtPVC); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
// create or update the pvc on host
|
||||
if err := r.hostClient.Get(ctx, types.NamespacedName{Name: syncedPVC.Name, Namespace: r.clusterNamespace}, &hostPVC); err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
log.Info("creating the persistent volume for the first time on the host cluster")
|
||||
return reconcile.Result{}, r.hostClient.Create(ctx, syncedPVC)
|
||||
}
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
log.Info("updating pvc on the host cluster")
|
||||
return reconcile.Result{}, r.hostClient.Update(ctx, syncedPVC)
|
||||
|
||||
// create the pvc on host
|
||||
log.Info("creating the persistent volume for the first time on the host cluster")
|
||||
|
||||
// note that we don't need to update the PVC on the host cluster, only sync it so that it can be
|
||||
// handled by the host cluster.
|
||||
return reconcile.Result{}, ctrlruntimeclient.IgnoreAlreadyExists(r.hostClient.Create(ctx, syncedPVC))
|
||||
}
|
||||
|
||||
func (r *PVCReconciler) pvc(obj *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim {
|
||||
hostPVC := obj.DeepCopy()
|
||||
r.Translator.TranslateTo(hostPVC)
|
||||
|
||||
return hostPVC
|
||||
}
|
||||
|
||||
184
k3k-kubelet/controller/pod.go
Normal file
184
k3k-kubelet/controller/pod.go
Normal file
@@ -0,0 +1,184 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
"github.com/rancher/k3k/pkg/log"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/component-helpers/storage/volume"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
)
|
||||
|
||||
const (
|
||||
podController = "pod-pvc-controller"
|
||||
pseudoPVLabel = "pod.k3k.io/pseudoPV"
|
||||
)
|
||||
|
||||
type PodReconciler struct {
|
||||
virtualClient ctrlruntimeclient.Client
|
||||
hostClient ctrlruntimeclient.Client
|
||||
clusterName string
|
||||
clusterNamespace string
|
||||
Scheme *runtime.Scheme
|
||||
HostScheme *runtime.Scheme
|
||||
logger *log.Logger
|
||||
Translator translate.ToHostTranslator
|
||||
}
|
||||
|
||||
// AddPodPVCController adds pod controller to k3k-kubelet
|
||||
func AddPodPVCController(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string, logger *log.Logger) error {
|
||||
translator := translate.ToHostTranslator{
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
}
|
||||
// initialize a new Reconciler
|
||||
reconciler := PodReconciler{
|
||||
virtualClient: virtMgr.GetClient(),
|
||||
hostClient: hostMgr.GetClient(),
|
||||
Scheme: virtMgr.GetScheme(),
|
||||
HostScheme: hostMgr.GetScheme(),
|
||||
logger: logger.Named(podController),
|
||||
Translator: translator,
|
||||
clusterName: clusterName,
|
||||
clusterNamespace: clusterNamespace,
|
||||
}
|
||||
|
||||
return ctrl.NewControllerManagedBy(virtMgr).
|
||||
For(&v1.Pod{}).
|
||||
WithOptions(controller.Options{
|
||||
MaxConcurrentReconciles: maxConcurrentReconciles,
|
||||
}).
|
||||
Complete(&reconciler)
|
||||
}
|
||||
|
||||
func (r *PodReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.clusterName, "clusterNamespace", r.clusterNamespace)
|
||||
|
||||
var (
|
||||
virtPod v1.Pod
|
||||
cluster v1alpha1.Cluster
|
||||
)
|
||||
|
||||
if err := r.hostClient.Get(ctx, types.NamespacedName{Name: r.clusterName, Namespace: r.clusterNamespace}, &cluster); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
// handling pod
|
||||
if err := r.virtualClient.Get(ctx, req.NamespacedName, &virtPod); err != nil {
|
||||
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
// reconcile pods with pvcs
|
||||
for _, vol := range virtPod.Spec.Volumes {
|
||||
if vol.PersistentVolumeClaim != nil {
|
||||
log.Info("Handling pod with pvc")
|
||||
|
||||
if err := r.reconcilePodWithPVC(ctx, &virtPod, vol.PersistentVolumeClaim); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
// reconcilePodWithPVC will make sure to create a fake PV for each PVC for any pod so that it can be scheduled on the virtual-kubelet
|
||||
// and then created on the host, the PV is not synced to the host cluster.
|
||||
func (r *PodReconciler) reconcilePodWithPVC(ctx context.Context, pod *v1.Pod, pvcSource *v1.PersistentVolumeClaimVolumeSource) error {
|
||||
log := ctrl.LoggerFrom(ctx).WithValues("PersistentVolumeClaim", pvcSource.ClaimName)
|
||||
|
||||
var pvc v1.PersistentVolumeClaim
|
||||
|
||||
key := types.NamespacedName{
|
||||
Name: pvcSource.ClaimName,
|
||||
Namespace: pod.Namespace,
|
||||
}
|
||||
|
||||
if err := r.virtualClient.Get(ctx, key, &pvc); err != nil {
|
||||
return ctrlruntimeclient.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
log.Info("Creating pseudo Persistent Volume")
|
||||
|
||||
pv := r.pseudoPV(&pvc)
|
||||
if err := r.virtualClient.Create(ctx, pv); err != nil {
|
||||
return ctrlruntimeclient.IgnoreAlreadyExists(err)
|
||||
}
|
||||
|
||||
orig := pv.DeepCopy()
|
||||
pv.Status = v1.PersistentVolumeStatus{
|
||||
Phase: v1.VolumeBound,
|
||||
}
|
||||
|
||||
if err := r.virtualClient.Status().Patch(ctx, pv, ctrlruntimeclient.MergeFrom(orig)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Info("Patch the status of PersistentVolumeClaim to Bound")
|
||||
|
||||
pvcPatch := pvc.DeepCopy()
|
||||
if pvcPatch.Annotations == nil {
|
||||
pvcPatch.Annotations = make(map[string]string)
|
||||
}
|
||||
|
||||
pvcPatch.Annotations[volume.AnnBoundByController] = "yes"
|
||||
pvcPatch.Annotations[volume.AnnBindCompleted] = "yes"
|
||||
pvcPatch.Status.Phase = v1.ClaimBound
|
||||
pvcPatch.Status.AccessModes = pvcPatch.Spec.AccessModes
|
||||
|
||||
return r.virtualClient.Status().Update(ctx, pvcPatch)
|
||||
}
|
||||
|
||||
func (r *PodReconciler) pseudoPV(obj *v1.PersistentVolumeClaim) *v1.PersistentVolume {
|
||||
var storageClass string
|
||||
|
||||
if obj.Spec.StorageClassName != nil {
|
||||
storageClass = *obj.Spec.StorageClassName
|
||||
}
|
||||
|
||||
return &v1.PersistentVolume{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: obj.Name,
|
||||
Labels: map[string]string{
|
||||
pseudoPVLabel: "true",
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
volume.AnnBoundByController: "true",
|
||||
volume.AnnDynamicallyProvisioned: "k3k-kubelet",
|
||||
},
|
||||
},
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "PersistentVolume",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
FlexVolume: &v1.FlexPersistentVolumeSource{
|
||||
Driver: "pseudopv",
|
||||
},
|
||||
},
|
||||
StorageClassName: storageClass,
|
||||
VolumeMode: obj.Spec.VolumeMode,
|
||||
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
|
||||
AccessModes: obj.Spec.AccessModes,
|
||||
Capacity: obj.Spec.Resources.Requests,
|
||||
ClaimRef: &v1.ObjectReference{
|
||||
APIVersion: obj.APIVersion,
|
||||
UID: obj.UID,
|
||||
ResourceVersion: obj.ResourceVersion,
|
||||
Kind: obj.Kind,
|
||||
Namespace: obj.Namespace,
|
||||
Name: obj.Name,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -38,6 +38,7 @@ func (s *SecretSyncer) Reconcile(ctx context.Context, req reconcile.Request) (re
|
||||
// return immediately without re-enqueueing. We aren't watching this resource
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
var virtual corev1.Secret
|
||||
|
||||
if err := s.VirtualClient.Get(ctx, req.NamespacedName, &virtual); err != nil {
|
||||
@@ -45,16 +46,19 @@ func (s *SecretSyncer) Reconcile(ctx context.Context, req reconcile.Request) (re
|
||||
Requeue: true,
|
||||
}, fmt.Errorf("unable to get secret %s/%s from virtual cluster: %w", req.Namespace, req.Name, err)
|
||||
}
|
||||
|
||||
translated, err := s.TranslateFunc(&virtual)
|
||||
if err != nil {
|
||||
return reconcile.Result{
|
||||
Requeue: true,
|
||||
}, fmt.Errorf("unable to translate secret %s/%s from virtual cluster: %w", req.Namespace, req.Name, err)
|
||||
}
|
||||
|
||||
translatedKey := types.NamespacedName{
|
||||
Namespace: translated.Namespace,
|
||||
Name: translated.Name,
|
||||
}
|
||||
|
||||
var host corev1.Secret
|
||||
if err = s.HostClient.Get(ctx, translatedKey, &host); err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
@@ -66,6 +70,7 @@ func (s *SecretSyncer) Reconcile(ctx context.Context, req reconcile.Request) (re
|
||||
}, fmt.Errorf("unable to create host secret %s/%s for virtual secret %s/%s: %w",
|
||||
translated.Namespace, translated.Name, req.Namespace, req.Name, err)
|
||||
}
|
||||
|
||||
return reconcile.Result{Requeue: true}, fmt.Errorf("unable to get host secret %s/%s: %w", translated.Namespace, translated.Name, err)
|
||||
}
|
||||
// we are going to use the host in order to avoid conflicts on update
|
||||
@@ -79,13 +84,14 @@ func (s *SecretSyncer) Reconcile(ctx context.Context, req reconcile.Request) (re
|
||||
for key, value := range translated.Labels {
|
||||
host.Labels[key] = value
|
||||
}
|
||||
|
||||
if err = s.HostClient.Update(ctx, &host); err != nil {
|
||||
return reconcile.Result{
|
||||
Requeue: true,
|
||||
}, fmt.Errorf("unable to update host secret %s/%s for virtual secret %s/%s: %w",
|
||||
translated.Namespace, translated.Name, req.Namespace, req.Name, err)
|
||||
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
@@ -94,6 +100,7 @@ func (s *SecretSyncer) Reconcile(ctx context.Context, req reconcile.Request) (re
|
||||
func (s *SecretSyncer) isWatching(key types.NamespacedName) bool {
|
||||
s.mutex.RLock()
|
||||
defer s.mutex.RUnlock()
|
||||
|
||||
return s.objs.Has(key)
|
||||
}
|
||||
|
||||
@@ -113,14 +120,18 @@ func (s *SecretSyncer) AddResource(ctx context.Context, namespace, name string)
|
||||
if s.objs == nil {
|
||||
s.objs = sets.Set[types.NamespacedName]{}
|
||||
}
|
||||
|
||||
s.objs = s.objs.Insert(objKey)
|
||||
s.mutex.Unlock()
|
||||
|
||||
_, err := s.Reconcile(ctx, reconcile.Request{
|
||||
NamespacedName: objKey,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to reconcile new object %s/%s: %w", objKey.Namespace, objKey.Name, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -148,8 +159,10 @@ func (s *SecretSyncer) RemoveResource(ctx context.Context, namespace, name strin
|
||||
if s.objs == nil {
|
||||
s.objs = sets.Set[types.NamespacedName]{}
|
||||
}
|
||||
|
||||
s.objs = s.objs.Delete(objKey)
|
||||
s.mutex.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -159,12 +172,15 @@ func (s *SecretSyncer) removeHostSecret(ctx context.Context, virtualNamespace, v
|
||||
Namespace: virtualNamespace,
|
||||
Name: virtualName,
|
||||
}, &vSecret)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to get virtual secret %s/%s: %w", virtualNamespace, virtualName, err)
|
||||
}
|
||||
|
||||
translated, err := s.TranslateFunc(&vSecret)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to translate virtual secret: %s/%s: %w", virtualNamespace, virtualName, err)
|
||||
}
|
||||
|
||||
return s.HostClient.Delete(ctx, translated)
|
||||
}
|
||||
|
||||
@@ -53,6 +53,7 @@ func AddServiceSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clu
|
||||
clusterName: clusterName,
|
||||
clusterNamespace: clusterNamespace,
|
||||
}
|
||||
|
||||
return ctrl.NewControllerManagedBy(virtMgr).
|
||||
For(&v1.Service{}).
|
||||
WithOptions(controller.Options{
|
||||
@@ -63,9 +64,11 @@ func AddServiceSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clu
|
||||
|
||||
func (s *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
log := s.logger.With("Cluster", s.clusterName, "Service", req.NamespacedName)
|
||||
|
||||
if req.Name == "kubernetes" || req.Name == "kube-dns" {
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
var (
|
||||
virtService v1.Service
|
||||
hostService v1.Service
|
||||
@@ -75,9 +78,11 @@ func (s *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request
|
||||
if err := s.hostClient.Get(ctx, types.NamespacedName{Name: s.clusterName, Namespace: s.clusterNamespace}, &cluster); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
if err := s.virtualClient.Get(ctx, req.NamespacedName, &virtService); err != nil {
|
||||
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
syncedService := s.service(&virtService)
|
||||
if err := controllerutil.SetControllerReference(&cluster, syncedService, s.HostScheme); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
@@ -89,19 +94,23 @@ func (s *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request
|
||||
if err := s.hostClient.Delete(ctx, syncedService); err != nil {
|
||||
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
// remove the finalizer after cleaning up the synced service
|
||||
if controllerutil.ContainsFinalizer(&virtService, serviceFinalizerName) {
|
||||
controllerutil.RemoveFinalizer(&virtService, serviceFinalizerName)
|
||||
|
||||
if err := s.virtualClient.Update(ctx, &virtService); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
// Add finalizer if it does not exist
|
||||
if !controllerutil.ContainsFinalizer(&virtService, serviceFinalizerName) {
|
||||
controllerutil.AddFinalizer(&virtService, serviceFinalizerName)
|
||||
|
||||
if err := s.virtualClient.Update(ctx, &virtService); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
@@ -112,9 +121,12 @@ func (s *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request
|
||||
log.Info("creating the service for the first time on the host cluster")
|
||||
return reconcile.Result{}, s.hostClient.Create(ctx, syncedService)
|
||||
}
|
||||
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
log.Info("updating service on the host cluster")
|
||||
|
||||
return reconcile.Result{}, s.hostClient.Update(ctx, syncedService)
|
||||
}
|
||||
|
||||
|
||||
@@ -32,7 +32,6 @@ const (
|
||||
type webhookHandler struct {
|
||||
client ctrlruntimeclient.Client
|
||||
scheme *runtime.Scheme
|
||||
nodeName string
|
||||
serviceName string
|
||||
clusterName string
|
||||
clusterNamespace string
|
||||
@@ -42,7 +41,7 @@ type webhookHandler struct {
|
||||
// AddPodMutatorWebhook will add a mutator webhook to the virtual cluster to
|
||||
// modify the nodeName of the created pods with the name of the virtual kubelet node name
|
||||
// as well as remove any status fields of the downward apis env fields
|
||||
func AddPodMutatorWebhook(ctx context.Context, mgr manager.Manager, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, nodeName, serviceName string, logger *log.Logger) error {
|
||||
func AddPodMutatorWebhook(ctx context.Context, mgr manager.Manager, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, serviceName string, logger *log.Logger) error {
|
||||
handler := webhookHandler{
|
||||
client: mgr.GetClient(),
|
||||
scheme: mgr.GetScheme(),
|
||||
@@ -50,7 +49,6 @@ func AddPodMutatorWebhook(ctx context.Context, mgr manager.Manager, hostClient c
|
||||
serviceName: serviceName,
|
||||
clusterName: clusterName,
|
||||
clusterNamespace: clusterNamespace,
|
||||
nodeName: nodeName,
|
||||
}
|
||||
|
||||
// create mutator webhook configuration to the cluster
|
||||
@@ -58,6 +56,7 @@ func AddPodMutatorWebhook(ctx context.Context, mgr manager.Manager, hostClient c
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := handler.client.Create(ctx, config); err != nil {
|
||||
if !apierrors.IsAlreadyExists(err) {
|
||||
return err
|
||||
@@ -72,14 +71,13 @@ func (w *webhookHandler) Default(ctx context.Context, obj runtime.Object) error
|
||||
if !ok {
|
||||
return fmt.Errorf("invalid request: object was type %t not cluster", obj)
|
||||
}
|
||||
|
||||
w.logger.Infow("mutator webhook request", "Pod", pod.Name, "Namespace", pod.Namespace)
|
||||
if pod.Spec.NodeName == "" {
|
||||
pod.Spec.NodeName = w.nodeName
|
||||
}
|
||||
// look for status.* fields in the env
|
||||
if pod.Annotations == nil {
|
||||
pod.Annotations = make(map[string]string)
|
||||
}
|
||||
|
||||
for i, container := range pod.Spec.Containers {
|
||||
for j, env := range container.Env {
|
||||
if env.ValueFrom == nil || env.ValueFrom.FieldRef == nil {
|
||||
@@ -94,22 +92,28 @@ func (w *webhookHandler) Default(ctx context.Context, obj runtime.Object) error
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *webhookHandler) configuration(ctx context.Context, hostClient ctrlruntimeclient.Client) (*admissionregistrationv1.MutatingWebhookConfiguration, error) {
|
||||
w.logger.Infow("extracting webhook tls from host cluster")
|
||||
|
||||
var (
|
||||
webhookTLSSecret v1.Secret
|
||||
)
|
||||
|
||||
if err := hostClient.Get(ctx, types.NamespacedName{Name: agent.WebhookSecretName(w.clusterName), Namespace: w.clusterNamespace}, &webhookTLSSecret); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
caBundle, ok := webhookTLSSecret.Data["ca.crt"]
|
||||
if !ok {
|
||||
return nil, errors.New("webhook CABundle does not exist in secret")
|
||||
}
|
||||
|
||||
webhookURL := "https://" + w.serviceName + ":" + webhookPort + webhookPath
|
||||
|
||||
return &admissionregistrationv1.MutatingWebhookConfiguration{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: "admissionregistration.k8s.io/v1",
|
||||
@@ -156,10 +160,12 @@ func ParseFieldPathAnnotationKey(annotationKey string) (int, string, error) {
|
||||
if len(s) != 3 {
|
||||
return -1, "", errors.New("fieldpath annotation is not set correctly")
|
||||
}
|
||||
|
||||
containerIndex, err := strconv.Atoi(s[1])
|
||||
if err != nil {
|
||||
return -1, "", err
|
||||
}
|
||||
|
||||
envName := s[2]
|
||||
|
||||
return containerIndex, envName, nil
|
||||
|
||||
@@ -82,6 +82,7 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
virtConfig, err := virtRestConfig(ctx, c.VirtualConfigPath, hostClient, c.ClusterName, c.ClusterNamespace, c.Token, logger)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -93,7 +94,10 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
|
||||
}
|
||||
|
||||
hostMgr, err := ctrl.NewManager(hostConfig, manager.Options{
|
||||
Scheme: baseScheme,
|
||||
Scheme: baseScheme,
|
||||
LeaderElection: true,
|
||||
LeaderElectionNamespace: c.ClusterNamespace,
|
||||
LeaderElectionID: c.ClusterName,
|
||||
Metrics: ctrlserver.Options{
|
||||
BindAddress: ":8083",
|
||||
},
|
||||
@@ -107,40 +111,55 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
|
||||
return nil, errors.New("unable to create controller-runtime mgr for host cluster: " + err.Error())
|
||||
}
|
||||
|
||||
virtualScheme := runtime.NewScheme()
|
||||
// virtual client will only use core types (for now), no need to add anything other than the basics
|
||||
err = clientgoscheme.AddToScheme(virtualScheme)
|
||||
if err != nil {
|
||||
virtualScheme := runtime.NewScheme()
|
||||
if err := clientgoscheme.AddToScheme(virtualScheme); err != nil {
|
||||
return nil, errors.New("unable to add client go types to virtual cluster scheme: " + err.Error())
|
||||
}
|
||||
|
||||
webhookServer := webhook.NewServer(webhook.Options{
|
||||
CertDir: "/opt/rancher/k3k-webhook",
|
||||
})
|
||||
|
||||
virtualMgr, err := ctrl.NewManager(virtConfig, manager.Options{
|
||||
Scheme: virtualScheme,
|
||||
WebhookServer: webhookServer,
|
||||
Scheme: virtualScheme,
|
||||
WebhookServer: webhookServer,
|
||||
LeaderElection: true,
|
||||
LeaderElectionNamespace: "kube-system",
|
||||
LeaderElectionID: c.ClusterName,
|
||||
Metrics: ctrlserver.Options{
|
||||
BindAddress: ":8084",
|
||||
},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, errors.New("unable to create controller-runtime mgr for virtual cluster: " + err.Error())
|
||||
}
|
||||
|
||||
logger.Info("adding pod mutator webhook")
|
||||
if err := k3kwebhook.AddPodMutatorWebhook(ctx, virtualMgr, hostClient, c.ClusterName, c.ClusterNamespace, c.AgentHostname, c.ServiceName, logger); err != nil {
|
||||
|
||||
if err := k3kwebhook.AddPodMutatorWebhook(ctx, virtualMgr, hostClient, c.ClusterName, c.ClusterNamespace, c.ServiceName, logger); err != nil {
|
||||
return nil, errors.New("unable to add pod mutator webhook for virtual cluster: " + err.Error())
|
||||
}
|
||||
|
||||
logger.Info("adding service syncer controller")
|
||||
|
||||
if err := k3kkubeletcontroller.AddServiceSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace, k3klog.New(false)); err != nil {
|
||||
return nil, errors.New("failed to add service syncer controller: " + err.Error())
|
||||
}
|
||||
|
||||
logger.Info("adding pvc syncer controller")
|
||||
|
||||
if err := k3kkubeletcontroller.AddPVCSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace, k3klog.New(false)); err != nil {
|
||||
return nil, errors.New("failed to add pvc syncer controller: " + err.Error())
|
||||
}
|
||||
|
||||
logger.Info("adding pod pvc controller")
|
||||
|
||||
if err := k3kkubeletcontroller.AddPodPVCController(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace, k3klog.New(false)); err != nil {
|
||||
return nil, errors.New("failed to add pod pvc controller: " + err.Error())
|
||||
}
|
||||
|
||||
clusterIP, err := clusterIP(ctx, c.ServiceName, c.ClusterNamespace, hostClient)
|
||||
if err != nil {
|
||||
return nil, errors.New("failed to extract the clusterIP for the server service: " + err.Error())
|
||||
@@ -148,6 +167,7 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
|
||||
|
||||
// get the cluster's DNS IP to be injected to pods
|
||||
var dnsService v1.Service
|
||||
|
||||
dnsName := controller.SafeConcatNameWithPrefix(c.ClusterName, "kube-dns")
|
||||
if err := hostClient.Get(ctx, types.NamespacedName{Name: dnsName, Namespace: c.ClusterNamespace}, &dnsService); err != nil {
|
||||
return nil, errors.New("failed to get the DNS service for the cluster: " + err.Error())
|
||||
@@ -177,10 +197,16 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
|
||||
|
||||
func clusterIP(ctx context.Context, serviceName, clusterNamespace string, hostClient ctrlruntimeclient.Client) (string, error) {
|
||||
var service v1.Service
|
||||
serviceKey := types.NamespacedName{Namespace: clusterNamespace, Name: serviceName}
|
||||
|
||||
serviceKey := types.NamespacedName{
|
||||
Namespace: clusterNamespace,
|
||||
Name: serviceName,
|
||||
}
|
||||
|
||||
if err := hostClient.Get(ctx, serviceKey, &service); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return service.Spec.ClusterIP, nil
|
||||
}
|
||||
|
||||
@@ -189,10 +215,12 @@ func (k *kubelet) registerNode(ctx context.Context, agentIP, srvPort, namespace,
|
||||
nodeOpts := k.nodeOpts(ctx, srvPort, namespace, name, hostname, agentIP)
|
||||
|
||||
var err error
|
||||
|
||||
k.node, err = nodeutil.NewNode(k.name, providerFunc, nodeutil.WithClient(k.virtClient), nodeOpts)
|
||||
if err != nil {
|
||||
return errors.New("unable to start kubelet: " + err.Error())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -225,10 +253,13 @@ func (k *kubelet) start(ctx context.Context) {
|
||||
if err := k.node.WaitReady(context.Background(), time.Minute*1); err != nil {
|
||||
k.logger.Fatalw("node was not ready within timeout of 1 minute", zap.Error(err))
|
||||
}
|
||||
|
||||
<-k.node.Done()
|
||||
|
||||
if err := k.node.Err(); err != nil {
|
||||
k.logger.Fatalw("node stopped with an error", zap.Error(err))
|
||||
}
|
||||
|
||||
k.logger.Info("node exited successfully")
|
||||
}
|
||||
|
||||
@@ -253,13 +284,16 @@ func (k *kubelet) nodeOpts(ctx context.Context, srvPort, namespace, name, hostna
|
||||
if err := nodeutil.AttachProviderRoutes(mux)(c); err != nil {
|
||||
return errors.New("unable to attach routes: " + err.Error())
|
||||
}
|
||||
|
||||
c.Handler = mux
|
||||
|
||||
tlsConfig, err := loadTLSConfig(ctx, k.hostClient, name, namespace, k.name, hostname, k.token, agentIP)
|
||||
if err != nil {
|
||||
return errors.New("unable to get tls config: " + err.Error())
|
||||
}
|
||||
|
||||
c.TLSConfig = tlsConfig
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
@@ -273,8 +307,11 @@ func virtRestConfig(ctx context.Context, virtualConfigPath string, hostClient ct
|
||||
if err := hostClient.Get(ctx, types.NamespacedName{Namespace: clusterNamespace, Name: clusterName}, &cluster); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
endpoint := server.ServiceName(cluster.Name) + "." + cluster.Namespace
|
||||
|
||||
var b *bootstrap.ControlRuntimeBootstrap
|
||||
|
||||
if err := retry.OnError(controller.Backoff, func(err error) bool {
|
||||
return err != nil
|
||||
}, func() error {
|
||||
@@ -285,20 +322,27 @@ func virtRestConfig(ctx context.Context, virtualConfigPath string, hostClient ct
|
||||
}); err != nil {
|
||||
return nil, errors.New("unable to decode bootstrap: " + err.Error())
|
||||
}
|
||||
|
||||
adminCert, adminKey, err := certs.CreateClientCertKey(
|
||||
controller.AdminCommonName, []string{user.SystemPrivilegedGroup},
|
||||
nil, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, time.Hour*24*time.Duration(356),
|
||||
controller.AdminCommonName,
|
||||
[]string{user.SystemPrivilegedGroup},
|
||||
nil, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
|
||||
time.Hour*24*time.Duration(356),
|
||||
b.ClientCA.Content,
|
||||
b.ClientCAKey.Content)
|
||||
b.ClientCAKey.Content,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
url := fmt.Sprintf("https://%s:%d", server.ServiceName(cluster.Name), server.ServerPort)
|
||||
|
||||
kubeconfigData, err := kubeconfigBytes(url, []byte(b.ServerCA.Content), adminCert, adminKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return clientcmd.RESTConfigFromKubeConfig(kubeconfigData)
|
||||
}
|
||||
|
||||
@@ -330,10 +374,13 @@ func loadTLSConfig(ctx context.Context, hostClient ctrlruntimeclient.Client, clu
|
||||
cluster v1alpha1.Cluster
|
||||
b *bootstrap.ControlRuntimeBootstrap
|
||||
)
|
||||
|
||||
if err := hostClient.Get(ctx, types.NamespacedName{Name: clusterName, Namespace: clusterNamespace}, &cluster); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
endpoint := fmt.Sprintf("%s.%s", server.ServiceName(cluster.Name), cluster.Namespace)
|
||||
|
||||
if err := retry.OnError(controller.Backoff, func(err error) bool {
|
||||
return err != nil
|
||||
}, func() error {
|
||||
@@ -343,27 +390,34 @@ func loadTLSConfig(ctx context.Context, hostClient ctrlruntimeclient.Client, clu
|
||||
}); err != nil {
|
||||
return nil, errors.New("unable to decode bootstrap: " + err.Error())
|
||||
}
|
||||
|
||||
ip := net.ParseIP(agentIP)
|
||||
|
||||
altNames := certutil.AltNames{
|
||||
DNSNames: []string{hostname},
|
||||
IPs: []net.IP{ip},
|
||||
}
|
||||
|
||||
cert, key, err := certs.CreateClientCertKey(nodeName, nil, &altNames, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, 0, b.ServerCA.Content, b.ServerCAKey.Content)
|
||||
if err != nil {
|
||||
return nil, errors.New("unable to get cert and key: " + err.Error())
|
||||
}
|
||||
|
||||
clientCert, err := tls.X509KeyPair(cert, key)
|
||||
if err != nil {
|
||||
return nil, errors.New("unable to get key pair: " + err.Error())
|
||||
}
|
||||
|
||||
// create rootCA CertPool
|
||||
certs, err := certutil.ParseCertsPEM([]byte(b.ServerCA.Content))
|
||||
if err != nil {
|
||||
return nil, errors.New("unable to create ca certs: " + err.Error())
|
||||
}
|
||||
|
||||
if len(certs) < 1 {
|
||||
return nil, errors.New("ca cert is not parsed correctly")
|
||||
}
|
||||
|
||||
pool := x509.NewCertPool()
|
||||
pool.AddCert(certs[0])
|
||||
|
||||
|
||||
@@ -102,9 +102,11 @@ func main() {
|
||||
app.Before = func(clx *cli.Context) error {
|
||||
logger = log.New(debug)
|
||||
ctrlruntimelog.SetLogger(zapr.NewLogger(logger.Desugar().WithOptions(zap.AddCallerSkip(1))))
|
||||
|
||||
return nil
|
||||
}
|
||||
app.Action = run
|
||||
|
||||
if err := app.Run(os.Args); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
@@ -112,6 +114,7 @@ func main() {
|
||||
|
||||
func run(clx *cli.Context) error {
|
||||
ctx := context.Background()
|
||||
|
||||
if err := cfg.parse(configFile); err != nil {
|
||||
logger.Fatalw("failed to parse config file", "path", configFile, zap.Error(err))
|
||||
}
|
||||
@@ -119,6 +122,7 @@ func run(clx *cli.Context) error {
|
||||
if err := cfg.validate(); err != nil {
|
||||
logger.Fatalw("failed to validate config", zap.Error(err))
|
||||
}
|
||||
|
||||
k, err := newKubelet(ctx, &cfg, logger)
|
||||
if err != nil {
|
||||
logger.Fatalw("failed to create new virtual kubelet instance", zap.Error(err))
|
||||
|
||||
@@ -107,6 +107,7 @@ func (rc *resourceMetricsCollector) DescribeWithStability(ch chan<- *compbasemet
|
||||
// custom collector in a way that only collects metrics for active containers.
|
||||
func (rc *resourceMetricsCollector) CollectWithStability(ch chan<- compbasemetrics.Metric) {
|
||||
var errorCount float64
|
||||
|
||||
defer func() {
|
||||
ch <- compbasemetrics.NewLazyConstMetric(resourceScrapeResultDesc, compbasemetrics.GaugeValue, errorCount)
|
||||
}()
|
||||
@@ -121,6 +122,7 @@ func (rc *resourceMetricsCollector) CollectWithStability(ch chan<- compbasemetri
|
||||
rc.collectContainerCPUMetrics(ch, pod, container)
|
||||
rc.collectContainerMemoryMetrics(ch, pod, container)
|
||||
}
|
||||
|
||||
rc.collectPodCPUMetrics(ch, pod)
|
||||
rc.collectPodMemoryMetrics(ch, pod)
|
||||
}
|
||||
|
||||
@@ -119,6 +119,7 @@ func updateNodeCapacity(coreClient typedv1.CoreV1Interface, virtualClient client
|
||||
// If some node labels are specified, only the matching nodes will be considered.
|
||||
func getResourcesFromNodes(ctx context.Context, coreClient typedv1.CoreV1Interface, nodeLabels map[string]string) (v1.ResourceList, v1.ResourceList, error) {
|
||||
listOpts := metav1.ListOptions{}
|
||||
|
||||
if nodeLabels != nil {
|
||||
labelSelector := metav1.LabelSelector{MatchLabels: nodeLabels}
|
||||
listOpts.LabelSelector = labels.Set(labelSelector.MatchLabels).String()
|
||||
@@ -134,7 +135,6 @@ func getResourcesFromNodes(ctx context.Context, coreClient typedv1.CoreV1Interfa
|
||||
virtualAvailableResources := corev1.ResourceList{}
|
||||
|
||||
for _, node := range nodeList.Items {
|
||||
|
||||
// check if the node is Ready
|
||||
for _, condition := range node.Status.Conditions {
|
||||
if condition.Type != corev1.NodeReady {
|
||||
|
||||
@@ -110,24 +110,30 @@ func (p *Provider) GetContainerLogs(ctx context.Context, namespace, podName, con
|
||||
Follow: opts.Follow,
|
||||
Previous: opts.Previous,
|
||||
}
|
||||
|
||||
if opts.Tail != 0 {
|
||||
tailLines := int64(opts.Tail)
|
||||
options.TailLines = &tailLines
|
||||
}
|
||||
|
||||
if opts.LimitBytes != 0 {
|
||||
limitBytes := int64(opts.LimitBytes)
|
||||
options.LimitBytes = &limitBytes
|
||||
}
|
||||
|
||||
if opts.SinceSeconds != 0 {
|
||||
sinceSeconds := int64(opts.SinceSeconds)
|
||||
options.SinceSeconds = &sinceSeconds
|
||||
}
|
||||
|
||||
if !opts.SinceTime.IsZero() {
|
||||
sinceTime := metav1.NewTime(opts.SinceTime)
|
||||
options.SinceTime = &sinceTime
|
||||
}
|
||||
|
||||
closer, err := p.CoreClient.Pods(p.ClusterNamespace).GetLogs(hostPodName, &options).Stream(ctx)
|
||||
p.logger.Infof("got error %s when getting logs for %s in %s", err, hostPodName, p.ClusterNamespace)
|
||||
|
||||
return closer, err
|
||||
}
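A small self-contained sketch of the same GetLogs(...).Stream(ctx) pattern used above, copying the log stream to stdout. The namespace, pod name, and option values are placeholders, not the provider's actual translation logic.

package podlogs

import (
	"context"
	"io"
	"os"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/utils/ptr"
)

// streamLogs fetches the last 100 lines of a pod's logs and writes them to stdout.
func streamLogs(ctx context.Context, cfg *rest.Config) error {
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}

	opts := &corev1.PodLogOptions{
		TailLines: ptr.To[int64](100),
		Follow:    false,
	}

	rc, err := clientset.CoreV1().Pods("default").GetLogs("example-pod", opts).Stream(ctx)
	if err != nil {
		return err
	}
	defer rc.Close()

	_, err = io.Copy(os.Stdout, rc)

	return err
}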
|
||||
|
||||
@@ -148,10 +154,12 @@ func (p *Provider) RunInContainer(ctx context.Context, namespace, podName, conta
|
||||
Stdout: attach.Stdout() != nil,
|
||||
Stderr: attach.Stderr() != nil,
|
||||
}, scheme.ParameterCodec)
|
||||
|
||||
exec, err := remotecommand.NewSPDYExecutor(&p.ClientConfig, http.MethodPost, req.URL())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return exec.StreamWithContext(ctx, remotecommand.StreamOptions{
|
||||
Stdin: attach.Stdin(),
|
||||
Stdout: attach.Stdout(),
|
||||
@@ -179,10 +187,12 @@ func (p *Provider) AttachToContainer(ctx context.Context, namespace, podName, co
|
||||
Stdout: attach.Stdout() != nil,
|
||||
Stderr: attach.Stderr() != nil,
|
||||
}, scheme.ParameterCodec)
|
||||
|
||||
exec, err := remotecommand.NewSPDYExecutor(&p.ClientConfig, http.MethodPost, req.URL())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return exec.StreamWithContext(ctx, remotecommand.StreamOptions{
|
||||
Stdin: attach.Stdin(),
|
||||
Stdout: attach.Stdout(),
|
||||
@@ -204,8 +214,10 @@ func (p *Provider) GetStatsSummary(ctx context.Context) (*statsv1alpha1.Summary,
|
||||
}
|
||||
|
||||
// fetch the stats from all the nodes
|
||||
var nodeStats statsv1alpha1.NodeStats
|
||||
var allPodsStats []statsv1alpha1.PodStats
|
||||
var (
|
||||
nodeStats statsv1alpha1.NodeStats
|
||||
allPodsStats []statsv1alpha1.PodStats
|
||||
)
|
||||
|
||||
for _, n := range nodeList.Items {
|
||||
res, err := p.CoreClient.RESTClient().
|
||||
@@ -240,6 +252,7 @@ func (p *Provider) GetStatsSummary(ctx context.Context) (*statsv1alpha1.Summary,
|
||||
}
|
||||
|
||||
podsNameMap := make(map[string]*v1.Pod)
|
||||
|
||||
for _, pod := range pods {
|
||||
hostPodName := p.Translator.TranslateName(pod.Namespace, pod.Name)
|
||||
podsNameMap[hostPodName] = pod
|
||||
@@ -284,6 +297,7 @@ func (p *Provider) GetMetricsResource(ctx context.Context) ([]*dto.MetricFamily,
|
||||
if err != nil {
|
||||
return nil, errors.Join(err, errors.New("error gathering metrics from collector"))
|
||||
}
|
||||
|
||||
return metricFamily, nil
|
||||
}
|
||||
|
||||
@@ -300,9 +314,9 @@ func (p *Provider) PortForward(ctx context.Context, namespace, pod string, port
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, http.MethodPost, req.URL())
|
||||
portAsString := strconv.Itoa(int(port))
|
||||
|
||||
readyChannel := make(chan struct{})
|
||||
stopChannel := make(chan struct{}, 1)
|
||||
|
||||
@@ -333,7 +347,9 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
|
||||
Namespace: p.ClusterNamespace,
|
||||
Name: p.ClusterName,
|
||||
}
|
||||
|
||||
var cluster v1alpha1.Cluster
|
||||
|
||||
if err := p.HostClient.Get(ctx, clusterKey, &cluster); err != nil {
|
||||
return fmt.Errorf("unable to get cluster %s in namespace %s: %w", p.ClusterName, p.ClusterNamespace, err)
|
||||
}
|
||||
@@ -347,6 +363,11 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
|
||||
|
||||
tPod.Spec.NodeSelector = cluster.Spec.NodeSelector
|
||||
|
||||
// setting the hostname for the pod if it's not set
|
||||
if pod.Spec.Hostname == "" {
|
||||
tPod.Spec.Hostname = pod.Name
|
||||
}
|
||||
|
||||
// if the priorityClass for the virtual cluster is set then override the provided value
|
||||
// Note: the core-dns and local-path-provisioner pods are scheduled by k3s with the
// 'system-cluster-critical' and 'system-node-critical' default priority classes.
|
||||
@@ -368,11 +389,15 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
|
||||
if err := p.transformTokens(ctx, pod, tPod); err != nil {
|
||||
return fmt.Errorf("unable to transform tokens for pod %s/%s: %w", pod.Namespace, pod.Name, err)
|
||||
}
|
||||
// inject networking information to the pod including the virtual cluster controlplane endpoint
|
||||
p.configureNetworking(pod.Name, pod.Namespace, tPod, p.serverIP)
|
||||
|
||||
p.logger.Infow("Creating pod", "Host Namespace", tPod.Namespace, "Host Name", tPod.Name,
|
||||
"Virtual Namespace", pod.Namespace, "Virtual Name", "env", pod.Name, pod.Spec.Containers[0].Env)
|
||||
// inject networking information to the pod including the virtual cluster controlplane endpoint
|
||||
configureNetworking(tPod, pod.Name, pod.Namespace, p.serverIP, p.dnsIP)
|
||||
|
||||
p.logger.Infow("creating pod",
|
||||
"host_namespace", tPod.Namespace, "host_name", tPod.Name,
|
||||
"virtual_namespace", pod.Namespace, "virtual_name", pod.Name,
|
||||
)
|
||||
|
||||
return p.HostClient.Create(ctx, tPod)
|
||||
}
|
||||
|
||||
@@ -382,7 +407,9 @@ func (p *Provider) withRetry(ctx context.Context, f func(context.Context, *v1.Po
|
||||
interval = 2 * time.Second
|
||||
timeout = 10 * time.Second
|
||||
)
|
||||
|
||||
var allErrors error
|
||||
|
||||
// retryFn will retry until the operation succeeds, or the timeout occurs
|
||||
retryFn := func(ctx context.Context) (bool, error) {
|
||||
if lastErr := f(ctx, pod); lastErr != nil {
|
||||
@@ -390,11 +417,14 @@ func (p *Provider) withRetry(ctx context.Context, f func(context.Context, *v1.Po
|
||||
allErrors = errors.Join(allErrors, lastErr)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
if err := wait.PollUntilContextTimeout(ctx, interval, timeout, true, retryFn); err != nil {
|
||||
return errors.Join(allErrors, ErrRetryTimeout)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
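The retry helper above layers errors.Join on top of client-go's polling utility. Below is a minimal self-contained sketch of the same pattern; the interval/timeout values and the timeout error message are illustrative, not necessarily the provider's exact ones.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// withRetry keeps calling f until it succeeds or the timeout expires,
// accumulating every intermediate error along the way.
func withRetry(ctx context.Context, f func(context.Context) error) error {
	const (
		interval = 2 * time.Second
		timeout  = 10 * time.Second
	)

	var allErrors error

	retryFn := func(ctx context.Context) (bool, error) {
		if lastErr := f(ctx); lastErr != nil {
			allErrors = errors.Join(allErrors, lastErr)
			return false, nil // not done yet, poll again
		}

		return true, nil
	}

	if err := wait.PollUntilContextTimeout(ctx, interval, timeout, true, retryFn); err != nil {
		return errors.Join(allErrors, errors.New("retry timed out"))
	}

	return nil
}

func main() {
	err := withRetry(context.Background(), func(ctx context.Context) error {
		return errors.New("still failing")
	})
	fmt.Println(err)
}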
|
||||
|
||||
@@ -403,6 +433,7 @@ func (p *Provider) withRetry(ctx context.Context, f func(context.Context, *v1.Po
|
||||
func (p *Provider) transformVolumes(ctx context.Context, podNamespace string, volumes []corev1.Volume) error {
|
||||
for _, volume := range volumes {
|
||||
var optional bool
|
||||
|
||||
if strings.HasPrefix(volume.Name, kubeAPIAccessPrefix) {
|
||||
continue
|
||||
}
|
||||
@@ -411,17 +442,21 @@ func (p *Provider) transformVolumes(ctx context.Context, podNamespace string, vo
|
||||
if volume.ConfigMap.Optional != nil {
|
||||
optional = *volume.ConfigMap.Optional
|
||||
}
|
||||
|
||||
if err := p.syncConfigmap(ctx, podNamespace, volume.ConfigMap.Name, optional); err != nil {
|
||||
return fmt.Errorf("unable to sync configmap volume %s: %w", volume.Name, err)
|
||||
}
|
||||
|
||||
volume.ConfigMap.Name = p.Translator.TranslateName(podNamespace, volume.ConfigMap.Name)
|
||||
} else if volume.Secret != nil {
|
||||
if volume.Secret.Optional != nil {
|
||||
optional = *volume.Secret.Optional
|
||||
}
|
||||
|
||||
if err := p.syncSecret(ctx, podNamespace, volume.Secret.SecretName, optional); err != nil {
|
||||
return fmt.Errorf("unable to sync secret volume %s: %w", volume.Name, err)
|
||||
}
|
||||
|
||||
volume.Secret.SecretName = p.Translator.TranslateName(podNamespace, volume.Secret.SecretName)
|
||||
} else if volume.Projected != nil {
|
||||
for _, source := range volume.Projected.Sources {
|
||||
@@ -429,15 +464,18 @@ func (p *Provider) transformVolumes(ctx context.Context, podNamespace string, vo
|
||||
if source.ConfigMap.Optional != nil {
|
||||
optional = *source.ConfigMap.Optional
|
||||
}
|
||||
|
||||
configMapName := source.ConfigMap.Name
|
||||
if err := p.syncConfigmap(ctx, podNamespace, configMapName, optional); err != nil {
|
||||
return fmt.Errorf("unable to sync projected configmap %s: %w", configMapName, err)
|
||||
}
|
||||
|
||||
source.ConfigMap.Name = p.Translator.TranslateName(podNamespace, configMapName)
|
||||
} else if source.Secret != nil {
|
||||
if source.Secret.Optional != nil {
|
||||
optional = *source.Secret.Optional
|
||||
}
|
||||
|
||||
secretName := source.Secret.Name
|
||||
if err := p.syncSecret(ctx, podNamespace, secretName, optional); err != nil {
|
||||
return fmt.Errorf("unable to sync projected secret %s: %w", secretName, err)
|
||||
@@ -451,56 +489,65 @@ func (p *Provider) transformVolumes(ctx context.Context, podNamespace string, vo
|
||||
if downwardAPI.FieldRef.FieldPath == translate.MetadataNameField {
|
||||
downwardAPI.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNameAnnotation)
|
||||
}
|
||||
|
||||
if downwardAPI.FieldRef.FieldPath == translate.MetadataNamespaceField {
|
||||
downwardAPI.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNamespaceAnnotation)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// syncConfigmap will add the configmap object to the queue of the syncer controller to be synced to the host cluster
|
||||
func (p *Provider) syncConfigmap(ctx context.Context, podNamespace string, configMapName string, optional bool) error {
|
||||
var configMap corev1.ConfigMap
|
||||
|
||||
nsName := types.NamespacedName{
|
||||
Namespace: podNamespace,
|
||||
Name: configMapName,
|
||||
}
|
||||
err := p.VirtualClient.Get(ctx, nsName, &configMap)
|
||||
if err != nil {
|
||||
|
||||
if err := p.VirtualClient.Get(ctx, nsName, &configMap); err != nil {
|
||||
// check if it's an optional configmap
|
||||
if apierrors.IsNotFound(err) && optional {
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("unable to get configmap to sync %s/%s: %w", nsName.Namespace, nsName.Name, err)
|
||||
}
|
||||
err = p.Handler.AddResource(ctx, &configMap)
|
||||
if err != nil {
|
||||
|
||||
if err := p.Handler.AddResource(ctx, &configMap); err != nil {
|
||||
return fmt.Errorf("unable to add configmap to sync %s/%s: %w", nsName.Namespace, nsName.Name, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// syncSecret will add the secret object to the queue of the syncer controller to be synced to the host cluster
|
||||
func (p *Provider) syncSecret(ctx context.Context, podNamespace string, secretName string, optional bool) error {
|
||||
p.logger.Infow("Syncing secret", "Name", secretName, "Namespace", podNamespace, "optional", optional)
|
||||
|
||||
var secret corev1.Secret
|
||||
|
||||
nsName := types.NamespacedName{
|
||||
Namespace: podNamespace,
|
||||
Name: secretName,
|
||||
}
|
||||
err := p.VirtualClient.Get(ctx, nsName, &secret)
|
||||
if err != nil {
|
||||
|
||||
if err := p.VirtualClient.Get(ctx, nsName, &secret); err != nil {
|
||||
if apierrors.IsNotFound(err) && optional {
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("unable to get secret to sync %s/%s: %w", nsName.Namespace, nsName.Name, err)
|
||||
}
|
||||
err = p.Handler.AddResource(ctx, &secret)
|
||||
if err != nil {
|
||||
|
||||
if err := p.Handler.AddResource(ctx, &secret); err != nil {
|
||||
return fmt.Errorf("unable to add secret to sync %s/%s: %w", nsName.Namespace, nsName.Name, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -590,16 +637,20 @@ func (p *Provider) DeletePod(ctx context.Context, pod *corev1.Pod) error {
|
||||
func (p *Provider) deletePod(ctx context.Context, pod *corev1.Pod) error {
|
||||
p.logger.Infof("Got request to delete pod %s", pod.Name)
|
||||
hostName := p.Translator.TranslateName(pod.Namespace, pod.Name)
|
||||
|
||||
err := p.CoreClient.Pods(p.ClusterNamespace).Delete(ctx, hostName, metav1.DeleteOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to delete pod %s/%s: %w", pod.Namespace, pod.Name, err)
|
||||
}
|
||||
|
||||
if err = p.pruneUnusedVolumes(ctx, pod); err != nil {
|
||||
// note that we don't return an error here. The pod was successfully deleted; another process
// should clean this up without affecting the user
|
||||
p.logger.Errorf("failed to prune leftover volumes for %s/%s: %w, resources may be left", pod.Namespace, pod.Name, err)
|
||||
}
|
||||
|
||||
p.logger.Infof("Deleted pod %s", pod.Name)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -610,6 +661,7 @@ func (p *Provider) pruneUnusedVolumes(ctx context.Context, pod *corev1.Pod) erro
|
||||
// for pruning
|
||||
pruneSecrets := sets.Set[string]{}.Insert(rawSecrets...)
|
||||
pruneConfigMap := sets.Set[string]{}.Insert(rawConfigMaps...)
|
||||
|
||||
var pods corev1.PodList
|
||||
// only pods in the same namespace could be using secrets/configmaps that this pod is using
|
||||
err := p.VirtualClient.List(ctx, &pods, &client.ListOptions{
|
||||
@@ -618,35 +670,43 @@ func (p *Provider) pruneUnusedVolumes(ctx context.Context, pod *corev1.Pod) erro
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to list pods: %w", err)
|
||||
}
|
||||
|
||||
for _, vPod := range pods.Items {
|
||||
if vPod.Name == pod.Name {
|
||||
continue
|
||||
}
|
||||
|
||||
secrets, configMaps := getSecretsAndConfigmaps(&vPod)
|
||||
pruneSecrets.Delete(secrets...)
|
||||
pruneConfigMap.Delete(configMaps...)
|
||||
}
|
||||
|
||||
for _, secretName := range pruneSecrets.UnsortedList() {
|
||||
var secret corev1.Secret
|
||||
err := p.VirtualClient.Get(ctx, types.NamespacedName{
|
||||
|
||||
key := types.NamespacedName{
|
||||
Name: secretName,
|
||||
Namespace: pod.Namespace,
|
||||
}, &secret)
|
||||
if err != nil {
|
||||
}
|
||||
|
||||
if err := p.VirtualClient.Get(ctx, key, &secret); err != nil {
|
||||
return fmt.Errorf("unable to get secret %s/%s for pod volume: %w", pod.Namespace, secretName, err)
|
||||
}
|
||||
err = p.Handler.RemoveResource(ctx, &secret)
|
||||
if err != nil {
|
||||
|
||||
if err = p.Handler.RemoveResource(ctx, &secret); err != nil {
|
||||
return fmt.Errorf("unable to remove secret %s/%s for pod volume: %w", pod.Namespace, secretName, err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, configMapName := range pruneConfigMap.UnsortedList() {
|
||||
var configMap corev1.ConfigMap
|
||||
err := p.VirtualClient.Get(ctx, types.NamespacedName{
|
||||
|
||||
key := types.NamespacedName{
|
||||
Name: configMapName,
|
||||
Namespace: pod.Namespace,
|
||||
}, &configMap)
|
||||
if err != nil {
|
||||
}
|
||||
|
||||
if err := p.VirtualClient.Get(ctx, key, &configMap); err != nil {
|
||||
return fmt.Errorf("unable to get configMap %s/%s for pod volume: %w", pod.Namespace, configMapName, err)
|
||||
}
|
||||
|
||||
@@ -654,6 +714,7 @@ func (p *Provider) pruneUnusedVolumes(ctx context.Context, pod *corev1.Pod) erro
|
||||
return fmt.Errorf("unable to remove configMap %s/%s for pod volume: %w", pod.Namespace, configMapName, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
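The pruning logic above is essentially a set difference: start from everything the deleted pod referenced, subtract whatever other pods still use, and prune what remains. A minimal sketch of that idea with the apimachinery sets package follows; the secret names are made up.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// secrets referenced by the pod being deleted
	prune := sets.Set[string]{}.Insert("app-token", "tls-cert", "shared-config")

	// secrets still referenced by other pods in the same namespace
	for _, inUse := range [][]string{{"shared-config"}, {"tls-cert"}} {
		prune.Delete(inUse...)
	}

	// only "app-token" is left to prune from the host cluster
	fmt.Println(prune.UnsortedList())
}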
|
||||
|
||||
@@ -667,12 +728,15 @@ func (p *Provider) GetPod(ctx context.Context, namespace, name string) (*corev1.
|
||||
Namespace: p.ClusterNamespace,
|
||||
Name: p.Translator.TranslateName(namespace, name),
|
||||
}
|
||||
|
||||
var pod corev1.Pod
|
||||
err := p.HostClient.Get(ctx, hostNamespaceName, &pod)
|
||||
if err != nil {
|
||||
|
||||
if err := p.HostClient.Get(ctx, hostNamespaceName, &pod); err != nil {
|
||||
return nil, fmt.Errorf("error when retrieving pod: %w", err)
|
||||
}
|
||||
|
||||
p.Translator.TranslateFrom(&pod)
|
||||
|
||||
return &pod, nil
|
||||
}
|
||||
|
||||
@@ -682,11 +746,14 @@ func (p *Provider) GetPod(ctx context.Context, namespace, name string) (*corev1.
|
||||
// to return a version after DeepCopy.
|
||||
func (p *Provider) GetPodStatus(ctx context.Context, namespace, name string) (*corev1.PodStatus, error) {
|
||||
p.logger.Debugw("got a request for pod status", "Namespace", namespace, "Name", name)
|
||||
|
||||
pod, err := p.GetPod(ctx, namespace, name)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to get pod for status: %w", err)
|
||||
}
|
||||
|
||||
p.logger.Debugw("got pod status", "Namespace", namespace, "Name", name, "Status", pod.Status)
|
||||
|
||||
return pod.Status.DeepCopy(), nil
|
||||
}
|
||||
|
||||
@@ -696,103 +763,112 @@ func (p *Provider) GetPodStatus(ctx context.Context, namespace, name string) (*c
|
||||
// to return a version after DeepCopy.
|
||||
func (p *Provider) GetPods(ctx context.Context) ([]*corev1.Pod, error) {
|
||||
selector := labels.NewSelector()
|
||||
|
||||
requirement, err := labels.NewRequirement(translate.ClusterNameLabel, selection.Equals, []string{p.ClusterName})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to create label selector: %w", err)
|
||||
}
|
||||
|
||||
selector = selector.Add(*requirement)
|
||||
|
||||
var podList corev1.PodList
|
||||
err = p.HostClient.List(ctx, &podList, &client.ListOptions{LabelSelector: selector})
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to list pods: %w", err)
|
||||
}
|
||||
|
||||
retPods := []*corev1.Pod{}
|
||||
|
||||
for _, pod := range podList.DeepCopy().Items {
|
||||
p.Translator.TranslateFrom(&pod)
|
||||
retPods = append(retPods, &pod)
|
||||
}
|
||||
|
||||
return retPods, nil
|
||||
}
|
||||
|
||||
// configureNetworking will inject network information into each pod to connect it to the
// virtual cluster API server, as well as configure DNS information to connect it to the
// synced coredns on the host cluster.
|
||||
func (p *Provider) configureNetworking(podName, podNamespace string, pod *corev1.Pod, serverIP string) {
|
||||
func configureNetworking(pod *corev1.Pod, podName, podNamespace, serverIP, dnsIP string) {
|
||||
// inject serverIP to hostalias for the pod
|
||||
KubernetesHostAlias := corev1.HostAlias{
|
||||
IP: serverIP,
|
||||
Hostnames: []string{"kubernetes", "kubernetes.default", "kubernetes.default.svc", "kubernetes.default.svc.cluster", "kubernetes.default.svc.cluster.local"},
|
||||
}
|
||||
pod.Spec.HostAliases = append(pod.Spec.HostAliases, KubernetesHostAlias)
|
||||
// inject networking information to the pod's environment variables
|
||||
for i := range pod.Spec.Containers {
|
||||
pod.Spec.Containers[i].Env = append(pod.Spec.Containers[i].Env,
|
||||
corev1.EnvVar{
|
||||
Name: "KUBERNETES_PORT_443_TCP",
|
||||
Value: "tcp://" + p.serverIP + ":6443",
|
||||
},
|
||||
corev1.EnvVar{
|
||||
Name: "KUBERNETES_PORT",
|
||||
Value: "tcp://" + p.serverIP + ":6443",
|
||||
},
|
||||
corev1.EnvVar{
|
||||
Name: "KUBERNETES_PORT_443_TCP_ADDR",
|
||||
Value: p.serverIP,
|
||||
},
|
||||
corev1.EnvVar{
|
||||
Name: "KUBERNETES_SERVICE_HOST",
|
||||
Value: p.serverIP,
|
||||
},
|
||||
corev1.EnvVar{
|
||||
Name: "KUBERNETES_SERVICE_PORT",
|
||||
Value: "6443",
|
||||
},
|
||||
)
|
||||
}
|
||||
// handle init containers as well
|
||||
for i := range pod.Spec.InitContainers {
|
||||
pod.Spec.InitContainers[i].Env = append(pod.Spec.InitContainers[i].Env,
|
||||
corev1.EnvVar{
|
||||
Name: "KUBERNETES_PORT_443_TCP",
|
||||
Value: "tcp://" + p.serverIP + ":6443",
|
||||
},
|
||||
corev1.EnvVar{
|
||||
Name: "KUBERNETES_PORT",
|
||||
Value: "tcp://" + p.serverIP + ":6443",
|
||||
},
|
||||
corev1.EnvVar{
|
||||
Name: "KUBERNETES_PORT_443_TCP_ADDR",
|
||||
Value: p.serverIP,
|
||||
},
|
||||
corev1.EnvVar{
|
||||
Name: "KUBERNETES_SERVICE_HOST",
|
||||
Value: p.serverIP,
|
||||
},
|
||||
corev1.EnvVar{
|
||||
Name: "KUBERNETES_SERVICE_PORT",
|
||||
Value: "6443",
|
||||
},
|
||||
)
|
||||
}
|
||||
pod.Spec.HostAliases = append(pod.Spec.HostAliases, corev1.HostAlias{
|
||||
IP: serverIP,
|
||||
Hostnames: []string{
|
||||
"kubernetes",
|
||||
"kubernetes.default",
|
||||
"kubernetes.default.svc",
|
||||
"kubernetes.default.svc.cluster",
|
||||
"kubernetes.default.svc.cluster.local",
|
||||
},
|
||||
})
|
||||
|
||||
// inject the cluster DNS IP into the pods, except for the coredns pod
|
||||
if !strings.HasPrefix(podName, "coredns") {
|
||||
pod.Spec.DNSPolicy = corev1.DNSNone
|
||||
pod.Spec.DNSConfig = &corev1.PodDNSConfig{
|
||||
Nameservers: []string{
|
||||
p.dnsIP,
|
||||
dnsIP,
|
||||
},
|
||||
Searches: []string{
|
||||
podNamespace + ".svc.cluster.local", "svc.cluster.local", "cluster.local",
|
||||
podNamespace + ".svc.cluster.local",
|
||||
"svc.cluster.local",
|
||||
"cluster.local",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
updatedEnvVars := []corev1.EnvVar{
|
||||
{Name: "KUBERNETES_PORT", Value: "tcp://" + serverIP + ":6443"},
|
||||
{Name: "KUBERNETES_SERVICE_HOST", Value: serverIP},
|
||||
{Name: "KUBERNETES_SERVICE_PORT", Value: "6443"},
|
||||
{Name: "KUBERNETES_SERVICE_PORT_HTTPS", Value: "6443"},
|
||||
{Name: "KUBERNETES_PORT_443_TCP", Value: "tcp://" + serverIP + ":6443"},
|
||||
{Name: "KUBERNETES_PORT_443_TCP_ADDR", Value: serverIP},
|
||||
{Name: "KUBERNETES_PORT_443_TCP_PORT", Value: "6443"},
|
||||
}
|
||||
|
||||
// inject networking information to the pod's environment variables
|
||||
for i := range pod.Spec.Containers {
|
||||
pod.Spec.Containers[i].Env = overrideEnvVars(pod.Spec.Containers[i].Env, updatedEnvVars)
|
||||
}
|
||||
|
||||
// handle init containers as well
|
||||
for i := range pod.Spec.InitContainers {
|
||||
pod.Spec.InitContainers[i].Env = overrideEnvVars(pod.Spec.InitContainers[i].Env, updatedEnvVars)
|
||||
}
|
||||
}
|
||||
|
||||
// overrideEnvVars will override the orig environment variables with matching entries from the updated list
|
||||
func overrideEnvVars(orig, updated []corev1.EnvVar) []corev1.EnvVar {
|
||||
if len(updated) == 0 {
|
||||
return orig
|
||||
}
|
||||
|
||||
// create map for single lookup
|
||||
updatedEnvVarMap := make(map[string]corev1.EnvVar)
|
||||
for _, updatedEnvVar := range updated {
|
||||
updatedEnvVarMap[updatedEnvVar.Name] = updatedEnvVar
|
||||
}
|
||||
|
||||
for i, origEnvVar := range orig {
|
||||
if updatedEnvVar, found := updatedEnvVarMap[origEnvVar.Name]; found {
|
||||
orig[i] = updatedEnvVar
|
||||
}
|
||||
}
|
||||
|
||||
return orig
|
||||
}
|
||||
|
||||
// getSecretsAndConfigmaps retrieves a list of all secrets/configmaps that are in use by a given pod. Useful
// for removing or identifying which virtual cluster resources need to exist in the host cluster.
|
||||
func getSecretsAndConfigmaps(pod *corev1.Pod) ([]string, []string) {
|
||||
var secrets []string
|
||||
var configMaps []string
|
||||
var (
|
||||
secrets []string
|
||||
configMaps []string
|
||||
)
|
||||
|
||||
for _, volume := range pod.Spec.Volumes {
|
||||
if volume.Secret != nil {
|
||||
secrets = append(secrets, volume.Secret.SecretName)
|
||||
@@ -808,6 +884,7 @@ func getSecretsAndConfigmaps(pod *corev1.Pod) ([]string, []string) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return secrets, configMaps
|
||||
}
|
||||
|
||||
@@ -828,28 +905,33 @@ func (p *Provider) configureFieldPathEnv(pod, tPod *v1.Pod) error {
|
||||
envVar.ValueFrom.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNameAnnotation)
|
||||
pod.Spec.InitContainers[i].Env[j] = envVar
|
||||
}
|
||||
|
||||
if fieldPath == translate.MetadataNamespaceField {
|
||||
envVar.ValueFrom.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.MetadataNamespaceField)
|
||||
pod.Spec.InitContainers[i].Env[j] = envVar
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for i, container := range pod.Spec.Containers {
|
||||
for j, envVar := range container.Env {
|
||||
if envVar.ValueFrom == nil || envVar.ValueFrom.FieldRef == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
fieldPath := envVar.ValueFrom.FieldRef.FieldPath
|
||||
if fieldPath == translate.MetadataNameField {
|
||||
envVar.ValueFrom.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNameAnnotation)
|
||||
pod.Spec.Containers[i].Env[j] = envVar
|
||||
}
|
||||
|
||||
if fieldPath == translate.MetadataNamespaceField {
|
||||
envVar.ValueFrom.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNameAnnotation)
|
||||
pod.Spec.Containers[i].Env[j] = envVar
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for name, value := range pod.Annotations {
|
||||
if strings.Contains(name, webhook.FieldpathField) {
|
||||
containerIndex, envName, err := webhook.ParseFieldPathAnnotationKey(name)
|
||||
@@ -869,5 +951,6 @@ func (p *Provider) configureFieldPathEnv(pod, tPod *v1.Pod) error {
|
||||
delete(tPod.Annotations, name)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
71 k3k-kubelet/provider/provider_test.go (new file)
@@ -0,0 +1,71 @@
|
||||
package provider
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
func Test_overrideEnvVars(t *testing.T) {
|
||||
type args struct {
|
||||
orig []corev1.EnvVar
|
||||
new []corev1.EnvVar
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want []corev1.EnvVar
|
||||
}{
|
||||
{
|
||||
name: "orig and new are empty",
|
||||
args: args{
|
||||
orig: []v1.EnvVar{},
|
||||
new: []v1.EnvVar{},
|
||||
},
|
||||
want: []v1.EnvVar{},
|
||||
},
|
||||
{
|
||||
name: "only orig is empty",
|
||||
args: args{
|
||||
orig: []v1.EnvVar{},
|
||||
new: []v1.EnvVar{{Name: "FOO", Value: "new_val"}},
|
||||
},
|
||||
want: []v1.EnvVar{},
|
||||
},
|
||||
{
|
||||
name: "orig has a matching element",
|
||||
args: args{
|
||||
orig: []v1.EnvVar{{Name: "FOO", Value: "old_val"}},
|
||||
new: []v1.EnvVar{{Name: "FOO", Value: "new_val"}},
|
||||
},
|
||||
want: []v1.EnvVar{{Name: "FOO", Value: "new_val"}},
|
||||
},
|
||||
{
|
||||
name: "orig have multiple elements",
|
||||
args: args{
|
||||
orig: []v1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "old_val_1"}},
|
||||
new: []v1.EnvVar{{Name: "FOO_1", Value: "new_val_1"}},
|
||||
},
|
||||
want: []v1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "new_val_1"}},
|
||||
},
|
||||
{
|
||||
name: "orig and new have multiple elements and some not matching",
|
||||
args: args{
|
||||
orig: []v1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "old_val_1"}},
|
||||
new: []v1.EnvVar{{Name: "FOO_1", Value: "new_val_1"}, {Name: "FOO_2", Value: "val_1"}},
|
||||
},
|
||||
want: []v1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "new_val_1"}},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := overrideEnvVars(tt.args.orig, tt.args.new); !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("overrideEnvVars() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -30,12 +30,14 @@ func (p *Provider) transformTokens(ctx context.Context, pod, tPod *corev1.Pod) e
|
||||
}
|
||||
|
||||
virtualSecretName := k3kcontroller.SafeConcatNameWithPrefix(pod.Spec.ServiceAccountName, "token")
|
||||
|
||||
virtualSecret := virtualSecret(virtualSecretName, pod.Namespace, pod.Spec.ServiceAccountName)
|
||||
if err := p.VirtualClient.Create(ctx, virtualSecret); err != nil {
|
||||
if !apierrors.IsAlreadyExists(err) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// extracting the tokens data from the secret we just created
|
||||
virtualSecretKey := types.NamespacedName{
|
||||
Name: virtualSecret.Name,
|
||||
@@ -49,9 +51,11 @@ func (p *Provider) transformTokens(ctx context.Context, pod, tPod *corev1.Pod) e
|
||||
if len(virtualSecret.Data) < 3 {
|
||||
return fmt.Errorf("token secret %s/%s data is empty", virtualSecret.Namespace, virtualSecret.Name)
|
||||
}
|
||||
|
||||
hostSecret := virtualSecret.DeepCopy()
|
||||
hostSecret.Type = ""
|
||||
hostSecret.Annotations = make(map[string]string)
|
||||
|
||||
p.Translator.TranslateTo(hostSecret)
|
||||
|
||||
if err := p.HostClient.Create(ctx, hostSecret); err != nil {
|
||||
@@ -59,7 +63,9 @@ func (p *Provider) transformTokens(ctx context.Context, pod, tPod *corev1.Pod) e
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
p.translateToken(tPod, hostSecret.Name)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -96,6 +102,7 @@ func isKubeAccessVolumeFound(pod *corev1.Pod) bool {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
|
||||
@@ -17,9 +17,9 @@ func (t *translatorSizeQueue) Next() *remotecommand.TerminalSize {
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
newSize := remotecommand.TerminalSize{
|
||||
|
||||
return &remotecommand.TerminalSize{
|
||||
Width: size.Width,
|
||||
Height: size.Height,
|
||||
}
|
||||
return &newSize
|
||||
}
|
||||
|
||||
@@ -45,14 +45,17 @@ func (t *ToHostTranslator) TranslateTo(obj client.Object) {
|
||||
if annotations == nil {
|
||||
annotations = map[string]string{}
|
||||
}
|
||||
|
||||
annotations[ResourceNameAnnotation] = obj.GetName()
|
||||
annotations[ResourceNamespaceAnnotation] = obj.GetNamespace()
|
||||
obj.SetAnnotations(annotations)
|
||||
|
||||
// add a label to quickly identify objects owned by a given virtual cluster
|
||||
labels := obj.GetLabels()
|
||||
if labels == nil {
|
||||
labels = map[string]string{}
|
||||
}
|
||||
|
||||
labels[ClusterNameLabel] = t.ClusterName
|
||||
obj.SetLabels(labels)
|
||||
|
||||
@@ -77,6 +80,7 @@ func (t *ToHostTranslator) TranslateFrom(obj client.Object) {
|
||||
// In this case, we need to have some sort of fallback or error return
|
||||
name := annotations[ResourceNameAnnotation]
|
||||
namespace := annotations[ResourceNamespaceAnnotation]
|
||||
|
||||
obj.SetName(name)
|
||||
obj.SetNamespace(namespace)
|
||||
delete(annotations, ResourceNameAnnotation)
|
||||
@@ -91,7 +95,6 @@ func (t *ToHostTranslator) TranslateFrom(obj client.Object) {
|
||||
// resource version/UID won't match what's in the virtual cluster.
|
||||
obj.SetResourceVersion("")
|
||||
obj.SetUID("")
|
||||
|
||||
}
|
||||
|
||||
// TranslateName returns the name of the resource in the host cluster. Will not update the object with this name.
|
||||
@@ -106,5 +109,6 @@ func (t *ToHostTranslator) TranslateName(namespace string, name string) string {
|
||||
nameKey := fmt.Sprintf("%s+%s+%s", name, namespace, t.ClusterName)
|
||||
// it's possible that the suffix will be in the name, so we use hex to make it valid for k8s
|
||||
nameSuffix := hex.EncodeToString([]byte(nameKey))
|
||||
|
||||
return controller.SafeConcatName(namePrefix, nameSuffix)
|
||||
}
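A small standalone sketch of the same name-translation idea: join name, namespace, and cluster name with "+", hex-encode the key so it only contains characters valid in Kubernetes object names, and prepend a fixed prefix. The prefix, separator, and the absence of the real SafeConcatName length truncation are assumptions for illustration only.

package main

import (
	"encoding/hex"
	"fmt"
)

// translateName mirrors the hex-encoding approach used by TranslateName above.
func translateName(prefix, clusterName, namespace, name string) string {
	nameKey := fmt.Sprintf("%s+%s+%s", name, namespace, clusterName)

	return prefix + "-" + hex.EncodeToString([]byte(nameKey))
}

func main() {
	// e.g. a virtual "web" pod in "default" for cluster "c1"
	fmt.Println(translateName("k3k", "c1", "default", "web"))
}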
|
||||
|
||||
8 main.go
@@ -82,9 +82,12 @@ func main() {
|
||||
if err := validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logger = log.New(debug)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := app.Run(os.Args); err != nil {
|
||||
logger.Fatalw("failed to run k3k controller", zap.Error(err))
|
||||
}
|
||||
@@ -111,22 +114,26 @@ func run(clx *cli.Context) error {
|
||||
ctrlruntimelog.SetLogger(zapr.NewLogger(logger.Desugar().WithOptions(zap.AddCallerSkip(1))))
|
||||
|
||||
logger.Info("adding cluster controller")
|
||||
|
||||
if err := cluster.Add(ctx, mgr, sharedAgentImage, sharedAgentImagePullPolicy); err != nil {
|
||||
return fmt.Errorf("failed to add the new cluster controller: %v", err)
|
||||
}
|
||||
|
||||
logger.Info("adding etcd pod controller")
|
||||
|
||||
if err := cluster.AddPodController(ctx, mgr); err != nil {
|
||||
return fmt.Errorf("failed to add the new cluster controller: %v", err)
|
||||
}
|
||||
|
||||
logger.Info("adding clusterset controller")
|
||||
|
||||
if err := clusterset.Add(ctx, mgr, clusterCIDR); err != nil {
|
||||
return fmt.Errorf("failed to add the clusterset controller: %v", err)
|
||||
}
|
||||
|
||||
if clusterCIDR == "" {
|
||||
logger.Info("adding networkpolicy node controller")
|
||||
|
||||
if err := clusterset.AddNodeController(ctx, mgr); err != nil {
|
||||
return fmt.Errorf("failed to add the clusterset node controller: %v", err)
|
||||
}
|
||||
@@ -147,5 +154,6 @@ func validate() error {
|
||||
return errors.New("invalid value for shared agent image policy")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1,6 +0,0 @@
|
||||
image: rancher/k3k:{{replace "+" "-" build.tag}}
|
||||
manifests:
|
||||
- image: rancher/k3k:{{replace "+" "-" build.tag}}-amd64
|
||||
platform:
|
||||
architecture: amd64
|
||||
os: linux
|
||||
@@ -1,16 +0,0 @@
|
||||
/*
|
||||
Copyright YEAR Rancher Labs, Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
46 ops/build
@@ -1,46 +0,0 @@
|
||||
#!/bin/bash
|
||||
set -ex
|
||||
|
||||
source $(dirname $0)/version
|
||||
|
||||
cd $(dirname $0)/..
|
||||
|
||||
mkdir -p bin deploy
|
||||
|
||||
if [ "$(uname)" = "Linux" ]; then
|
||||
OTHER_LINKFLAGS="-extldflags -static -s"
|
||||
fi
|
||||
|
||||
LINKFLAGS="-X github.com/rancher/k3k.Version=$VERSION"
|
||||
LINKFLAGS="-X github.com/rancher/k3k.GitCommit=$COMMIT $LINKFLAGS"
|
||||
CGO_ENABLED=0 go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3k
|
||||
if [ "$CROSS" = "true" ] && [ "$ARCH" = "amd64" ]; then
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=s390x go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3k-s390x
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3k-arm64
|
||||
GOOS=freebsd GOARCH=arm64 go build -ldflags "$LINKFLAGS" -o bin/k3k-freebsd
|
||||
GOOS=darwin GOARCH=amd64 go build -ldflags "$LINKFLAGS" -o bin/k3k-darwin-amd64
|
||||
GOOS=darwin GOARCH=arm64 go build -ldflags "$LINKFLAGS" -o bin/k3k-darwin-aarch64
|
||||
GOOS=windows GOARCH=amd64 go build -ldflags "$LINKFLAGS" -o bin/k3k-windows
|
||||
fi
|
||||
|
||||
# build k3k-kubelet
|
||||
CGO_ENABLED=0 go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3k-kubelet ./k3k-kubelet
|
||||
if [ "$CROSS" = "true" ] && [ "$ARCH" = "amd64" ]; then
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=s390x go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3k-kubelet-s390x
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3k-kubelet-arm64
|
||||
GOOS=freebsd GOARCH=arm64 go build -ldflags "$LINKFLAGS" -o bin/k3k-kubelet-freebsd
|
||||
GOOS=darwin GOARCH=amd64 go build -ldflags "$LINKFLAGS" -o bin/k3k-kubelet-darwin-amd64
|
||||
GOOS=darwin GOARCH=arm64 go build -ldflags "$LINKFLAGS" -o bin/k3k-kubelet-darwin-aarch64
|
||||
GOOS=windows GOARCH=amd64 go build -ldflags "$LINKFLAGS" -o bin/k3k-kubelet-windows
|
||||
fi
|
||||
|
||||
# build k3kcli
|
||||
CGO_ENABLED=0 go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3kcli ./cli
|
||||
if [ "$CROSS" = "true" ] && [ "$ARCH" = "amd64" ]; then
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=s390x go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3kcli-s390x ./cli
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3kcli-arm64 ./cli
|
||||
GOOS=freebsd GOARCH=arm64 go build -ldflags "$LINKFLAGS" -o bin/k3kcli-freebsd ./cli
|
||||
GOOS=darwin GOARCH=amd64 go build -ldflags "$LINKFLAGS" -o bin/k3kcli-darwin-amd64 ./cli
|
||||
GOOS=darwin GOARCH=arm64 go build -ldflags "$LINKFLAGS" -o bin/k3kcli-darwin-aarch64 ./cli
|
||||
GOOS=windows GOARCH=amd64 go build -ldflags "$LINKFLAGS" -o bin/k3kcli-windows ./cli
|
||||
fi
|
||||
@@ -1,8 +0,0 @@
|
||||
#! /bin/sh
|
||||
|
||||
cd $(dirname $0)/../
|
||||
|
||||
# This will return non-zero until all of our objects in ./pkg/apis can generate valid crds.
|
||||
# allowDangerousTypes is needed for struct that use floats
|
||||
controller-gen crd:generateEmbeddedObjectMeta=true,allowDangerousTypes=false paths=./pkg/apis/... output:crd:dir=./charts/k3k/crds
|
||||
|
||||
16 ops/checksum
@@ -1,16 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -ex
|
||||
|
||||
cd $(dirname $0)/..
|
||||
|
||||
CHECKSUM_DIR=${CHECKSUM_DIR:-./bin}
|
||||
|
||||
sumfile="${CHECKSUM_DIR}/sha256sum.txt"
|
||||
echo -n "" > "${sumfile}"
|
||||
|
||||
files=$(ls ${CHECKSUM_DIR} | grep -v "sha256sum.txt")
|
||||
for file in ${files}; do
|
||||
sha256sum "${CHECKSUM_DIR}/${file}" | sed "s;$(dirname ${CHECKSUM_DIR}/${file})/;;g" >> "${sumfile}"
|
||||
done
|
||||
|
||||
cat "${sumfile}"
|
||||
11 ops/ci
@@ -1,11 +0,0 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
cd $(dirname $0)
|
||||
|
||||
./build
|
||||
./checksum
|
||||
./test
|
||||
./validate
|
||||
./validate-ci
|
||||
./package
|
||||
@@ -1,8 +0,0 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
cd $(dirname $0)
|
||||
|
||||
./build
|
||||
./test
|
||||
./package
|
||||
11 ops/entry
@@ -1,11 +0,0 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
mkdir -p bin dist
|
||||
if [ -e ./ops/$1 ]; then
|
||||
./ops/"$@"
|
||||
else
|
||||
exec "$@"
|
||||
fi
|
||||
|
||||
chown -R $DAPPER_UID:$DAPPER_GID .
|
||||
@@ -1,31 +0,0 @@
|
||||
#!/bin/bash
|
||||
set -ex
|
||||
|
||||
source $(dirname $0)/version
|
||||
|
||||
cd $(dirname $0)/..
|
||||
|
||||
git fetch --tags
|
||||
CHART_TAG=chart-$(grep "version: " charts/k3k/Chart.yaml | awk '{print $2}')
|
||||
if [ $(git tag -l "$version") ]; then
|
||||
echo "tag already exists"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# update the index.yaml
|
||||
cr index --token ${GITHUB_TOKEN} \
|
||||
--release-name-template "chart-{{ .Version }}" \
|
||||
--package-path ./deploy/ \
|
||||
--index-path index.yaml \
|
||||
--git-repo k3k \
|
||||
-o rancher
|
||||
|
||||
# push to gh-pages
|
||||
git config --global user.email "hussein.galal.ahmed.11@gmail.com"
|
||||
git config --global user.name "galal-hussein"
|
||||
git config --global url.https://${GITHUB_TOKEN}@github.com/.insteadOf https://github.com/
|
||||
|
||||
# push index.yaml to gh-pages
|
||||
git add index.yaml
|
||||
git commit -m "add chart-${CHART_TAG} to index.yaml"
|
||||
git push --force --set-upstream origin HEAD:gh-pages
|
||||
30 ops/package
@@ -1,30 +0,0 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
source $(dirname $0)/version
|
||||
|
||||
cd $(dirname $0)/..
|
||||
|
||||
mkdir -p dist/artifacts
|
||||
cp bin/k3k dist/artifacts/k3k${SUFFIX}
|
||||
cp bin/k3kcli dist/artifacts/k3kcli${SUFFIX}
|
||||
cp bin/k3k-kubelet dist/artifacts/k3k-kubelet${SUFFIX}
|
||||
|
||||
IMAGE=${REPO}/k3k:${TAG}
|
||||
DOCKERFILE=package/Dockerfile
|
||||
if [ -e ${DOCKERFILE}.${ARCH} ]; then
|
||||
DOCKERFILE=${DOCKERFILE}.${ARCH}
|
||||
fi
|
||||
|
||||
docker build -f ${DOCKERFILE} -t ${IMAGE} .
|
||||
echo Built ${IMAGE}
|
||||
|
||||
# todo: This might need to go to it's own repo
|
||||
IMAGE=${REPO}/k3k:${TAG}-kubelet
|
||||
DOCKERFILE=package/Dockerfile.kubelet
|
||||
if [ -e ${DOCKERFILE}.${ARCH} ]; then
|
||||
DOCKERFILE=${DOCKERFILE}.${ARCH}
|
||||
fi
|
||||
|
||||
docker build -f ${DOCKERFILE} -t ${IMAGE} .
|
||||
echo Built ${IMAGE}
|
||||
@@ -1,10 +0,0 @@
|
||||
#!/bin/bash
|
||||
set -ex
|
||||
|
||||
source $(dirname $0)/version
|
||||
|
||||
cd $(dirname $0)/..
|
||||
|
||||
mkdir -p deploy/
|
||||
|
||||
cr package --package-path deploy/ charts/k3k
|
||||
@@ -1,3 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
exec $(dirname $0)/ci
|
||||
9 ops/test
@@ -1,9 +0,0 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
cd $(dirname $0)/..
|
||||
|
||||
if [ -z ${SKIP_TESTS} ]; then
|
||||
echo Running tests
|
||||
go test -cover -tags=test ./...
|
||||
fi
|
||||
21 ops/validate
@@ -1,21 +0,0 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
cd $(dirname $0)/..
|
||||
|
||||
echo Running validation
|
||||
|
||||
PACKAGES="$(go list ./...)"
|
||||
|
||||
if ! command -v golangci-lint; then
|
||||
echo Skipping validation: no golangci-lint available
|
||||
exit
|
||||
fi
|
||||
|
||||
echo Running validation
|
||||
|
||||
echo Running: golangci-lint
|
||||
golangci-lint run
|
||||
|
||||
echo Running: go fmt
|
||||
test -z "$(go fmt ${PACKAGES} | tee /dev/stderr)"
|
||||
@@ -1,15 +0,0 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
cd $(dirname $0)/..
|
||||
|
||||
go generate
|
||||
|
||||
source ./ops/version
|
||||
|
||||
if [ -n "$DIRTY" ]; then
|
||||
echo Git is dirty
|
||||
git status
|
||||
git diff
|
||||
exit 1
|
||||
fi
|
||||
33 ops/version
@@ -1,33 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
if [ -n "$(git status --porcelain --untracked-files=no)" ]; then
|
||||
DIRTY="-dirty"
|
||||
fi
|
||||
|
||||
COMMIT=$(git rev-parse --short HEAD)
|
||||
GIT_TAG=${TAG:-$(git tag -l --contains HEAD | head -n 1)}
|
||||
|
||||
if [[ -z "$DIRTY" && -n "$GIT_TAG" ]]; then
|
||||
VERSION=$GIT_TAG
|
||||
else
|
||||
VERSION="${COMMIT}${DIRTY}"
|
||||
fi
|
||||
|
||||
if [ -z "$ARCH" ]; then
|
||||
ARCH=$(go env GOHOSTARCH)
|
||||
fi
|
||||
|
||||
SUFFIX="-${ARCH}"
|
||||
|
||||
|
||||
if [[ $VERSION = "chart*" ]]; then
|
||||
TAG=${TAG:-${VERSION}}
|
||||
else
|
||||
TAG=${TAG:-${VERSION}${SUFFIX}}
|
||||
fi
|
||||
|
||||
REPO=${REPO:-rancher}
|
||||
|
||||
if echo $TAG | grep dirty; then
|
||||
TAG=dev
|
||||
fi
|
||||
@@ -25,5 +25,6 @@ func addKnownTypes(s *runtime.Scheme) error {
|
||||
&ClusterSetList{},
|
||||
)
|
||||
metav1.AddToGroupVersion(s, SchemeGroupVersion)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1,86 +0,0 @@
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// +genclient
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +kubebuilder:storageversion
|
||||
// +kubebuilder:subresource:status
|
||||
// +kubebuilder:object:root=true
|
||||
type ClusterSet struct {
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
||||
// +kubebuilder:default={}
|
||||
//
|
||||
// Spec is the spec of the ClusterSet
|
||||
Spec ClusterSetSpec `json:"spec"`
|
||||
|
||||
// Status is the status of the ClusterSet
|
||||
Status ClusterSetStatus `json:"status,omitempty"`
|
||||
}
|
||||
|
||||
type ClusterSetSpec struct {
|
||||
// MaxLimits are the limits that apply to all clusters (server + agent) in the set
|
||||
MaxLimits v1.ResourceList `json:"maxLimits,omitempty"`
|
||||
|
||||
// DefaultLimits are the limits used for servers/agents when a cluster in the set doesn't provide any
|
||||
DefaultLimits *ClusterLimit `json:"defaultLimits,omitempty"`
|
||||
|
||||
// DefaultNodeSelector is the node selector that applies to all clusters (server + agent) in the set
|
||||
DefaultNodeSelector map[string]string `json:"defaultNodeSelector,omitempty"`
|
||||
|
||||
// DefaultPriorityClass is the priorityClassName applied to all pods of all clusters in the set
|
||||
DefaultPriorityClass string `json:"defaultPriorityClass,omitempty"`
|
||||
|
||||
// DisableNetworkPolicy is an option that will disable the creation of a default networkpolicy for cluster isolation
|
||||
DisableNetworkPolicy bool `json:"disableNetworkPolicy,omitempty"`
|
||||
|
||||
// +kubebuilder:default={shared}
|
||||
// +kubebuilder:validation:XValidation:message="mode is immutable",rule="self == oldSelf"
|
||||
// +kubebuilder:validation:MinItems=1
|
||||
//
|
||||
// AllowedNodeTypes are the allowed cluster provisioning modes. Defaults to [shared].
|
||||
AllowedNodeTypes []ClusterMode `json:"allowedNodeTypes,omitempty"`
|
||||
|
||||
// PodSecurityAdmissionLevel is the policy level applied to the pods in the namespace.
|
||||
PodSecurityAdmissionLevel *PodSecurityAdmissionLevel `json:"podSecurityAdmissionLevel,omitempty"`
|
||||
}
|
||||
|
||||
// +kubebuilder:validation:Enum=privileged;baseline;restricted
|
||||
//
|
||||
// PodSecurityAdmissionLevel is the policy level applied to the pods in the namespace.
|
||||
type PodSecurityAdmissionLevel string
|
||||
|
||||
const (
|
||||
PrivilegedPodSecurityAdmissionLevel = PodSecurityAdmissionLevel("privileged")
|
||||
BaselinePodSecurityAdmissionLevel = PodSecurityAdmissionLevel("baseline")
|
||||
RestrictedPodSecurityAdmissionLevel = PodSecurityAdmissionLevel("restricted")
|
||||
)
|
||||
|
||||
type ClusterSetStatus struct {
|
||||
|
||||
// ObservedGeneration was the generation at the time the status was updated.
|
||||
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
|
||||
|
||||
// LastUpdate is the timestamp when the status was last updated
|
||||
LastUpdate string `json:"lastUpdateTime,omitempty"`
|
||||
|
||||
// Summary is a summary of the status
|
||||
Summary string `json:"summary,omitempty"`
|
||||
|
||||
// Conditions are the invidual conditions for the cluster set
|
||||
Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +kubebuilder:object:root=true
|
||||
type ClusterSetList struct {
|
||||
metav1.ListMeta `json:"metadata,omitempty"`
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
||||
Items []ClusterSet `json:"items"`
|
||||
}
|
||||
@@ -10,80 +10,37 @@ import (
|
||||
// +kubebuilder:object:root=true
|
||||
// +kubebuilder:storageversion
|
||||
// +kubebuilder:subresource:status
|
||||
|
||||
// Cluster defines a virtual Kubernetes cluster managed by k3k.
|
||||
// It specifies the desired state of a virtual cluster, including version, node configuration, and networking.
|
||||
// k3k uses this to provision and manage these virtual clusters.
|
||||
type Cluster struct {
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
||||
// Spec defines the desired state of the Cluster.
|
||||
//
|
||||
// +kubebuilder:default={}
|
||||
// +optional
|
||||
Spec ClusterSpec `json:"spec"`
|
||||
Spec ClusterSpec `json:"spec"`
|
||||
|
||||
// Status reflects the observed state of the Cluster.
|
||||
//
|
||||
// +optional
|
||||
Status ClusterStatus `json:"status,omitempty"`
|
||||
}
|
||||
|
||||
// ClusterSpec defines the desired state of a virtual Kubernetes cluster.
|
||||
type ClusterSpec struct {
|
||||
// Version is a string representing the Kubernetes version to be used by the virtual nodes.
|
||||
// Version is the K3s version to use for the virtual nodes.
|
||||
// It should follow the K3s versioning convention (e.g., v1.28.2-k3s1).
|
||||
// If not specified, the Kubernetes version of the host node will be used.
|
||||
//
|
||||
// +optional
|
||||
Version string `json:"version"`
|
||||
|
||||
// Servers is the number of K3s pods to run in server (controlplane) mode.
|
||||
//
|
||||
// +kubebuilder:default=1
|
||||
// +kubebuilder:validation:XValidation:message="cluster must have at least one server",rule="self >= 1"
|
||||
// +optional
|
||||
Servers *int32 `json:"servers"`
|
||||
|
||||
// Agents is the number of K3s pods to run in agent (worker) mode.
|
||||
//
|
||||
// +kubebuilder:default=0
|
||||
// +kubebuilder:validation:XValidation:message="invalid value for agents",rule="self >= 0"
|
||||
// +optional
|
||||
Agents *int32 `json:"agents"`
|
||||
|
||||
// NodeSelector is the node selector that will be applied to all server/agent pods.
|
||||
// In "shared" mode the node selector will be applied also to the workloads.
|
||||
//
|
||||
// +optional
|
||||
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
|
||||
|
||||
// PriorityClass is the priorityClassName that will be applied to all server/agent pods.
|
||||
// In "shared" mode the priorityClassName will be applied also to the workloads.
|
||||
PriorityClass string `json:"priorityClass,omitempty"`
|
||||
|
||||
// Limit is the limits that apply for the server/worker nodes.
|
||||
Limit *ClusterLimit `json:"clusterLimit,omitempty"`
|
||||
|
||||
// TokenSecretRef is Secret reference used as a token join server and worker nodes to the cluster. The controller
|
||||
// assumes that the secret has a field "token" in its data, any other fields in the secret will be ignored.
|
||||
// +optional
|
||||
TokenSecretRef *v1.SecretReference `json:"tokenSecretRef"`
|
||||
|
||||
// ClusterCIDR is the CIDR range for the pods of the cluster. Defaults to 10.42.0.0/16.
|
||||
// +kubebuilder:validation:XValidation:message="clusterCIDR is immutable",rule="self == oldSelf"
|
||||
ClusterCIDR string `json:"clusterCIDR,omitempty"`
|
||||
|
||||
// ServiceCIDR is the CIDR range for the services in the cluster. Defaults to 10.43.0.0/16.
|
||||
// +kubebuilder:validation:XValidation:message="serviceCIDR is immutable",rule="self == oldSelf"
|
||||
ServiceCIDR string `json:"serviceCIDR,omitempty"`
|
||||
|
||||
// ClusterDNS is the IP address for the coredns service. Needs to be in the range provided by ServiceCIDR or CoreDNS may not deploy.
|
||||
// Defaults to 10.43.0.10.
|
||||
// +kubebuilder:validation:XValidation:message="clusterDNS is immutable",rule="self == oldSelf"
|
||||
ClusterDNS string `json:"clusterDNS,omitempty"`
|
||||
|
||||
// ServerArgs are the ordered key value pairs (e.x. "testArg", "testValue") for the K3s pods running in server mode.
|
||||
ServerArgs []string `json:"serverArgs,omitempty"`
|
||||
|
||||
// AgentArgs are the ordered key value pairs (e.x. "testArg", "testValue") for the K3s pods running in agent mode.
|
||||
AgentArgs []string `json:"agentArgs,omitempty"`
|
||||
|
||||
// TLSSANs are the subjectAlternativeNames for the certificate the K3s server will use.
|
||||
TLSSANs []string `json:"tlsSANs,omitempty"`
|
||||
|
||||
// Addons is a list of secrets containing raw YAML which will be deployed in the virtual K3k cluster on startup.
|
||||
Addons []Addon `json:"addons,omitempty"`
|
||||
|
||||
// Mode is the cluster provisioning mode which can be either "shared" or "virtual". Defaults to "shared"
|
||||
// Mode specifies the cluster provisioning mode: "shared" or "virtual".
|
||||
// Defaults to "shared". This field is immutable.
|
||||
//
|
||||
// +kubebuilder:default="shared"
|
||||
// +kubebuilder:validation:Enum=shared;virtual
|
||||
@@ -91,49 +48,263 @@ type ClusterSpec struct {
|
||||
// +optional
|
||||
Mode ClusterMode `json:"mode,omitempty"`
|
||||
|
||||
// Persistence contains options controlling how the etcd data of the virtual cluster is persisted. By default, no data
|
||||
// persistence is guaranteed, so restart of a virtual cluster pod may result in data loss without this field.
|
||||
// Servers specifies the number of K3s pods to run in server (control plane) mode.
|
||||
// Must be at least 1. Defaults to 1.
|
||||
//
|
||||
// +kubebuilder:validation:XValidation:message="cluster must have at least one server",rule="self >= 1"
|
||||
// +kubebuilder:default=1
|
||||
// +optional
|
||||
Servers *int32 `json:"servers"`
|
||||
|
||||
// Agents specifies the number of K3s pods to run in agent (worker) mode.
|
||||
// Must be 0 or greater. Defaults to 0.
|
||||
// This field is ignored in "shared" mode.
|
||||
//
|
||||
// +kubebuilder:default=0
|
||||
// +kubebuilder:validation:XValidation:message="invalid value for agents",rule="self >= 0"
|
||||
// +optional
|
||||
Agents *int32 `json:"agents"`
|
||||
|
||||
// ClusterCIDR is the CIDR range for pod IPs.
|
||||
// Defaults to 10.42.0.0/16 in shared mode and 10.52.0.0/16 in virtual mode.
|
||||
// This field is immutable.
|
||||
//
|
||||
// +kubebuilder:validation:XValidation:message="clusterCIDR is immutable",rule="self == oldSelf"
|
||||
// +optional
|
||||
ClusterCIDR string `json:"clusterCIDR,omitempty"`
|
||||
|
||||
// ServiceCIDR is the CIDR range for service IPs.
|
||||
// Defaults to 10.43.0.0/16 in shared mode and 10.53.0.0/16 in virtual mode.
|
||||
// This field is immutable.
|
||||
//
|
||||
// +kubebuilder:validation:XValidation:message="serviceCIDR is immutable",rule="self == oldSelf"
|
||||
// +optional
|
||||
ServiceCIDR string `json:"serviceCIDR,omitempty"`
|
||||
|
||||
// ClusterDNS is the IP address for the CoreDNS service.
|
||||
// Must be within the ServiceCIDR range. Defaults to 10.43.0.10.
|
||||
// This field is immutable.
|
||||
//
|
||||
// +kubebuilder:validation:XValidation:message="clusterDNS is immutable",rule="self == oldSelf"
|
||||
// +optional
|
||||
ClusterDNS string `json:"clusterDNS,omitempty"`
|
||||
|
||||
// Persistence specifies options for persisting etcd data.
|
||||
// Defaults to dynamic persistence, which uses a PersistentVolumeClaim to provide data persistence.
|
||||
// A default StorageClass is required for dynamic persistence.
|
||||
//
|
||||
// +kubebuilder:default={type: "dynamic"}
|
||||
Persistence PersistenceConfig `json:"persistence,omitempty"`
|
||||
|
||||
// Expose contains options for exposing the apiserver inside/outside of the cluster. By default, this is only exposed as a
|
||||
// clusterIP which is relatively secure, but difficult to access outside of the cluster.
|
||||
// Expose specifies options for exposing the API server.
|
||||
// By default, it's only exposed as a ClusterIP.
|
||||
//
|
||||
// +optional
|
||||
Expose *ExposeConfig `json:"expose,omitempty"`
|
||||
|
||||
// NodeSelector specifies node labels to constrain where server/agent pods are scheduled.
|
||||
// In "shared" mode, this also applies to workloads.
|
||||
//
|
||||
// +optional
|
||||
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
|
||||
|
||||
// PriorityClass specifies the priorityClassName for server/agent pods.
|
||||
// In "shared" mode, this also applies to workloads.
|
||||
//
|
||||
// +optional
|
||||
PriorityClass string `json:"priorityClass,omitempty"`
|
||||
|
||||
// Limit defines resource limits for server/agent nodes.
|
||||
//
|
||||
// +optional
|
||||
Limit *ClusterLimit `json:"clusterLimit,omitempty"`
|
||||
|
||||
// TokenSecretRef is a Secret reference containing the token used by worker nodes to join the cluster.
|
||||
// The Secret must have a "token" field in its data.
|
||||
//
|
||||
// +optional
|
||||
TokenSecretRef *v1.SecretReference `json:"tokenSecretRef"`
|
||||
|
||||
// TLSSANs specifies subject alternative names for the K3s server certificate.
|
||||
//
|
||||
// +optional
|
||||
TLSSANs []string `json:"tlsSANs,omitempty"`
|
||||
|
||||
// ServerArgs specifies ordered key-value pairs for K3s server pods.
|
||||
// Example: ["--tls-san=example.com"]
|
||||
//
|
||||
// +optional
|
||||
ServerArgs []string `json:"serverArgs,omitempty"`
|
||||
|
||||
// AgentArgs specifies ordered key-value pairs for K3s agent pods.
|
||||
// Example: ["--node-name=my-agent-node"]
|
||||
//
|
||||
// +optional
|
||||
AgentArgs []string `json:"agentArgs,omitempty"`
|
||||
|
||||
// Addons specifies secrets containing raw YAML to deploy on cluster startup.
|
||||
//
|
||||
// +optional
|
||||
Addons []Addon `json:"addons,omitempty"`
|
||||
}
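A minimal sketch of how these fields fit together when constructing a Cluster object in Go; it assumes this v1alpha1 package, the usual metav1 import, and the k8s.io/utils/ptr helper, and all values are illustrative:

cluster := &Cluster{
	ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "k3k-demo"},
	Spec: ClusterSpec{
		Servers:     ptr.To[int32](1),
		Agents:      ptr.To[int32](0),
		TLSSANs:     []string{"k3s.example.com"},
		Persistence: PersistenceConfig{Type: DynamicPersistenceMode},
		Expose:      &ExposeConfig{NodePort: &NodePortConfig{}},
	},
}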
|
||||
|
||||
// ClusterMode is the provisioning mode of a Cluster. Valid values are "shared" and "virtual".
//
// +kubebuilder:validation:Enum=shared;virtual
// +kubebuilder:default="shared"
type ClusterMode string

const (
// SharedClusterMode represents a cluster that shares resources with the host node.
SharedClusterMode = ClusterMode("shared")

// VirtualClusterMode represents a cluster that runs in a virtual environment.
VirtualClusterMode = ClusterMode("virtual")
)
|
||||
|
||||
// PersistenceMode is the storage mode of a Cluster.
//
// +kubebuilder:default="dynamic"
type PersistenceMode string

const (
// EphemeralPersistenceMode represents a cluster with no data persistence.
EphemeralPersistenceMode = PersistenceMode("ephemeral")

// DynamicPersistenceMode represents a cluster with dynamic data persistence using a PVC.
DynamicPersistenceMode = PersistenceMode("dynamic")
)
|
||||
|
||||
// ClusterLimit defines resource limits for server and agent nodes.
type ClusterLimit struct {
// ServerLimit specifies resource limits (cpu/memory) for server nodes.
ServerLimit v1.ResourceList `json:"serverLimit,omitempty"`

// WorkerLimit specifies resource limits (cpu/memory) for agent nodes.
WorkerLimit v1.ResourceList `json:"workerLimit,omitempty"`
}
|
||||
|
||||
// Addon specifies a Secret containing YAML to be deployed on cluster startup.
type Addon struct {
// SecretNamespace is the namespace of the Secret.
SecretNamespace string `json:"secretNamespace,omitempty"`

// SecretRef is the name of the Secret.
SecretRef string `json:"secretRef,omitempty"`
}
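As a hedged illustration, an Addon entry pointing at a Secret of raw manifests; the namespace and Secret name below are made-up example values:

addon := Addon{
	SecretNamespace: "k3k-system",      // namespace holding the Secret (example value)
	SecretRef:       "demo-manifests",  // Secret whose data contains raw YAML (example value)
}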
|
||||
|
||||
// PersistenceConfig specifies options for persisting etcd data.
|
||||
type PersistenceConfig struct {
|
||||
// Type specifies the persistence mode.
|
||||
//
|
||||
// +kubebuilder:default="dynamic"
|
||||
Type PersistenceMode `json:"type"`
|
||||
|
||||
// StorageClassName is the name of the StorageClass to use for the PVC.
|
||||
// This field is only relevant in "dynamic" mode.
|
||||
//
|
||||
// +optional
|
||||
StorageClassName *string `json:"storageClassName,omitempty"`
|
||||
|
||||
// StorageRequestSize is the requested size for the PVC.
|
||||
// This field is only relevant in "dynamic" mode.
|
||||
//
|
||||
// +optional
|
||||
StorageRequestSize string `json:"storageRequestSize,omitempty"`
|
||||
}
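A short sketch of dynamic persistence with an explicit StorageClass; the class name and size are examples, and ptr.To comes from k8s.io/utils/ptr:

persistence := PersistenceConfig{
	Type:               DynamicPersistenceMode,
	StorageClassName:   ptr.To("local-path"), // any provisioning StorageClass available on the host
	StorageRequestSize: "2G",
}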
|
||||
|
||||
// ExposeConfig specifies options for exposing the API server.
|
||||
type ExposeConfig struct {
|
||||
// Ingress specifies options for exposing the API server through an Ingress.
|
||||
//
|
||||
// +optional
|
||||
Ingress *IngressConfig `json:"ingress,omitempty"`
|
||||
|
||||
// LoadBalancer specifies options for exposing the API server through a LoadBalancer service.
|
||||
//
|
||||
// +optional
|
||||
LoadBalancer *LoadBalancerConfig `json:"loadbalancer,omitempty"`
|
||||
|
||||
// NodePort specifies options for exposing the API server through NodePort.
|
||||
//
|
||||
// +optional
|
||||
NodePort *NodePortConfig `json:"nodePort,omitempty"`
|
||||
}
|
||||
|
||||
// IngressConfig specifies options for exposing the API server through an Ingress.
|
||||
type IngressConfig struct {
|
||||
// Annotations specifies annotations to add to the Ingress.
|
||||
//
|
||||
// +optional
|
||||
Annotations map[string]string `json:"annotations,omitempty"`
|
||||
|
||||
// IngressClassName specifies the IngressClass to use for the Ingress.
|
||||
//
|
||||
// +optional
|
||||
IngressClassName string `json:"ingressClassName,omitempty"`
|
||||
}
|
||||
|
||||
// LoadBalancerConfig specifies options for exposing the API server through a LoadBalancer service.
|
||||
type LoadBalancerConfig struct{}
|
||||
|
||||
// NodePortConfig specifies options for exposing the API server through NodePort.
|
||||
type NodePortConfig struct {
|
||||
// ServerPort is the port on each node on which the K3s server service is exposed when type is NodePort.
|
||||
// If not specified, a port will be allocated (default: 30000-32767).
|
||||
//
|
||||
// +optional
|
||||
ServerPort *int32 `json:"serverPort,omitempty"`
|
||||
|
||||
// ServicePort is the port on each node on which the K3s service is exposed when type is NodePort.
|
||||
// If not specified, a port will be allocated (default: 30000-32767).
|
||||
//
|
||||
// +optional
|
||||
ServicePort *int32 `json:"servicePort,omitempty"`
|
||||
|
||||
// ETCDPort is the port on each node on which the ETCD service is exposed when type is NodePort.
|
||||
// If not specified, a port will be allocated (default: 30000-32767).
|
||||
//
|
||||
// +optional
|
||||
ETCDPort *int32 `json:"etcdPort,omitempty"`
|
||||
}
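A sketch of exposing the API server on a fixed NodePort; the port value is illustrative and must fall in the host cluster's NodePort range (default 30000-32767):

expose := ExposeConfig{
	NodePort: &NodePortConfig{
		ServerPort: ptr.To[int32](30443), // K3s server (API) port on every host node
	},
}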
|
||||
|
||||
// ClusterStatus reflects the observed state of a Cluster.
|
||||
type ClusterStatus struct {
|
||||
// HostVersion is the Kubernetes version of the host node.
|
||||
//
|
||||
// +optional
|
||||
HostVersion string `json:"hostVersion,omitempty"`
|
||||
|
||||
// ClusterCIDR is the CIDR range for pod IPs.
|
||||
//
|
||||
// +optional
|
||||
ClusterCIDR string `json:"clusterCIDR,omitempty"`
|
||||
|
||||
// ServiceCIDR is the CIDR range for service IPs.
|
||||
//
|
||||
// +optional
|
||||
ServiceCIDR string `json:"serviceCIDR,omitempty"`
|
||||
|
||||
// ClusterDNS is the IP address for the CoreDNS service.
|
||||
//
|
||||
// +optional
|
||||
ClusterDNS string `json:"clusterDNS,omitempty"`
|
||||
|
||||
// TLSSANs specifies subject alternative names for the K3s server certificate.
|
||||
//
|
||||
// +optional
|
||||
TLSSANs []string `json:"tlsSANs,omitempty"`
|
||||
|
||||
// Persistence specifies options for persisting etcd data.
|
||||
//
|
||||
// +optional
|
||||
Persistence PersistenceConfig `json:"persistence,omitempty"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +kubebuilder:object:root=true
|
||||
|
||||
// ClusterList is a list of Cluster resources.
|
||||
type ClusterList struct {
|
||||
metav1.ListMeta `json:"metadata,omitempty"`
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
@@ -141,53 +312,119 @@ type ClusterList struct {
|
||||
Items []Cluster `json:"items"`
|
||||
}
|
||||
|
||||
|
||||
// +genclient
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +kubebuilder:storageversion
|
||||
// +kubebuilder:subresource:status
|
||||
// +kubebuilder:object:root=true
|
||||
|
||||
// ClusterSet represents a group of virtual Kubernetes clusters managed by k3k.
|
||||
// It allows defining common configurations and constraints for the clusters within the set.
|
||||
type ClusterSet struct {
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
||||
// Spec defines the desired state of the ClusterSet.
|
||||
//
|
||||
// +kubebuilder:default={}
|
||||
Spec ClusterSetSpec `json:"spec"`
|
||||
|
||||
// Status reflects the observed state of the ClusterSet.
|
||||
//
|
||||
// +optional
|
||||
Status ClusterSetStatus `json:"status,omitempty"`
|
||||
}
|
||||
|
||||
// ClusterSetSpec defines the desired state of a ClusterSet.
type ClusterSetSpec struct {
// DefaultLimits specifies the default resource limits for servers/agents when a cluster in the set doesn't provide any.
//
// +optional
DefaultLimits *ClusterLimit `json:"defaultLimits,omitempty"`

// DefaultNodeSelector specifies the node selector that applies to all clusters (server + agent) in the set.
//
// +optional
DefaultNodeSelector map[string]string `json:"defaultNodeSelector,omitempty"`

// DefaultPriorityClass specifies the priorityClassName applied to all pods of all clusters in the set.
//
// +optional
DefaultPriorityClass string `json:"defaultPriorityClass,omitempty"`
|
||||
|
||||
// MaxLimits specifies the maximum resource limits that apply to all clusters (server + agent) in the set.
|
||||
//
|
||||
// +optional
|
||||
MaxLimits v1.ResourceList `json:"maxLimits,omitempty"`
|
||||
|
||||
// AllowedNodeTypes specifies the allowed cluster provisioning modes. Defaults to [shared].
|
||||
//
|
||||
// +kubebuilder:default={shared}
|
||||
// +kubebuilder:validation:XValidation:message="mode is immutable",rule="self == oldSelf"
|
||||
// +kubebuilder:validation:MinItems=1
|
||||
// +optional
|
||||
AllowedNodeTypes []ClusterMode `json:"allowedNodeTypes,omitempty"`
|
||||
|
||||
// DisableNetworkPolicy indicates whether to disable the creation of a default network policy for cluster isolation.
|
||||
//
|
||||
// +optional
|
||||
DisableNetworkPolicy bool `json:"disableNetworkPolicy,omitempty"`
|
||||
|
||||
// PodSecurityAdmissionLevel specifies the pod security admission level applied to the pods in the namespace.
|
||||
//
|
||||
// +optional
|
||||
PodSecurityAdmissionLevel *PodSecurityAdmissionLevel `json:"podSecurityAdmissionLevel,omitempty"`
|
||||
}
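For illustration, a ClusterSetSpec that restricts the set to shared-mode clusters and caps total resources; it assumes the k8s.io/apimachinery/pkg/api/resource import, and the quantities are examples:

spec := ClusterSetSpec{
	AllowedNodeTypes: []ClusterMode{SharedClusterMode},
	MaxLimits: v1.ResourceList{
		v1.ResourceCPU:    resource.MustParse("4"),
		v1.ResourceMemory: resource.MustParse("8Gi"),
	},
	DisableNetworkPolicy: false, // keep the default isolation NetworkPolicy
}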
|
||||
|
||||
// PodSecurityAdmissionLevel is the policy level applied to the pods in the namespace.
|
||||
//
|
||||
// +kubebuilder:validation:Enum=privileged;baseline;restricted
|
||||
type PodSecurityAdmissionLevel string
|
||||
|
||||
const (
|
||||
// PrivilegedPodSecurityAdmissionLevel allows all pods to be admitted.
|
||||
PrivilegedPodSecurityAdmissionLevel = PodSecurityAdmissionLevel("privileged")
|
||||
|
||||
// BaselinePodSecurityAdmissionLevel enforces a baseline level of security restrictions.
|
||||
BaselinePodSecurityAdmissionLevel = PodSecurityAdmissionLevel("baseline")
|
||||
|
||||
// RestrictedPodSecurityAdmissionLevel enforces stricter security restrictions.
|
||||
RestrictedPodSecurityAdmissionLevel = PodSecurityAdmissionLevel("restricted")
|
||||
)
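These values mirror the standard Pod Security Admission levels; a hedged sketch of the upstream namespace label they typically translate to (the exact labels k3k applies are set by the ClusterSet controller, so this mapping is an assumption):

var ns v1.Namespace
if ns.Labels == nil {
	ns.Labels = map[string]string{}
}
// "pod-security.kubernetes.io/enforce" is the upstream Pod Security Admission enforce label.
ns.Labels["pod-security.kubernetes.io/enforce"] = string(RestrictedPodSecurityAdmissionLevel)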
|
||||
|
||||
// ClusterSetStatus reflects the observed state of a ClusterSet.
|
||||
type ClusterSetStatus struct {
|
||||
// ObservedGeneration was the generation at the time the status was updated.
|
||||
//
|
||||
// +optional
|
||||
|
||||
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
|
||||
|
||||
// LastUpdate is the timestamp when the status was last updated.
|
||||
//
|
||||
// +optional
|
||||
LastUpdate string `json:"lastUpdateTime,omitempty"`
|
||||
|
||||
// Summary is a summary of the status.
|
||||
//
|
||||
// +optional
|
||||
Summary string `json:"summary,omitempty"`
|
||||
|
||||
// Conditions are the individual conditions for the cluster set.
|
||||
//
|
||||
// +optional
|
||||
// +patchMergeKey=type
|
||||
// +patchStrategy=merge
|
||||
Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:object:root=true
|
||||
// ClusterSetList is a list of ClusterSet resources.
|
||||
type ClusterSetList struct {
|
||||
metav1.ListMeta `json:"metadata,omitempty"`
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
||||
|
||||
Items []ClusterSet `json:"items"`
|
||||
}
|
||||
|
||||
@@ -40,6 +40,7 @@ func CreateClientCertKey(commonName string, organization []string, altNames *cer
|
||||
if altNames != nil {
|
||||
cfg.AltNames = *altNames
|
||||
}
|
||||
|
||||
cert, err := certutil.NewSignedCert(cfg, key.(crypto.Signer), caCertPEM[0], caKeyPEM.(crypto.Signer))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
@@ -59,6 +60,7 @@ func generateKey() (data []byte, err error) {
|
||||
|
||||
func AddSANs(sans []string) certutil.AltNames {
|
||||
var altNames certutil.AltNames
|
||||
|
||||
for _, san := range sans {
|
||||
ip := net.ParseIP(san)
|
||||
if ip == nil {
|
||||
@@ -67,5 +69,6 @@ func AddSANs(sans []string) certutil.AltNames {
|
||||
altNames.IPs = append(altNames.IPs, ip)
|
||||
}
|
||||
}
|
||||
|
||||
return altNames
|
||||
}
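A hedged usage sketch: IP-formatted SANs end up in AltNames.IPs, while non-IP entries are presumably appended as DNS names in the branch elided by the hunk above:

altNames := AddSANs([]string{"10.0.0.1", "k3s.example.com"})
fmt.Println(len(altNames.IPs))      // 1 — the parsed IP address
fmt.Println(len(altNames.DNSNames)) // 1 — assuming the elided branch appends DNS names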
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
"github.com/rancher/k3k/pkg/controller"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
)
|
||||
@@ -47,8 +46,8 @@ func ensureObject(ctx context.Context, cfg *Config, obj ctrlruntimeclient.Object
|
||||
})
|
||||
|
||||
if result != controllerutil.OperationResultNone {
|
||||
key := client.ObjectKeyFromObject(obj)
|
||||
log.Info(fmt.Sprintf("ensuring %T", obj), "key", key, "result, result")
|
||||
key := ctrlruntimeclient.ObjectKeyFromObject(obj)
|
||||
log.Info(fmt.Sprintf("ensuring %T", obj), "key", key, "result", result)
|
||||
}
|
||||
|
||||
return err
|
||||
|
||||
@@ -19,7 +19,6 @@ import (
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
@@ -58,11 +57,11 @@ func (s *SharedAgent) EnsureResources(ctx context.Context) error {
|
||||
s.role(ctx),
|
||||
s.roleBinding(ctx),
|
||||
s.service(ctx),
|
||||
s.deployment(ctx),
|
||||
s.daemonset(ctx),
|
||||
s.dnsService(ctx),
|
||||
s.webhookTLS(ctx),
|
||||
); err != nil {
|
||||
return fmt.Errorf("failed to ensure some resources: %w\n", err)
|
||||
return fmt.Errorf("failed to ensure some resources: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -97,6 +96,7 @@ func sharedAgentData(cluster *v1alpha1.Cluster, serviceName, token, ip string) s
|
||||
if cluster.Spec.Version == "" {
|
||||
version = cluster.Status.HostVersion
|
||||
}
|
||||
|
||||
return fmt.Sprintf(`clusterName: %s
|
||||
clusterNamespace: %s
|
||||
serverIP: %s
|
||||
@@ -106,16 +106,16 @@ version: %s`,
|
||||
cluster.Name, cluster.Namespace, ip, serviceName, token, version)
|
||||
}
|
||||
|
||||
func (s *SharedAgent) deployment(ctx context.Context) error {
|
||||
func (s *SharedAgent) daemonset(ctx context.Context) error {
|
||||
labels := map[string]string{
|
||||
"cluster": s.cluster.Name,
|
||||
"type": "agent",
|
||||
"mode": "shared",
|
||||
}
|
||||
|
||||
deploy := &apps.Deployment{
|
||||
deploy := &apps.DaemonSet{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Deployment",
|
||||
Kind: "DaemonSet",
|
||||
APIVersion: "apps/v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@@ -123,7 +123,7 @@ func (s *SharedAgent) deployment(ctx context.Context) error {
|
||||
Namespace: s.cluster.Namespace,
|
||||
Labels: labels,
|
||||
},
|
||||
Spec: apps.DeploymentSpec{
|
||||
Spec: apps.DaemonSetSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: labels,
|
||||
},
|
||||
@@ -140,10 +140,9 @@ func (s *SharedAgent) deployment(ctx context.Context) error {
|
||||
}
|
||||
|
||||
func (s *SharedAgent) podSpec() v1.PodSpec {
|
||||
var limit v1.ResourceList
|
||||
|
||||
return v1.PodSpec{
|
||||
ServiceAccountName: s.Name(),
|
||||
NodeSelector: s.cluster.Spec.NodeSelector,
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: "config",
|
||||
@@ -188,7 +187,7 @@ func (s *SharedAgent) podSpec() v1.PodSpec {
|
||||
Image: s.image,
|
||||
ImagePullPolicy: v1.PullPolicy(s.imagePullPolicy),
|
||||
Resources: v1.ResourceRequirements{
|
||||
Limits: limit,
|
||||
Limits: v1.ResourceList{},
|
||||
},
|
||||
Args: []string{
|
||||
"--config",
|
||||
@@ -345,6 +344,11 @@ func (s *SharedAgent) role(ctx context.Context) error {
|
||||
Resources: []string{"clusters"},
|
||||
Verbs: []string{"get", "watch", "list"},
|
||||
},
|
||||
{
|
||||
APIGroups: []string{"coordination.k8s.io"},
|
||||
Resources: []string{"leases"},
|
||||
Verbs: []string{"*"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -390,7 +394,7 @@ func (s *SharedAgent) webhookTLS(ctx context.Context) error {
|
||||
},
|
||||
}
|
||||
|
||||
key := client.ObjectKeyFromObject(webhookSecret)
|
||||
key := ctrlruntimeclient.ObjectKeyFromObject(webhookSecret)
|
||||
if err := s.client.Get(ctx, key, webhookSecret); err != nil {
|
||||
if !apierrors.IsNotFound(err) {
|
||||
return err
|
||||
@@ -402,6 +406,7 @@ func (s *SharedAgent) webhookTLS(ctx context.Context) error {
|
||||
}
|
||||
|
||||
altNames := []string{s.Name(), s.cluster.Name}
|
||||
|
||||
webhookCert, webhookKey, err := newWebhookCerts(s.Name(), altNames, caPrivateKeyPEM, caCertPEM)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@@ -16,6 +16,7 @@ func Test_sharedAgentData(t *testing.T) {
|
||||
ip string
|
||||
token string
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
@@ -100,6 +101,7 @@ func Test_sharedAgentData(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
config := sharedAgentData(tt.args.cluster, tt.args.serviceName, tt.args.token, tt.args.ip)
|
||||
|
||||
@@ -41,7 +41,7 @@ func (v *VirtualAgent) EnsureResources(ctx context.Context) error {
|
||||
v.config(ctx),
|
||||
v.deployment(ctx),
|
||||
); err != nil {
|
||||
return fmt.Errorf("failed to ensure some resources: %w\n", err)
|
||||
return fmt.Errorf("failed to ensure some resources: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -81,6 +81,7 @@ func (v *VirtualAgent) deployment(ctx context.Context) error {
|
||||
image := controller.K3SImage(v.cluster)
|
||||
|
||||
const name = "k3k-agent"
|
||||
|
||||
selector := metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"cluster": v.cluster.Name,
|
||||
@@ -116,7 +117,9 @@ func (v *VirtualAgent) deployment(ctx context.Context) error {
|
||||
|
||||
func (v *VirtualAgent) podSpec(image, name string, args []string, affinitySelector *metav1.LabelSelector) v1.PodSpec {
|
||||
var limit v1.ResourceList
|
||||
|
||||
args = append([]string{"agent", "--config", "/opt/rancher/k3s/config.yaml"}, args...)
|
||||
|
||||
podSpec := v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
|
||||
@@ -12,6 +12,7 @@ func Test_virtualAgentData(t *testing.T) {
|
||||
serviceIP string
|
||||
token string
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
@@ -30,6 +31,7 @@ func Test_virtualAgentData(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
config := virtualAgentData(tt.args.serviceIP, tt.args.token)
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -39,8 +40,10 @@ const (
|
||||
|
||||
maxConcurrentReconciles = 1
|
||||
|
||||
defaultClusterCIDR = "10.44.0.0/16"
|
||||
defaultClusterServiceCIDR = "10.45.0.0/16"
|
||||
defaultVirtualClusterCIDR = "10.52.0.0/16"
|
||||
defaultVirtualServiceCIDR = "10.53.0.0/16"
|
||||
defaultSharedClusterCIDR = "10.42.0.0/16"
|
||||
defaultSharedServiceCIDR = "10.43.0.0/16"
|
||||
defaultStoragePersistentSize = "1G"
|
||||
memberRemovalTimeout = time.Minute * 1
|
||||
)
|
||||
@@ -170,18 +173,40 @@ func (c *ClusterReconciler) reconcileCluster(ctx context.Context, cluster *v1alp
|
||||
|
||||
cluster.Status.ClusterCIDR = cluster.Spec.ClusterCIDR
|
||||
if cluster.Status.ClusterCIDR == "" {
|
||||
cluster.Status.ClusterCIDR = defaultClusterCIDR
|
||||
cluster.Status.ClusterCIDR = defaultVirtualClusterCIDR
|
||||
if cluster.Spec.Mode == v1alpha1.SharedClusterMode {
|
||||
cluster.Status.ClusterCIDR = defaultSharedClusterCIDR
|
||||
}
|
||||
}
|
||||
|
||||
cluster.Status.ServiceCIDR = cluster.Spec.ServiceCIDR
|
||||
if cluster.Status.ServiceCIDR == "" {
|
||||
cluster.Status.ServiceCIDR = defaultClusterServiceCIDR
|
||||
// in shared mode try to lookup the serviceCIDR
|
||||
if cluster.Spec.Mode == v1alpha1.SharedClusterMode {
|
||||
log.Info("looking up Service CIDR for shared mode")
|
||||
|
||||
cluster.Status.ServiceCIDR, err = c.lookupServiceCIDR(ctx)
|
||||
|
||||
if err != nil {
|
||||
log.Error(err, "error while looking up Cluster Service CIDR")
|
||||
|
||||
cluster.Status.ServiceCIDR = defaultSharedServiceCIDR
|
||||
}
|
||||
}
|
||||
|
||||
// in virtual mode assign a default serviceCIDR
|
||||
if cluster.Spec.Mode == v1alpha1.VirtualClusterMode {
|
||||
log.Info("assign default service CIDR for virtual mode")
|
||||
|
||||
cluster.Status.ServiceCIDR = defaultVirtualServiceCIDR
|
||||
}
|
||||
}
|
||||
|
||||
service, err := c.ensureClusterService(ctx, cluster)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
serviceIP := service.Spec.ClusterIP
|
||||
|
||||
if err := c.createClusterConfigs(ctx, cluster, s, serviceIP); err != nil {
|
||||
@@ -232,8 +257,10 @@ func (c *ClusterReconciler) ensureBootstrapSecret(ctx context.Context, cluster *
|
||||
bootstrapSecret.Data = map[string][]byte{
|
||||
"bootstrap": bootstrapData,
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -259,9 +286,11 @@ func (c *ClusterReconciler) createClusterConfigs(ctx context.Context, cluster *v
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := controllerutil.SetControllerReference(cluster, serverConfig, c.Scheme); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := c.Client.Create(ctx, serverConfig); err != nil {
|
||||
if !apierrors.IsAlreadyExists(err) {
|
||||
return err
|
||||
@@ -284,8 +313,10 @@ func (c *ClusterReconciler) ensureClusterService(ctx context.Context, cluster *v
|
||||
}
|
||||
|
||||
currentService.Spec = expectedService.Spec
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -321,6 +352,7 @@ func (c *ClusterReconciler) ensureIngress(ctx context.Context, cluster *v1alpha1
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -341,23 +373,31 @@ func (c *ClusterReconciler) server(ctx context.Context, cluster *v1alpha1.Cluste
|
||||
if err := controllerutil.SetControllerReference(cluster, serverStatefulService, c.Scheme); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := c.Client.Create(ctx, serverStatefulService); err != nil {
|
||||
if !apierrors.IsAlreadyExists(err) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
serverStatefulSet, err := server.StatefulServer(ctx)
|
||||
expectedServerStatefulSet, err := server.StatefulServer(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
result, err := controllerutil.CreateOrUpdate(ctx, c.Client, serverStatefulSet, func() error {
|
||||
return controllerutil.SetControllerReference(cluster, serverStatefulSet, c.Scheme)
|
||||
currentServerStatefulSet := expectedServerStatefulSet.DeepCopy()
|
||||
result, err := controllerutil.CreateOrUpdate(ctx, c.Client, currentServerStatefulSet, func() error {
|
||||
if err := controllerutil.SetControllerReference(cluster, currentServerStatefulSet, c.Scheme); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
currentServerStatefulSet.Spec = expectedServerStatefulSet.Spec
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
if result != controllerutil.OperationResultNone {
|
||||
key := client.ObjectKeyFromObject(serverStatefulSet)
|
||||
key := client.ObjectKeyFromObject(currentServerStatefulSet)
|
||||
log.Info("ensuring serverStatefulSet", "key", key, "result", result)
|
||||
}
|
||||
|
||||
@@ -373,6 +413,7 @@ func (c *ClusterReconciler) bindNodeProxyClusterRole(ctx context.Context, cluste
|
||||
subjectName := controller.SafeConcatNameWithPrefix(cluster.Name, agent.SharedNodeAgentName)
|
||||
|
||||
found := false
|
||||
|
||||
for _, subject := range clusterRoleBinding.Subjects {
|
||||
if subject.Name == subjectName && subject.Namespace == cluster.Namespace {
|
||||
found = true
|
||||
@@ -407,5 +448,83 @@ func (c *ClusterReconciler) validate(cluster *v1alpha1.Cluster) error {
|
||||
if cluster.Name == ClusterInvalidName {
|
||||
return errors.New("invalid cluster name " + cluster.Name + " no action will be taken")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// lookupServiceCIDR attempts to determine the cluster's service CIDR.
|
||||
// It first attempts to create a failing Service (with an invalid cluster IP) and extracts the expected CIDR from the resulting error.
|
||||
// If that fails, it searches the 'kube-apiserver' Pod's arguments for the --service-cluster-ip-range flag.
|
||||
func (c *ClusterReconciler) lookupServiceCIDR(ctx context.Context) (string, error) {
|
||||
log := ctrl.LoggerFrom(ctx)
|
||||
|
||||
// Try to look up the serviceCIDR by creating a failing Service.
// The error message should contain the expected serviceCIDR.
|
||||
|
||||
log.Info("looking up serviceCIDR from a failing service creation")
|
||||
|
||||
failingSvc := v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "fail", Namespace: "default"},
|
||||
Spec: v1.ServiceSpec{ClusterIP: "1.1.1.1"},
|
||||
}
|
||||
|
||||
if err := c.Client.Create(ctx, &failingSvc); err != nil {
|
||||
splittedErrMsg := strings.Split(err.Error(), "The range of valid IPs is ")
|
||||
|
||||
if len(splittedErrMsg) > 1 {
|
||||
serviceCIDR := strings.TrimSpace(splittedErrMsg[1])
|
||||
log.Info("found serviceCIDR from failing service creation: " + serviceCIDR)
|
||||
|
||||
// validate serviceCIDR
|
||||
_, serviceCIDRAddr, err := net.ParseCIDR(serviceCIDR)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return serviceCIDRAddr.String(), nil
|
||||
}
|
||||
}
|
||||
|
||||
// Otherwise, look for the kube-apiserver Pod and check its '--service-cluster-ip-range' flag.
|
||||
|
||||
log.Info("looking up serviceCIDR from kube-apiserver pod")
|
||||
|
||||
matchingLabels := ctrlruntimeclient.MatchingLabels(map[string]string{
|
||||
"component": "kube-apiserver",
|
||||
"tier": "control-plane",
|
||||
})
|
||||
listOpts := &ctrlruntimeclient.ListOptions{Namespace: "kube-system"}
|
||||
matchingLabels.ApplyToList(listOpts)
|
||||
|
||||
var podList v1.PodList
|
||||
if err := c.Client.List(ctx, &podList, listOpts); err != nil {
|
||||
if !apierrors.IsNotFound(err) {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
if len(podList.Items) > 0 {
|
||||
apiServerPod := podList.Items[0]
|
||||
apiServerArgs := apiServerPod.Spec.Containers[0].Args
|
||||
|
||||
for _, arg := range apiServerArgs {
|
||||
if strings.HasPrefix(arg, "--service-cluster-ip-range=") {
|
||||
serviceCIDR := strings.TrimPrefix(arg, "--service-cluster-ip-range=")
|
||||
log.Info("found serviceCIDR from kube-apiserver pod: " + serviceCIDR)
|
||||
|
||||
// validate serviceCIDR
|
||||
_, serviceCIDRAddr, err := net.ParseCIDR(serviceCIDR)
|
||||
if err != nil {
|
||||
log.Error(err, "serviceCIDR is not valid")
|
||||
break
|
||||
}
|
||||
|
||||
return serviceCIDRAddr.String(), nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
log.Info("cannot find serviceCIDR from lookup")
|
||||
|
||||
return "", nil
|
||||
}
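A small sketch of the first strategy in isolation: the apiserver rejects an out-of-range ClusterIP with a message ending in the valid range, which the code above splits on. The exact error wording and the errMsg variable are assumptions for illustration:

errMsg := `Service "fail" is invalid: spec.clusterIPs: Invalid value: []string{"1.1.1.1"}: the provided IP (1.1.1.1) is not in the valid range. The range of valid IPs is 10.43.0.0/16`
if parts := strings.Split(errMsg, "The range of valid IPs is "); len(parts) > 1 {
	if _, cidr, err := net.ParseCIDR(strings.TrimSpace(parts[1])); err == nil {
		fmt.Println(cidr.String()) // 10.43.0.0/16
	}
}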
|
||||
|
||||
@@ -34,6 +34,7 @@ func (c *ClusterReconciler) finalizeCluster(ctx context.Context, cluster v1alpha
|
||||
for _, pod := range podList.Items {
|
||||
if controllerutil.ContainsFinalizer(&pod, etcdPodFinalizerName) {
|
||||
controllerutil.RemoveFinalizer(&pod, etcdPodFinalizerName)
|
||||
|
||||
if err := c.Client.Update(ctx, &pod); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
@@ -47,10 +48,12 @@ func (c *ClusterReconciler) finalizeCluster(ctx context.Context, cluster v1alpha
|
||||
if controllerutil.ContainsFinalizer(&cluster, clusterFinalizerName) {
|
||||
// remove finalizer from the cluster and update it.
|
||||
controllerutil.RemoveFinalizer(&cluster, clusterFinalizerName)
|
||||
|
||||
if err := c.Client.Update(ctx, &cluster); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
@@ -63,6 +66,7 @@ func (c *ClusterReconciler) unbindNodeProxyClusterRole(ctx context.Context, clus
|
||||
subjectName := controller.SafeConcatNameWithPrefix(cluster.Name, agent.SharedNodeAgentName)
|
||||
|
||||
var cleanedSubjects []rbacv1.Subject
|
||||
|
||||
for _, subject := range clusterRoleBinding.Subjects {
|
||||
if subject.Name != subjectName || subject.Namespace != cluster.Namespace {
|
||||
cleanedSubjects = append(cleanedSubjects, subject)
|
||||
@@ -75,5 +79,6 @@ func (c *ClusterReconciler) unbindNodeProxyClusterRole(ctx context.Context, clus
|
||||
}
|
||||
|
||||
clusterRoleBinding.Subjects = cleanedSubjects
|
||||
|
||||
return c.Client.Update(ctx, clusterRoleBinding)
|
||||
}
|
||||
|
||||
@@ -66,16 +66,20 @@ func (p *PodReconciler) Reconcile(ctx context.Context, req reconcile.Request) (r
|
||||
if len(s) < 1 {
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
if s[0] != "k3k" {
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
clusterName := s[1]
|
||||
|
||||
var cluster v1alpha1.Cluster
|
||||
if err := p.Client.Get(ctx, types.NamespacedName{Name: clusterName, Namespace: req.Namespace}, &cluster); err != nil {
|
||||
if !apierrors.IsNotFound(err) {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
matchingLabels := ctrlruntimeclient.MatchingLabels(map[string]string{"role": "server"})
|
||||
listOpts := &ctrlruntimeclient.ListOptions{Namespace: req.Namespace}
|
||||
matchingLabels.ApplyToList(listOpts)
|
||||
@@ -84,14 +88,17 @@ func (p *PodReconciler) Reconcile(ctx context.Context, req reconcile.Request) (r
|
||||
if err := p.Client.List(ctx, &podList, listOpts); err != nil {
|
||||
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
if len(podList.Items) == 1 {
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
for _, pod := range podList.Items {
|
||||
if err := p.handleServerPod(ctx, cluster, &pod); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
@@ -115,16 +122,20 @@ func (p *PodReconciler) handleServerPod(ctx context.Context, cluster v1alpha1.Cl
|
||||
if cluster.Name == "" {
|
||||
if controllerutil.ContainsFinalizer(pod, etcdPodFinalizerName) {
|
||||
controllerutil.RemoveFinalizer(pod, etcdPodFinalizerName)
|
||||
|
||||
if err := p.Client.Update(ctx, pod); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
tlsConfig, err := p.getETCDTLS(ctx, &cluster)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// remove server from etcd
|
||||
client, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: []string{
|
||||
@@ -143,11 +154,13 @@ func (p *PodReconciler) handleServerPod(ctx context.Context, cluster v1alpha1.Cl
|
||||
// remove our finalizer from the list and update it.
|
||||
if controllerutil.ContainsFinalizer(pod, etcdPodFinalizerName) {
|
||||
controllerutil.RemoveFinalizer(pod, etcdPodFinalizerName)
|
||||
|
||||
if err := p.Client.Update(ctx, pod); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !controllerutil.ContainsFinalizer(pod, etcdPodFinalizerName) {
|
||||
controllerutil.AddFinalizer(pod, etcdPodFinalizerName)
|
||||
return p.Client.Update(ctx, pod)
|
||||
@@ -164,9 +177,11 @@ func (p *PodReconciler) getETCDTLS(ctx context.Context, cluster *v1alpha1.Cluste
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
endpoint := server.ServiceName(cluster.Name) + "." + cluster.Namespace
|
||||
|
||||
var b *bootstrap.ControlRuntimeBootstrap
|
||||
|
||||
if err := retry.OnError(k3kcontroller.Backoff, func(err error) bool {
|
||||
return true
|
||||
}, func() error {
|
||||
@@ -181,6 +196,7 @@ func (p *PodReconciler) getETCDTLS(ctx context.Context, cluster *v1alpha1.Cluste
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
clientCert, err := tls.X509KeyPair(etcdCert, etcdKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -190,6 +206,7 @@ func (p *PodReconciler) getETCDTLS(ctx context.Context, cluster *v1alpha1.Cluste
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pool := x509.NewCertPool()
|
||||
pool.AddCert(cert[0])
|
||||
|
||||
@@ -206,6 +223,7 @@ func removePeer(ctx context.Context, client *clientv3.Client, name, address stri
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, memberRemovalTimeout)
|
||||
defer cancel()
|
||||
|
||||
members, err := client.MemberList(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -215,6 +233,7 @@ func removePeer(ctx context.Context, client *clientv3.Client, name, address stri
|
||||
if !strings.Contains(member.Name, name) {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, peerURL := range member.PeerURLs {
|
||||
u, err := url.Parse(peerURL)
|
||||
if err != nil {
|
||||
@@ -224,9 +243,11 @@ func removePeer(ctx context.Context, client *clientv3.Client, name, address stri
|
||||
if u.Hostname() == address {
|
||||
log.Info("removing member from etcd", "name", member.Name, "id", member.ID, "address", address)
|
||||
_, err := client.MemberRemove(ctx, member.ID)
|
||||
|
||||
if errors.Is(err, rpctypes.ErrGRPCMemberNotFound) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -237,18 +258,23 @@ func removePeer(ctx context.Context, client *clientv3.Client, name, address stri
|
||||
|
||||
func (p *PodReconciler) clusterToken(ctx context.Context, cluster *v1alpha1.Cluster) (string, error) {
|
||||
var tokenSecret v1.Secret
|
||||
|
||||
nn := types.NamespacedName{
|
||||
Name: TokenSecretName(cluster.Name),
|
||||
Namespace: cluster.Namespace,
|
||||
}
|
||||
|
||||
if cluster.Spec.TokenSecretRef != nil {
|
||||
nn.Name = TokenSecretName(cluster.Name)
|
||||
}
|
||||
|
||||
if err := p.Client.Get(ctx, nn, &tokenSecret); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if _, ok := tokenSecret.Data["token"]; !ok {
|
||||
return "", fmt.Errorf("no token field in secret %s/%s", nn.Namespace, nn.Name)
|
||||
}
|
||||
|
||||
return string(tokenSecret.Data["token"]), nil
|
||||
}
|
||||
|
||||
@@ -45,7 +45,6 @@ func GenerateBootstrapData(ctx context.Context, cluster *v1alpha1.Cluster, ip, t
|
||||
}
|
||||
|
||||
return json.Marshal(bootstrap)
|
||||
|
||||
}
|
||||
|
||||
func requestBootstrap(token, serverIP string) (*ControlRuntimeBootstrap, error) {
|
||||
@@ -64,6 +63,7 @@ func requestBootstrap(token, serverIP string) (*ControlRuntimeBootstrap, error)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
req.Header.Add("Authorization", "Basic "+basicAuth("server", token))
|
||||
|
||||
resp, err := client.Do(req)
|
||||
@@ -91,6 +91,7 @@ func decodeBootstrap(bootstrap *ControlRuntimeBootstrap) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bootstrap.ClientCA.Content = string(decoded)
|
||||
|
||||
//client-ca-key
|
||||
@@ -98,6 +99,7 @@ func decodeBootstrap(bootstrap *ControlRuntimeBootstrap) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bootstrap.ClientCAKey.Content = string(decoded)
|
||||
|
||||
//server-ca
|
||||
@@ -105,6 +107,7 @@ func decodeBootstrap(bootstrap *ControlRuntimeBootstrap) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bootstrap.ServerCA.Content = string(decoded)
|
||||
|
||||
//server-ca-key
|
||||
@@ -112,6 +115,7 @@ func decodeBootstrap(bootstrap *ControlRuntimeBootstrap) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bootstrap.ServerCAKey.Content = string(decoded)
|
||||
|
||||
//etcd-ca
|
||||
@@ -119,6 +123,7 @@ func decodeBootstrap(bootstrap *ControlRuntimeBootstrap) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bootstrap.ETCDServerCA.Content = string(decoded)
|
||||
|
||||
//etcd-ca-key
|
||||
@@ -126,6 +131,7 @@ func decodeBootstrap(bootstrap *ControlRuntimeBootstrap) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bootstrap.ETCDServerCAKey.Content = string(decoded)
|
||||
|
||||
return nil
|
||||
@@ -162,5 +168,6 @@ func GetFromSecret(ctx context.Context, client client.Client, cluster *v1alpha1.
|
||||
|
||||
var bootstrap ControlRuntimeBootstrap
|
||||
err := json.Unmarshal(bootstrapData, &bootstrap)
|
||||
|
||||
return &bootstrap, err
|
||||
}
|
||||
|
||||
@@ -22,6 +22,7 @@ func (s *Server) Config(init bool, serviceIP string) (*v1.Secret, error) {
|
||||
if init {
|
||||
config = initConfigData(s.cluster, s.token)
|
||||
}
|
||||
|
||||
return &v1.Secret{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Secret",
|
||||
@@ -52,21 +53,26 @@ func serverOptions(cluster *v1alpha1.Cluster, token string) string {
|
||||
if token != "" {
|
||||
opts = "token: " + token + "\n"
|
||||
}
|
||||
|
||||
if cluster.Status.ClusterCIDR != "" {
|
||||
opts = opts + "cluster-cidr: " + cluster.Status.ClusterCIDR + "\n"
|
||||
}
|
||||
|
||||
if cluster.Status.ServiceCIDR != "" {
|
||||
opts = opts + "service-cidr: " + cluster.Status.ServiceCIDR + "\n"
|
||||
}
|
||||
|
||||
if cluster.Spec.ClusterDNS != "" {
|
||||
opts = opts + "cluster-dns: " + cluster.Spec.ClusterDNS + "\n"
|
||||
}
|
||||
|
||||
if len(cluster.Status.TLSSANs) > 0 {
|
||||
opts = opts + "tls-san:\n"
|
||||
for _, addr := range cluster.Status.TLSSANs {
|
||||
opts = opts + "- " + addr + "\n"
|
||||
}
|
||||
}
|
||||
|
||||
if cluster.Spec.Mode != agent.VirtualNodeMode {
|
||||
opts = opts + "disable-agent: true\negress-selector-mode: disabled\ndisable:\n- servicelb\n- traefik\n- metrics-server\n- local-storage"
|
||||
}
|
||||
@@ -79,5 +85,6 @@ func configSecretName(clusterName string, init bool) string {
|
||||
if !init {
|
||||
return controller.SafeConcatNameWithPrefix(clusterName, configName)
|
||||
}
|
||||
|
||||
return controller.SafeConcatNameWithPrefix(clusterName, initConfigName)
|
||||
}
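For reference, a sketch of the YAML that serverOptions renders for a shared-mode cluster with the default CIDRs, inferred from the option-building code above; the token value is a placeholder:

const exampleServerConfig = `token: <redacted>
cluster-cidr: 10.42.0.0/16
service-cidr: 10.43.0.0/16
disable-agent: true
egress-selector-mode: disabled
disable:
- servicelb
- traefik
- metrics-server
- local-storage`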
|
||||
|
||||
@@ -50,6 +50,7 @@ func (s *Server) podSpec(image, name string, persistent bool, startupCmd string)
|
||||
if s.cluster.Spec.Limit != nil && s.cluster.Spec.Limit.ServerLimit != nil {
|
||||
limit = s.cluster.Spec.Limit.ServerLimit
|
||||
}
|
||||
|
||||
podSpec := v1.PodSpec{
|
||||
NodeSelector: s.cluster.Spec.NodeSelector,
|
||||
PriorityClassName: s.cluster.Spec.PriorityClass,
|
||||
@@ -218,6 +219,7 @@ func (s *Server) podSpec(image, name string, persistent bool, startupCmd string)
|
||||
Privileged: ptr.To(true),
|
||||
}
|
||||
}
|
||||
|
||||
return podSpec
|
||||
}
|
||||
|
||||
@@ -228,18 +230,22 @@ func (s *Server) StatefulServer(ctx context.Context) (*apps.StatefulSet, error)
|
||||
err error
|
||||
persistent bool
|
||||
)
|
||||
|
||||
image := controller.K3SImage(s.cluster)
|
||||
name := controller.SafeConcatNameWithPrefix(s.cluster.Name, serverName)
|
||||
|
||||
replicas = *s.cluster.Spec.Servers
|
||||
|
||||
if s.cluster.Spec.Persistence.Type == v1alpha1.DynamicNodesType {
|
||||
if s.cluster.Spec.Persistence.Type == v1alpha1.DynamicPersistenceMode {
|
||||
persistent = true
|
||||
pvClaim = s.setupDynamicPersistence()
|
||||
}
|
||||
|
||||
var volumes []v1.Volume
|
||||
var volumeMounts []v1.VolumeMount
|
||||
var (
|
||||
volumes []v1.Volume
|
||||
volumeMounts []v1.VolumeMount
|
||||
)
|
||||
|
||||
for _, addon := range s.cluster.Spec.Addons {
|
||||
namespace := k3kSystemNamespace
|
||||
if addon.SecretNamespace != "" {
|
||||
@@ -306,6 +312,7 @@ func (s *Server) StatefulServer(ctx context.Context) (*apps.StatefulSet, error)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
podSpec := s.podSpec(image, name, persistent, startupCommand)
|
||||
podSpec.Volumes = append(podSpec.Volumes, volumes...)
|
||||
podSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts, volumeMounts...)
|
||||
@@ -332,7 +339,7 @@ func (s *Server) StatefulServer(ctx context.Context) (*apps.StatefulSet, error)
|
||||
},
|
||||
},
|
||||
}
|
||||
if s.cluster.Spec.Persistence.Type == v1alpha1.DynamicNodesType {
|
||||
if s.cluster.Spec.Persistence.Type == v1alpha1.DynamicPersistenceMode {
|
||||
ss.Spec.VolumeClaimTemplates = []v1.PersistentVolumeClaim{pvClaim}
|
||||
}
|
||||
|
||||
@@ -359,7 +366,6 @@ func (s *Server) setupDynamicPersistence() v1.PersistentVolumeClaim {
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (s *Server) setupStartCommand() (string, error) {
|
||||
@@ -369,10 +375,12 @@ func (s *Server) setupStartCommand() (string, error) {
|
||||
if *s.cluster.Spec.Servers > 1 {
|
||||
tmpl = HAServerTemplate
|
||||
}
|
||||
|
||||
tmplCmd, err := template.New("").Parse(tmpl)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if err := tmplCmd.Execute(&output, map[string]string{
|
||||
"ETCD_DIR": "/var/lib/rancher/k3s/server/db/etcd",
|
||||
"INIT_CONFIG": "/opt/rancher/k3s/init/config.yaml",
|
||||
@@ -381,5 +389,6 @@ func (s *Server) setupStartCommand() (string, error) {
|
||||
}); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return output.String(), nil
|
||||
}
|
||||
|
||||
@@ -54,9 +54,11 @@ func Service(cluster *v1alpha1.Cluster) *v1.Service {
|
||||
if nodePortConfig.ServerPort != nil {
|
||||
k3sServerPort.NodePort = *nodePortConfig.ServerPort
|
||||
}
|
||||
|
||||
if nodePortConfig.ServicePort != nil {
|
||||
k3sServicePort.NodePort = *nodePortConfig.ServicePort
|
||||
}
|
||||
|
||||
if nodePortConfig.ETCDPort != nil {
|
||||
etcdPort.NodePort = *nodePortConfig.ETCDPort
|
||||
}
|
||||
|
||||
@@ -26,13 +26,17 @@ func (c *ClusterReconciler) token(ctx context.Context, cluster *v1alpha1.Cluster
|
||||
Name: cluster.Spec.TokenSecretRef.Name,
|
||||
Namespace: cluster.Spec.TokenSecretRef.Namespace,
|
||||
}
|
||||
|
||||
var tokenSecret v1.Secret
|
||||
|
||||
if err := c.Client.Get(ctx, nn, &tokenSecret); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if _, ok := tokenSecret.Data["token"]; !ok {
|
||||
return "", fmt.Errorf("no token field in secret %s/%s", nn.Namespace, nn.Name)
|
||||
}
|
||||
|
||||
return string(tokenSecret.Data["token"]), nil
|
||||
}
|
||||
|
||||
@@ -75,15 +79,16 @@ func (c *ClusterReconciler) ensureTokenSecret(ctx context.Context, cluster *v1al
|
||||
}
|
||||
|
||||
return token, err
|
||||
|
||||
}
|
||||
|
||||
func random(size int) (string, error) {
|
||||
token := make([]byte, size)
|
||||
|
||||
_, err := rand.Read(token)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return hex.EncodeToString(token), err
|
||||
}
|
||||
|
||||
|
||||
@@ -67,10 +67,13 @@ func Add(ctx context.Context, mgr manager.Manager, clusterCIDR string) error {
|
||||
// namespaceEventHandler will enqueue reconciling requests for all the ClusterSets in the changed namespace
|
||||
func namespaceEventHandler(reconciler ClusterSetReconciler) handler.MapFunc {
|
||||
return func(ctx context.Context, obj client.Object) []reconcile.Request {
|
||||
var requests []reconcile.Request
|
||||
var set v1alpha1.ClusterSetList
|
||||
var (
|
||||
requests []reconcile.Request
|
||||
set v1alpha1.ClusterSetList
|
||||
)
|
||||
|
||||
_ = reconciler.Client.List(ctx, &set, client.InNamespace(obj.GetName()))
|
||||
|
||||
for _, clusterSet := range set.Items {
|
||||
requests = append(requests, reconcile.Request{
|
||||
NamespacedName: types.NamespacedName{
|
||||
@@ -87,10 +90,13 @@ func namespaceEventHandler(reconciler ClusterSetReconciler) handler.MapFunc {
|
||||
// sameNamespaceEventHandler will enqueue reconciling requests for all the ClusterSets in the changed namespace
|
||||
func sameNamespaceEventHandler(reconciler ClusterSetReconciler) handler.MapFunc {
|
||||
return func(ctx context.Context, obj client.Object) []reconcile.Request {
|
||||
var requests []reconcile.Request
|
||||
var set v1alpha1.ClusterSetList
|
||||
var (
|
||||
requests []reconcile.Request
|
||||
set v1alpha1.ClusterSetList
|
||||
)
|
||||
|
||||
_ = reconciler.Client.List(ctx, &set, client.InNamespace(obj.GetNamespace()))
|
||||
|
||||
for _, clusterSet := range set.Items {
|
||||
requests = append(requests, reconcile.Request{
|
||||
NamespacedName: types.NamespacedName{
|
||||
@@ -199,6 +205,7 @@ func netpol(ctx context.Context, clusterCIDR string, clusterSet *v1alpha1.Cluste
|
||||
if err := client.List(ctx, &nodeList); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, node := range nodeList.Items {
|
||||
cidrList = append(cidrList, node.Spec.PodCIDRs...)
|
||||
}
|
||||
@@ -261,6 +268,7 @@ func (c *ClusterSetReconciler) reconcileNamespacePodSecurityLabels(ctx context.C
|
||||
log.Info("reconciling Namespace")
|
||||
|
||||
var ns v1.Namespace
|
||||
|
||||
key := types.NamespacedName{Name: clusterSet.Namespace}
|
||||
if err := c.Client.Get(ctx, key, &ns); err != nil {
|
||||
return err
|
||||
@@ -295,8 +303,10 @@ func (c *ClusterSetReconciler) reconcileNamespacePodSecurityLabels(ctx context.C
|
||||
log.V(1).Info("labels changed, updating namespace")
|
||||
|
||||
ns.Labels = newLabels
|
||||
|
||||
return c.Client.Update(ctx, &ns)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -68,6 +68,7 @@ func (n *NodeReconciler) ensureNetworkPolicies(ctx context.Context, clusterSetLi
|
||||
log.Info("ensuring network policies")
|
||||
|
||||
var setNetworkPolicy *networkingv1.NetworkPolicy
|
||||
|
||||
for _, cs := range clusterSetList.Items {
|
||||
if cs.Spec.DisableNetworkPolicy {
|
||||
continue
|
||||
@@ -78,14 +79,17 @@ func (n *NodeReconciler) ensureNetworkPolicies(ctx context.Context, clusterSetLi
|
||||
|
||||
var err error
|
||||
setNetworkPolicy, err = netpol(ctx, "", &cs, n.Client)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Info("new NetworkPolicy for clusterset")
|
||||
|
||||
if err := n.Client.Update(ctx, setNetworkPolicy); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -51,7 +51,9 @@ func SafeConcatName(name ...string) string {
|
||||
if len(fullPath) < 64 {
|
||||
return fullPath
|
||||
}
|
||||
|
||||
digest := sha256.Sum256([]byte(fullPath))
|
||||
|
||||
// since we cut the string in the middle, the last char may not be compatible with what is expected in k8s
|
||||
// we are checking and if necessary removing the last char
|
||||
c := fullPath[56]
|
||||
|
||||
@@ -40,6 +40,7 @@ func (k *KubeConfig) Extract(ctx context.Context, client client.Client, cluster
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
serverCACert := []byte(bootstrapData.ServerCA.Content)
|
||||
|
||||
adminCert, adminKey, err := certs.CreateClientCertKey(
|
||||
@@ -110,6 +111,7 @@ func getURLFromService(ctx context.Context, client client.Client, cluster *v1alp
|
||||
expose := cluster.Spec.Expose
|
||||
if expose != nil && expose.Ingress != nil {
|
||||
var k3kIngress networkingv1.Ingress
|
||||
|
||||
ingressKey := types.NamespacedName{
|
||||
Name: server.IngressName(cluster.Name),
|
||||
Namespace: cluster.Namespace,
|
||||
@@ -118,6 +120,7 @@ func getURLFromService(ctx context.Context, client client.Client, cluster *v1alp
|
||||
if err := client.Get(ctx, ingressKey, &k3kIngress); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
url = fmt.Sprintf("https://%s", k3kIngress.Spec.Rules[0].Host)
|
||||
}
|
||||
|
||||
|
||||
@@ -1,17 +1,11 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e pipefail
|
||||
set -eou pipefail
|
||||
|
||||
TAG=$(git describe --tag --always --match="v[0-9]*")
|
||||
LDFLAGS="-X \"github.com/rancher/k3k/pkg/buildinfo.Version=${VERSION}\""
|
||||
|
||||
if [ -n "$(git status --porcelain --untracked-files=no)" ]; then
|
||||
TAG="${TAG}-dirty"
|
||||
fi
|
||||
|
||||
LDFLAGS="-X \"github.com/rancher/k3k/pkg/buildinfo.Version=${TAG}\""
|
||||
|
||||
echo "Building k3k..."
|
||||
echo "Current TAG: ${TAG}"
|
||||
echo "Building k3k... [cli os/arch: $(go env GOOS)/$(go env GOARCH)]"
|
||||
echo "Current TAG: ${VERSION} "
|
||||
|
||||
export CGO_ENABLED=0
|
||||
GOOS=linux GOARCH=amd64 go build -ldflags="${LDFLAGS}" -o bin/k3k
|
||||
@@ -19,6 +13,3 @@ GOOS=linux GOARCH=amd64 go build -ldflags="${LDFLAGS}" -o bin/k3k-kubelet ./k3k-
|
||||
|
||||
# build the cli for the local OS and ARCH
|
||||
go build -ldflags="${LDFLAGS}" -o bin/k3kcli ./cli
|
||||
|
||||
docker build -f package/Dockerfile -t rancher/k3k:dev -t rancher/k3k:${TAG} .
|
||||
docker build -f package/Dockerfile.kubelet -t rancher/k3k-kubelet:dev -t rancher/k3k-kubelet:${TAG} .
|
||||
|
||||
@@ -67,7 +67,7 @@ var _ = When("a ephemeral cluster is installed", func() {
|
||||
NodePort: &v1alpha1.NodePortConfig{},
|
||||
},
|
||||
Persistence: v1alpha1.PersistenceConfig{
|
||||
Type: v1alpha1.EphemeralNodeType,
|
||||
Type: v1alpha1.EphemeralPersistenceMode,
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -134,7 +134,7 @@ var _ = When("a ephemeral cluster is installed", func() {
|
||||
NodePort: &v1alpha1.NodePortConfig{},
|
||||
},
|
||||
Persistence: v1alpha1.PersistenceConfig{
|
||||
Type: v1alpha1.EphemeralNodeType,
|
||||
Type: v1alpha1.EphemeralPersistenceMode,
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -223,7 +223,7 @@ var _ = When("a dynamic cluster is installed", func() {
|
||||
NodePort: &v1alpha1.NodePortConfig{},
|
||||
},
|
||||
Persistence: v1alpha1.PersistenceConfig{
|
||||
Type: v1alpha1.DynamicNodesType,
|
||||
Type: v1alpha1.DynamicPersistenceMode,
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -290,7 +290,7 @@ var _ = When("a dynamic cluster is installed", func() {
|
||||
NodePort: &v1alpha1.NodePortConfig{},
|
||||
},
|
||||
Persistence: v1alpha1.PersistenceConfig{
|
||||
Type: v1alpha1.DynamicNodesType,
|
||||
Type: v1alpha1.DynamicPersistenceMode,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -55,17 +55,19 @@ func NewVirtualCluster(cluster v1alpha1.Cluster) {
|
||||
WithTimeout(time.Minute * 2).
|
||||
WithPolling(time.Second * 5).
|
||||
Should(BeTrue())
|
||||
|
||||
}
|
||||
|
||||
// NewVirtualK8sClient returns a Kubernetes ClientSet for the virtual cluster
|
||||
func NewVirtualK8sClient(cluster v1alpha1.Cluster) *kubernetes.Clientset {
|
||||
GinkgoHelper()
|
||||
|
||||
var err error
|
||||
var (
|
||||
err error
|
||||
config *clientcmdapi.Config
|
||||
)
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
var config *clientcmdapi.Config
|
||||
Eventually(func() error {
|
||||
vKubeconfig := kubeconfig.New()
|
||||
kubeletAltName := fmt.Sprintf("k3k-%s-kubelet", cluster.Name)
|
||||
|
||||
@@ -161,8 +161,10 @@ func buildScheme() *runtime.Scheme {
|
||||
}
|
||||
|
||||
func writeK3kLogs() {
|
||||
var err error
|
||||
var podList v1.PodList
|
||||
var (
|
||||
err error
|
||||
podList v1.PodList
|
||||
)
|
||||
|
||||
ctx := context.Background()
|
||||
err = k8sClient.List(ctx, &podList, &client.ListOptions{Namespace: "k3k-system"})
|
||||
@@ -176,11 +178,14 @@ func writeK3kLogs() {
|
||||
}
|
||||
|
||||
func writeLogs(filename string, logs io.ReadCloser) {
|
||||
defer logs.Close()
|
||||
|
||||
logsStr, err := io.ReadAll(logs)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
defer logs.Close()
|
||||
|
||||
tempfile := path.Join(os.TempDir(), filename)
|
||||
err = os.WriteFile(tempfile, []byte(logsStr), 0644)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
fmt.Fprintln(GinkgoWriter, "logs written to: "+filename)
|
||||
}
|
||||
|
||||