Mirror of https://github.com/rancher/k3k.git (synced 2026-02-17 03:19:59 +00:00)

Compare commits: v0.3.4 ... chart-1.0. (40 commits)
| SHA1 |
| --- |
| 49a8d2a0ba |
| 2e6de51dab |
| 90aecbbb42 |
| af9e1d6ca7 |
| ae380fa8e9 |
| c34cf9ce94 |
| bf70e0d171 |
| cebf6594c4 |
| 075d72df5d |
| ee7eac89ce |
| 514fdf6b86 |
| 730e4e1c79 |
| a3076af38f |
| 89dc352bea |
| 7644406eeb |
| 2206632dcc |
| 8ffdc9bafd |
| 594c2571c3 |
| 12971f55a6 |
| 99f750525f |
| a0fd472841 |
| 7387fc1b23 |
| 9f265c73d9 |
| 00ef6d582c |
| 5c95ca3dfa |
| 6523b8339b |
| 80037e815f |
| 7585611792 |
| 0bd681ab60 |
| 4fe36b3d0c |
| 01589bb359 |
| 30217df268 |
| 04198652d5 |
| 72eb819216 |
| 4d4003f6f9 |
| aca01127f8 |
| 1550c6b45a |
| caf785f23b |
| b3f7a8ab7f |
| bd2494a0a9 |
**.github/workflows/test-conformance-virtual.yaml** (vendored, new file, 125 lines)

```yaml
name: Conformance Tests - Virtual Mode

on:
  schedule:
    - cron: "0 1 * * *"
  workflow_dispatch:

permissions:
  contents: read

jobs:
  conformance:
    runs-on: ubuntu-latest

    strategy:
      fail-fast: false
      matrix:
        type:
          - parallel
          - serial

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          fetch-tags: true

      - uses: actions/setup-go@v5
        with:
          go-version-file: go.mod

      - name: Install helm
        uses: azure/setup-helm@v4.3.0

      - name: Install hydrophone
        run: go install sigs.k8s.io/hydrophone@latest

      - name: Install k3s
        env:
          KUBECONFIG: /etc/rancher/k3s/k3s.yaml
          K3S_HOST_VERSION: v1.32.1+k3s1
        run: |
          curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=${K3S_HOST_VERSION} INSTALL_K3S_EXEC="--write-kubeconfig-mode=777" sh -s -

          kubectl cluster-info
          kubectl get nodes

      - name: Build, package and setup K3k
        env:
          KUBECONFIG: /etc/rancher/k3s/k3s.yaml
        run: |
          export REPO=ttl.sh/$(uuidgen)
          export VERSION=1h

          make build
          make package
          make push
          make install

          # add k3kcli to $PATH
          echo "${{ github.workspace }}/bin" >> $GITHUB_PATH

          echo "Wait for K3k controller to be available"
          kubectl wait -n k3k-system pod --for condition=Ready -l "app.kubernetes.io/name=k3k" --timeout=5m

      - name: Check k3kcli
        run: k3kcli -v

      - name: Create virtual cluster
        env:
          KUBECONFIG: /etc/rancher/k3s/k3s.yaml
        run: |
          k3kcli cluster create --mode=virtual --servers=2 mycluster

          export KUBECONFIG=${{ github.workspace }}/k3k-mycluster-mycluster-kubeconfig.yaml

          kubectl cluster-info
          kubectl get nodes
          kubectl get pods -A

      - name: Run conformance tests (parallel)
        if: matrix.type == 'parallel'
        run: |
          # Run conformance tests in parallel mode (skipping serial)
          hydrophone --conformance --parallel 4 --skip='\[Serial\]' \
            --kubeconfig ${{ github.workspace }}/k3k-mycluster-mycluster-kubeconfig.yaml \
            --output-dir /tmp

      - name: Run conformance tests (serial)
        if: matrix.type == 'serial'
        run: |
          # Run serial conformance tests
          hydrophone --focus='\[Serial\].*\[Conformance\]' \
            --kubeconfig ${{ github.workspace }}/k3k-mycluster-mycluster-kubeconfig.yaml \
            --output-dir /tmp

      - name: Export logs
        if: always()
        env:
          KUBECONFIG: /etc/rancher/k3s/k3s.yaml
        run: |
          journalctl -u k3s -o cat --no-pager > /tmp/k3s.log
          kubectl logs -n k3k-system -l "app.kubernetes.io/name=k3k" --tail=-1 > /tmp/k3k.log

      - name: Archive K3s logs
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: k3s-${{ matrix.type }}-logs
          path: /tmp/k3s.log

      - name: Archive K3k logs
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: k3k-${{ matrix.type }}-logs
          path: /tmp/k3k.log

      - name: Archive conformance logs
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: conformance-${{ matrix.type }}-logs
          path: /tmp/e2e.log
```
**.github/workflows/test-conformance.yaml** (vendored, 4 changed lines)

```diff
@@ -106,7 +106,7 @@ jobs:
           kubectl create namespace k3k-mycluster

           cat <<EOF | kubectl apply -f -
-          apiVersion: k3k.io/v1alpha1
+          apiVersion: k3k.io/v1beta1
           kind: Cluster
           metadata:
             name: mycluster
@@ -259,7 +259,7 @@ jobs:
           kubectl create namespace k3k-mycluster

           cat <<EOF | kubectl apply -f -
-          apiVersion: k3k.io/v1alpha1
+          apiVersion: k3k.io/v1beta1
           kind: Cluster
           metadata:
             name: mycluster
```
**.github/workflows/test-e2e.yaml** (vendored, new file, 184 lines)

```yaml
name: Tests E2E

on:
  push:
  pull_request:
  workflow_dispatch:

permissions:
  contents: read

jobs:
  validate:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - uses: actions/setup-go@v5
        with:
          go-version-file: go.mod

      - name: Validate
        run: make validate

  tests-e2e:
    runs-on: ubuntu-latest
    needs: validate

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          fetch-tags: true

      - uses: actions/setup-go@v5
        with:
          go-version-file: go.mod

      - name: Install Ginkgo
        run: go install github.com/onsi/ginkgo/v2/ginkgo

      - name: Setup environment
        run: |
          mkdir ${{ github.workspace }}/covdata

          echo "COVERAGE=true" >> $GITHUB_ENV
          echo "GOCOVERDIR=${{ github.workspace }}/covdata" >> $GITHUB_ENV
          echo "REPO=ttl.sh/$(uuidgen)" >> $GITHUB_ENV
          echo "VERSION=1h" >> $GITHUB_ENV
          # quote closed before the redirect so the variable is actually written to $GITHUB_ENV
          echo "K3S_HOST_VERSION=v1.32.1+k3s1" >> $GITHUB_ENV

      - name: Install k3s
        run: |
          curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=${{ env.K3S_HOST_VERSION }} INSTALL_K3S_EXEC="--write-kubeconfig-mode=777" sh -s -

      - name: Build and package and push dev images
        env:
          KUBECONFIG: /etc/rancher/k3s/k3s.yaml
          REPO: ${{ env.REPO }}
          VERSION: ${{ env.VERSION }}
        run: |
          make build
          make package
          make push
          make install

      - name: Run e2e tests
        env:
          KUBECONFIG: /etc/rancher/k3s/k3s.yaml
          REPO: ${{ env.REPO }}
          VERSION: ${{ env.VERSION }}
        run: make E2E_LABEL_FILTER="e2e && !slow" test-e2e

      - name: Convert coverage data
        run: go tool covdata textfmt -i=${GOCOVERDIR} -o ${GOCOVERDIR}/cover.out

      - name: Upload coverage reports to Codecov (controller)
        uses: codecov/codecov-action@v5
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          files: ${GOCOVERDIR}/cover.out
          flags: controller

      - name: Upload coverage reports to Codecov (e2e)
        uses: codecov/codecov-action@v5
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          files: ./cover.out
          flags: e2e

      - name: Archive k3s logs
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: e2e-k3s-logs
          path: /tmp/k3s.log

      - name: Archive k3k logs
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: e2e-k3k-logs
          path: /tmp/k3k.log

  tests-e2e-slow:
    runs-on: ubuntu-latest
    needs: validate

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          fetch-tags: true

      - uses: actions/setup-go@v5
        with:
          go-version-file: go.mod

      - name: Install Ginkgo
        run: go install github.com/onsi/ginkgo/v2/ginkgo

      - name: Setup environment
        run: |
          mkdir ${{ github.workspace }}/covdata

          echo "COVERAGE=true" >> $GITHUB_ENV
          echo "GOCOVERDIR=${{ github.workspace }}/covdata" >> $GITHUB_ENV
          echo "REPO=ttl.sh/$(uuidgen)" >> $GITHUB_ENV
          echo "VERSION=1h" >> $GITHUB_ENV
          # quote closed before the redirect so the variable is actually written to $GITHUB_ENV
          echo "K3S_HOST_VERSION=v1.32.1+k3s1" >> $GITHUB_ENV

      - name: Install k3s
        run: |
          curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=${{ env.K3S_HOST_VERSION }} INSTALL_K3S_EXEC="--write-kubeconfig-mode=777" sh -s -

      - name: Build and package and push dev images
        env:
          KUBECONFIG: /etc/rancher/k3s/k3s.yaml
          REPO: ${{ env.REPO }}
          VERSION: ${{ env.VERSION }}
        run: |
          make build
          make package
          make push
          make install

      - name: Run e2e tests
        env:
          KUBECONFIG: /etc/rancher/k3s/k3s.yaml
          REPO: ${{ env.REPO }}
          VERSION: ${{ env.VERSION }}
        run: make E2E_LABEL_FILTER="e2e && slow" test-e2e

      - name: Convert coverage data
        run: go tool covdata textfmt -i=${GOCOVERDIR} -o ${GOCOVERDIR}/cover.out

      - name: Upload coverage reports to Codecov (controller)
        uses: codecov/codecov-action@v5
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          files: ${GOCOVERDIR}/cover.out
          flags: controller

      - name: Upload coverage reports to Codecov (e2e)
        uses: codecov/codecov-action@v5
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          files: ./cover.out
          flags: e2e

      - name: Archive k3s logs
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: e2e-k3s-logs
          path: /tmp/k3s.log

      - name: Archive k3k logs
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: e2e-k3k-logs
          path: /tmp/k3k.log
```
**.github/workflows/test.yaml** (vendored, 59 changed lines)

```diff
@@ -62,59 +62,6 @@ jobs:
           files: ./cover.out
           flags: unit

-  tests-e2e:
-    runs-on: ubuntu-latest
-    needs: validate
-
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-          fetch-tags: true
-
-      - uses: actions/setup-go@v5
-        with:
-          go-version-file: go.mod
-
-      - name: Install Ginkgo
-        run: go install github.com/onsi/ginkgo/v2/ginkgo
-
-      - name: Build and package
-        run: |
-          make build
-          make package
-
-          # add k3kcli to $PATH
-          echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
-
-      - name: Check k3kcli
-        run: k3kcli -v
-
-      - name: Run e2e tests
-        run: make test-e2e
-
-      - name: Upload coverage reports to Codecov
-        uses: codecov/codecov-action@v5
-        with:
-          token: ${{ secrets.CODECOV_TOKEN }}
-          files: ./cover.out
-          flags: e2e
-
-      - name: Archive k3s logs
-        uses: actions/upload-artifact@v4
-        if: always()
-        with:
-          name: e2e-k3s-logs
-          path: /tmp/k3s.log
-
-      - name: Archive k3k logs
-        uses: actions/upload-artifact@v4
-        if: always()
-        with:
-          name: e2e-k3k-logs
-          path: /tmp/k3k.log
-
   tests-cli:
     runs-on: ubuntu-latest
     needs: validate
@@ -133,12 +80,13 @@ jobs:
       - name: Install Ginkgo
         run: go install github.com/onsi/ginkgo/v2/ginkgo

-      - name: Set coverage environment
+      - name: Setup environment
        run: |
          mkdir ${{ github.workspace }}/covdata

          echo "COVERAGE=true" >> $GITHUB_ENV
          echo "GOCOVERDIR=${{ github.workspace }}/covdata" >> $GITHUB_ENV
+          echo "K3S_HOST_VERSION=v1.32.1+k3s1" >> $GITHUB_ENV

       - name: Build and package
         run: |
@@ -152,6 +100,9 @@ jobs:
         run: k3kcli -v

       - name: Run cli tests
+        env:
+          K3K_DOCKER_INSTALL: "true"
+          K3S_HOST_VERSION: "${{ env.K3S_HOST_VERSION }}"
         run: make test-cli

       - name: Convert coverage data
```
**Makefile** (17 changed lines)

```diff
@@ -18,6 +18,9 @@ CRD_REF_DOCS := go run github.com/elastic/crd-ref-docs@$(CRD_REF_DOCS_VER)

 ENVTEST ?= go run sigs.k8s.io/controller-runtime/tools/setup-envtest@$(ENVTEST_VERSION)
 ENVTEST_DIR ?= $(shell pwd)/.envtest

+E2E_LABEL_FILTER ?= "e2e"
+
 export KUBEBUILDER_ASSETS ?= $(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(ENVTEST_DIR) -p path)

@@ -69,7 +72,7 @@ test-kubelet-controller: ## Run the controller tests (pkg/controller)

 .PHONY: test-e2e
 test-e2e: ## Run the e2e tests
-	$(GINKGO) $(GINKGO_FLAGS) --label-filter=e2e tests
+	$(GINKGO) $(GINKGO_FLAGS) --label-filter="$(E2E_LABEL_FILTER)" tests

 .PHONY: test-cli
 test-cli: ## Run the cli tests
@@ -83,7 +86,7 @@ generate: ## Generate the CRDs specs
 docs: ## Build the CRDs and CLI docs
	$(CRD_REF_DOCS) --config=./docs/crds/config.yaml \
		--renderer=markdown \
-		--source-path=./pkg/apis/k3k.io/v1alpha1 \
+		--source-path=./pkg/apis/k3k.io/v1beta1 \
		--output-path=./docs/crds/crd-docs.md
	@go run ./docs/cli/genclidoc.go

@@ -105,10 +108,12 @@ validate: generate docs fmt ## Validate the project checking for any dependency
 .PHONY: install
 install: ## Install K3k with Helm on the targeted Kubernetes cluster
	helm upgrade --install --namespace k3k-system --create-namespace \
-		--set image.repository=$(REPO)/k3k \
-		--set image.tag=$(VERSION) \
-		--set sharedAgent.image.repository=$(REPO)/k3k-kubelet \
-		--set sharedAgent.image.tag=$(VERSION) \
+		--set controller.extraEnv[0].name=DEBUG \
+		--set-string controller.extraEnv[0].value=true \
+		--set controller.image.repository=$(REPO)/k3k \
+		--set controller.image.tag=$(VERSION) \
+		--set agent.shared.image.repository=$(REPO)/k3k-kubelet \
+		--set agent.shared.image.tag=$(VERSION) \
		k3k ./charts/k3k/

 .PHONY: help
```
**README.md** (16 changed lines)

````diff
@@ -1,9 +1,9 @@
 # K3k: Kubernetes in Kubernetes

 [](https://shields.io/)
 [](https://goreportcard.com/report/github.com/rancher/k3k)
+[](https://github.com/rancher/k3k/actions/workflows/test-conformance-virtual.yaml)

 K3k, Kubernetes in Kubernetes, is a tool that empowers you to create and manage isolated K3s clusters within your existing Kubernetes environment. It enables efficient multi-tenancy, streamlined experimentation, and robust resource isolation, minimizing infrastructure costs by allowing you to run multiple lightweight Kubernetes clusters on the same physical host. K3k offers both "shared" mode, optimizing resource utilization, and "virtual" mode, providing complete isolation with dedicated K3s server pods. This allows you to access a full Kubernetes experience without the overhead of managing separate physical resources.
@@ -11,10 +11,6 @@ K3k, Kubernetes in Kubernetes, is a tool that empowers you to create and manage
 K3k integrates seamlessly with Rancher for simplified management of your embedded clusters.


-**Experimental Tool**
-
-This project is still under development and is considered experimental. It may have limitations, bugs, or changes. Please use with caution and report any issues you encounter. We appreciate your feedback as we continue to refine and improve this tool.
-

 ## Features and Benefits

@@ -59,7 +55,7 @@ This section provides instructions on how to install K3k and the `k3kcli`.
 helm install --namespace k3k-system --create-namespace k3k k3k/k3k
 ```

-**NOTE:** K3k is currently under development. We recommend using the latest released version when possible.
+We recommend using the latest released version when possible.


 ### Install the `k3kcli`
@@ -71,7 +67,7 @@ To install it, simply download the latest available version for your architectur
 For example, you can download the Linux amd64 version with:

 ```
-wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v0.3.3/k3kcli-linux-amd64 && \
+wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v1.0.0/k3kcli-linux-amd64 && \
 chmod +x k3kcli && \
 sudo mv k3kcli /usr/local/bin
 ```
@@ -79,7 +75,7 @@ wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v0.3.3/k3kcli-l
 You should now be able to run:
 ```bash
 -> % k3kcli --version
-k3kcli Version: v0.3.3
+k3kcli version v1.0.0
 ```


@@ -135,7 +131,7 @@ You can also directly create a Cluster resource in some namespace, to create a K

 ```bash
 kubectl apply -f - <<EOF
-apiVersion: k3k.io/v1alpha1
+apiVersion: k3k.io/v1beta1
 kind: Cluster
 metadata:
   name: mycluster
````
**Helm chart metadata (Chart.yaml)**

```diff
@@ -2,5 +2,5 @@ apiVersion: v2
 name: k3k
 description: A Helm chart for K3K
 type: application
-version: 0.3.4-rc3
-appVersion: v0.3.4-rc3
+version: 1.0.0
+appVersion: v1.0.0
```
**Cluster CRD (k3k.io)**

```diff
@@ -24,7 +24,7 @@ spec:
     - jsonPath: .status.policyName
       name: Policy
       type: string
-    name: v1alpha1
+    name: v1beta1
     schema:
       openAPIV3Schema:
         description: |-
@@ -228,6 +228,7 @@ spec:
                 certificates.
               properties:
                 enabled:
+                  default: true
                   description: Enabled toggles this feature on or off.
                   type: boolean
                 sources:
@@ -244,6 +245,8 @@ spec:
                             - For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
                             - For ServiceAccountTokenKey: 'tls.key'.
                           type: string
+                      required:
+                      - secretName
                       type: object
                     etcdPeerCA:
                       description: ETCDPeerCA specifies the etcd-peer-ca cert/key
@@ -256,6 +259,8 @@ spec:
                             - For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
                             - For ServiceAccountTokenKey: 'tls.key'.
                           type: string
+                      required:
+                      - secretName
                       type: object
                     etcdServerCA:
                       description: ETCDServerCA specifies the etcd-server-ca cert/key
@@ -268,6 +273,8 @@ spec:
                             - For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
                             - For ServiceAccountTokenKey: 'tls.key'.
                           type: string
+                      required:
+                      - secretName
                       type: object
                     requestHeaderCA:
                       description: RequestHeaderCA specifies the request-header-ca
@@ -280,6 +287,8 @@ spec:
                             - For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
                             - For ServiceAccountTokenKey: 'tls.key'.
                           type: string
+                      required:
+                      - secretName
                       type: object
                     serverCA:
                       description: ServerCA specifies the server-ca cert/key pair.
@@ -291,6 +300,8 @@ spec:
                             - For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
                             - For ServiceAccountTokenKey: 'tls.key'.
                           type: string
+                      required:
+                      - secretName
                       type: object
                     serviceAccountToken:
                       description: ServiceAccountToken specifies the service-account-token
@@ -303,8 +314,20 @@ spec:
                             - For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
                             - For ServiceAccountTokenKey: 'tls.key'.
                           type: string
+                      required:
+                      - secretName
                       type: object
+                  required:
+                  - clientCA
+                  - etcdPeerCA
+                  - etcdServerCA
+                  - requestHeaderCA
+                  - serverCA
+                  - serviceAccountToken
                   type: object
+              required:
+              - enabled
+              - sources
               type: object
             expose:
               description: |-
@@ -326,7 +349,7 @@ spec:
                       use for the Ingress.
                     type: string
                   type: object
-                loadbalancer:
+                loadBalancer:
                   description: LoadBalancer specifies options for exposing the API
                     server through a LoadBalancer service.
                   properties:
@@ -365,6 +388,11 @@ spec:
                       type: integer
                   type: object
               type: object
+              x-kubernetes-validations:
+              - message: ingress, loadbalancer and nodePort are mutually exclusive;
+                  only one can be set
+                rule: '[has(self.ingress), has(self.loadBalancer), has(self.nodePort)].filter(x,
+                  x).size() <= 1'
             mirrorHostNodes:
               description: |-
                 MirrorHostNodes controls whether node objects from the host cluster
@@ -405,7 +433,7 @@ spec:
                     This field is only relevant in "dynamic" mode.
                   type: string
                 storageRequestSize:
-                  default: 1G
+                  default: 2G
                   description: |-
                     StorageRequestSize is the requested size for the PVC.
                     This field is only relevant in "dynamic" mode.
@@ -574,6 +602,124 @@ spec:
               x-kubernetes-validations:
               - message: serviceCIDR is immutable
                 rule: self == oldSelf
+            sync:
+              default: {}
+              description: Sync specifies the resources types that will be synced
+                from virtual cluster to host cluster.
+              properties:
+                configMaps:
+                  default:
+                    enabled: true
+                  description: ConfigMaps resources sync configuration.
+                  properties:
+                    enabled:
+                      default: true
+                      description: Enabled is an on/off switch for syncing resources.
+                      type: boolean
+                    selector:
+                      additionalProperties:
+                        type: string
+                      description: |-
+                        Selector specifies set of labels of the resources that will be synced, if empty
+                        then all resources of the given type will be synced.
+                      type: object
+                  required:
+                  - enabled
+                  type: object
+                ingresses:
+                  default:
+                    enabled: false
+                  description: Ingresses resources sync configuration.
+                  properties:
+                    enabled:
+                      default: false
+                      description: Enabled is an on/off switch for syncing resources.
+                      type: boolean
+                    selector:
+                      additionalProperties:
+                        type: string
+                      description: |-
+                        Selector specifies set of labels of the resources that will be synced, if empty
+                        then all resources of the given type will be synced.
+                      type: object
+                  required:
+                  - enabled
+                  type: object
+                persistentVolumeClaims:
+                  default:
+                    enabled: true
+                  description: PersistentVolumeClaims resources sync configuration.
+                  properties:
+                    enabled:
+                      default: true
+                      description: Enabled is an on/off switch for syncing resources.
+                      type: boolean
+                    selector:
+                      additionalProperties:
+                        type: string
+                      description: |-
+                        Selector specifies set of labels of the resources that will be synced, if empty
+                        then all resources of the given type will be synced.
+                      type: object
+                  required:
+                  - enabled
+                  type: object
+                priorityClasses:
+                  default:
+                    enabled: false
+                  description: PriorityClasses resources sync configuration.
+                  properties:
+                    enabled:
+                      default: false
+                      description: Enabled is an on/off switch for syncing resources.
+                      type: boolean
+                    selector:
+                      additionalProperties:
+                        type: string
+                      description: |-
+                        Selector specifies set of labels of the resources that will be synced, if empty
+                        then all resources of the given type will be synced.
+                      type: object
+                  required:
+                  - enabled
+                  type: object
+                secrets:
+                  default:
+                    enabled: true
+                  description: Secrets resources sync configuration.
+                  properties:
+                    enabled:
+                      default: true
+                      description: Enabled is an on/off switch for syncing resources.
+                      type: boolean
+                    selector:
+                      additionalProperties:
+                        type: string
+                      description: |-
+                        Selector specifies set of labels of the resources that will be synced, if empty
+                        then all resources of the given type will be synced.
+                      type: object
+                  type: object
+                services:
+                  default:
+                    enabled: true
+                  description: Services resources sync configuration.
+                  properties:
+                    enabled:
+                      default: true
+                      description: Enabled is an on/off switch for syncing resources.
+                      type: boolean
+                    selector:
+                      additionalProperties:
+                        type: string
+                      description: |-
+                        Selector specifies set of labels of the resources that will be synced, if empty
+                        then all resources of the given type will be synced.
+                      type: object
+                  required:
+                  - enabled
+                  type: object
+              type: object
             tlsSANs:
               description: TLSSANs specifies subject alternative names for the K3s
                 server certificate.
```
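To make the new v1beta1 schema concrete, here is a minimal, hypothetical Cluster manifest exercising the renamed `loadBalancer` exposure key and the new `sync` block. The name, namespace and selector label are illustrative and not taken from the diff; treat this as a sketch against the schema above, not a manifest from the repository.

```yaml
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
  name: mycluster            # illustrative
  namespace: k3k-mycluster   # illustrative
spec:
  mode: shared
  servers: 1
  expose:
    # renamed from "loadbalancer"; ingress, loadBalancer and nodePort are
    # mutually exclusive per the new CEL validation rule
    loadBalancer: {}
  sync:
    secrets:
      enabled: true
      selector:
        k3k.example/sync: "true"   # illustrative label selector
    ingresses:
      enabled: true                # defaults to false when omitted
```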
**VirtualClusterPolicy CRD (k3k.io)**

```diff
@@ -20,7 +20,7 @@ spec:
     - jsonPath: .spec.allowedMode
       name: Mode
       type: string
-    name: v1alpha1
+    name: v1beta1
     schema:
       openAPIV3Schema:
         description: |-
@@ -225,6 +225,124 @@ spec:
                 type: array
                 x-kubernetes-list-type: atomic
               type: object
+            sync:
+              default: {}
+              description: Sync specifies the resources types that will be synced
+                from virtual cluster to host cluster.
+              properties:
+                configMaps:
+                  default:
+                    enabled: true
+                  description: ConfigMaps resources sync configuration.
+                  properties:
+                    enabled:
+                      default: true
+                      description: Enabled is an on/off switch for syncing resources.
+                      type: boolean
+                    selector:
+                      additionalProperties:
+                        type: string
+                      description: |-
+                        Selector specifies set of labels of the resources that will be synced, if empty
+                        then all resources of the given type will be synced.
+                      type: object
+                  required:
+                  - enabled
+                  type: object
+                ingresses:
+                  default:
+                    enabled: false
+                  description: Ingresses resources sync configuration.
+                  properties:
+                    enabled:
+                      default: false
+                      description: Enabled is an on/off switch for syncing resources.
+                      type: boolean
+                    selector:
+                      additionalProperties:
+                        type: string
+                      description: |-
+                        Selector specifies set of labels of the resources that will be synced, if empty
+                        then all resources of the given type will be synced.
+                      type: object
+                  required:
+                  - enabled
+                  type: object
+                persistentVolumeClaims:
+                  default:
+                    enabled: true
+                  description: PersistentVolumeClaims resources sync configuration.
+                  properties:
+                    enabled:
+                      default: true
+                      description: Enabled is an on/off switch for syncing resources.
+                      type: boolean
+                    selector:
+                      additionalProperties:
+                        type: string
+                      description: |-
+                        Selector specifies set of labels of the resources that will be synced, if empty
+                        then all resources of the given type will be synced.
+                      type: object
+                  required:
+                  - enabled
+                  type: object
+                priorityClasses:
+                  default:
+                    enabled: false
+                  description: PriorityClasses resources sync configuration.
+                  properties:
+                    enabled:
+                      default: false
+                      description: Enabled is an on/off switch for syncing resources.
+                      type: boolean
+                    selector:
+                      additionalProperties:
+                        type: string
+                      description: |-
+                        Selector specifies set of labels of the resources that will be synced, if empty
+                        then all resources of the given type will be synced.
+                      type: object
+                  required:
+                  - enabled
+                  type: object
+                secrets:
+                  default:
+                    enabled: true
+                  description: Secrets resources sync configuration.
+                  properties:
+                    enabled:
+                      default: true
+                      description: Enabled is an on/off switch for syncing resources.
+                      type: boolean
+                    selector:
+                      additionalProperties:
+                        type: string
+                      description: |-
+                        Selector specifies set of labels of the resources that will be synced, if empty
+                        then all resources of the given type will be synced.
+                      type: object
+                  type: object
+                services:
+                  default:
+                    enabled: true
+                  description: Services resources sync configuration.
+                  properties:
+                    enabled:
+                      default: true
+                      description: Enabled is an on/off switch for syncing resources.
+                      type: boolean
+                    selector:
+                      additionalProperties:
+                        type: string
+                      description: |-
+                        Selector specifies set of labels of the resources that will be synced, if empty
+                        then all resources of the given type will be synced.
+                      type: object
+                  required:
+                  - enabled
+                  type: object
+              type: object
           type: object
         status:
           description: Status reflects the observed state of the VirtualClusterPolicy.
```
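For comparison, a hypothetical VirtualClusterPolicy using the same v1beta1 API and the newly added `sync` block might look like the sketch below; the policy name and the chosen toggles are illustrative and assume only the schema shown above.

```yaml
apiVersion: k3k.io/v1beta1
kind: VirtualClusterPolicy
metadata:
  name: shared-only          # illustrative
spec:
  allowedMode: shared
  sync:
    priorityClasses:
      enabled: true          # off by default
    secrets:
      enabled: true
```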
**Helm chart template helpers**

```diff
@@ -60,3 +60,54 @@ Create the name of the service account to use
 {{- default "default" .Values.serviceAccount.name }}
 {{- end }}
 {{- end }}
+
+{{/*
+Print the image pull secrets in the expected format (an array of objects with one possible field, "name").
+*/}}
+{{- define "image.pullSecrets" }}
+{{- $imagePullSecrets := list }}
+{{- range . }}
+{{- if kindIs "string" . }}
+{{- $imagePullSecrets = append $imagePullSecrets (dict "name" .) }}
+{{- else }}
+{{- $imagePullSecrets = append $imagePullSecrets . }}
+{{- end }}
+{{- end }}
+{{- toYaml $imagePullSecrets }}
+{{- end }}
+
+{{- define "controller.registry" }}
+{{- $registry := .Values.global.imageRegistry | default .Values.controller.image.registry -}}
+{{- if $registry }}
+{{- $registry }}/
+{{- else }}
+{{- $registry }}
+{{- end }}
+{{- end }}
+
+{{- define "server.registry" }}
+{{- $registry := .Values.global.imageRegistry | default .Values.server.image.registry -}}
+{{- if $registry }}
+{{- $registry }}/
+{{- else }}
+{{- $registry }}
+{{- end }}
+{{- end }}
+
+{{- define "agent.virtual.registry" }}
+{{- $registry := .Values.global.imageRegistry | default .Values.agent.virtual.image.registry -}}
+{{- if $registry }}
+{{- $registry }}/
+{{- else }}
+{{- $registry }}
+{{- end }}
+{{- end }}
+
+{{- define "agent.shared.registry" }}
+{{- $registry := .Values.global.imageRegistry | default .Values.agent.shared.image.registry -}}
+{{- if $registry }}
+{{- $registry }}/
+{{- else }}
+{{- $registry }}
+{{- end }}
+{{- end }}
```
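The `image.pullSecrets` helper accepts either plain strings or objects that already carry a `name` key and renders both into the list-of-objects form the Deployment expects. A hypothetical values excerpt (the secret names are illustrative):

```yaml
global:
  imagePullSecrets:
    - registry-credentials          # plain string; wrapped into {name: ...} by the helper
controller:
  imagePullSecrets:
    - name: controller-pull-secret  # already in object form; passed through unchanged
```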
**Helm chart controller Deployment template**

```diff
@@ -6,7 +6,7 @@ metadata:
     {{- include "k3k.labels" . | nindent 4 }}
   namespace: {{ .Release.Namespace }}
 spec:
-  replicas: {{ .Values.image.replicaCount }}
+  replicas: {{ .Values.controller.replicas }}
   selector:
     matchLabels:
       {{- include "k3k.selectorLabels" . | nindent 6 }}
@@ -15,30 +15,40 @@ spec:
       labels:
         {{- include "k3k.selectorLabels" . | nindent 8 }}
     spec:
+      imagePullSecrets: {{- include "image.pullSecrets" (concat .Values.controller.imagePullSecrets .Values.global.imagePullSecrets) | nindent 8 }}
       containers:
-      - image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}"
-        imagePullPolicy: {{ .Values.image.pullPolicy }}
+      - image: "{{- include "controller.registry" .}}{{ .Values.controller.image.repository }}:{{ .Values.controller.image.tag | default .Chart.AppVersion }}"
+        imagePullPolicy: {{ .Values.controller.image.pullPolicy }}
         name: {{ .Chart.Name }}
+        {{- with .Values.controller.resources }}
+        resources:
+          {{- toYaml . | nindent 12 }}
+        {{- end }}
+        args:
+        - k3k
+        - --cluster-cidr={{ .Values.host.clusterCIDR }}
+        - --k3s-server-image={{- include "server.registry" .}}{{ .Values.server.image.repository }}
+        - --k3s-server-image-pull-policy={{ .Values.server.image.pullPolicy }}
+        - --agent-shared-image={{- include "agent.shared.registry" .}}{{ .Values.agent.shared.image.repository }}:{{ default .Chart.AppVersion .Values.agent.shared.image.tag }}
+        - --agent-shared-image-pull-policy={{ .Values.agent.shared.image.pullPolicy }}
+        - --agent-virtual-image={{- include "agent.virtual.registry" .}}{{ .Values.agent.virtual.image.repository }}
+        - --agent-virtual-image-pull-policy={{ .Values.agent.virtual.image.pullPolicy }}
+        - --kubelet-port-range={{ .Values.agent.shared.kubeletPortRange }}
+        - --webhook-port-range={{ .Values.agent.shared.webhookPortRange }}
+        {{- range $key, $value := include "image.pullSecrets" (concat .Values.agent.imagePullSecrets .Values.global.imagePullSecrets) | fromYamlArray }}
+        - --agent-image-pull-secret
+        - {{ .name }}
+        {{- end }}
+        {{- range $key, $value := include "image.pullSecrets" (concat .Values.server.imagePullSecrets .Values.global.imagePullSecrets) | fromYamlArray }}
+        - --server-image-pull-secret
+        - {{ .name }}
+        {{- end }}
         env:
-        - name: CLUSTER_CIDR
-          value: {{ .Values.host.clusterCIDR }}
-        - name: SHARED_AGENT_IMAGE
-          value: "{{ .Values.sharedAgent.image.repository }}:{{ default .Chart.AppVersion .Values.sharedAgent.image.tag }}"
-        - name: SHARED_AGENT_PULL_POLICY
-          value: {{ .Values.sharedAgent.image.pullPolicy }}
-        - name: K3S_IMAGE
-          value: {{ .Values.k3sServer.image.repository }}
-        - name: K3S_IMAGE_PULL_POLICY
-          value: {{ .Values.k3sServer.image.pullPolicy }}
-        - name: KUBELET_PORT_RANGE
-          value: {{ .Values.sharedAgent.kubeletPortRange }}
-        - name: WEBHOOK_PORT_RANGE
-          value: {{ .Values.sharedAgent.webhookPortRange }}
        - name: CONTROLLER_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
-        {{- with .Values.extraEnv }}
+        {{- with .Values.controller.extraEnv }}
          {{- toYaml . | nindent 10 }}
        {{- end }}
        ports:
```
**Helm chart values.yaml**

```diff
@@ -1,31 +1,11 @@
-replicaCount: 1
-
-image:
-  repository: rancher/k3k
-  tag: ""
-  pullPolicy: ""
-
-imagePullSecrets: []
 nameOverride: ""
 fullnameOverride: ""

-# extraEnv allows you to specify additional environment variables for the k3k controller deployment.
-# This is useful for passing custom configuration or secrets to the controller.
-# For example:
-# extraEnv:
-#   - name: MY_CUSTOM_VAR
-#     value: "my_custom_value"
-#   - name: ANOTHER_VAR
-#     valueFrom:
-#       secretKeyRef:
-#         name: my-secret
-#         key: my-key
-extraEnv: []
-
-host:
-  # clusterCIDR specifies the clusterCIDR that will be added to the default networkpolicy, if not set
-  # the controller will collect the PodCIDRs of all the nodes on the system.
-  clusterCIDR: ""
+global:
+  # -- Global override for container image registry
+  imageRegistry: ""
+  # -- Global override for container image registry pull secrets
+  imagePullSecrets: []

 serviceAccount:
   # Specifies whether a service account should be created
@@ -34,18 +14,72 @@ serviceAccount:
   # If not set and create is true, a name is generated using the fullname template
   name: ""

-# configuration related to the shared agent mode in k3k
-sharedAgent:
-  # Specifies the port range that will be used for k3k-kubelet api if mirrorHostNodes is enabled
-  kubeletPortRange: "50000-51000"
-  # Specifies the port range that will be used for webhook if mirrorHostNodes is enabled
-  webhookPortRange: "51001-52000"
+host:
+  # clusterCIDR specifies the clusterCIDR that will be added to the default networkpolicy, if not set
+  # the controller will collect the PodCIDRs of all the nodes on the system.
+  clusterCIDR: ""
+
+controller:
+  replicas: 1
   image:
-    repository: "rancher/k3k-kubelet"
+    registry: ""
+    repository: rancher/k3k
     tag: ""
     pullPolicy: ""
-# image registry configuration related to the k3s server
-k3sServer:
+
+  imagePullSecrets: []
+
+  # extraEnv allows you to specify additional environment variables for the k3k controller deployment.
+  # This is useful for passing custom configuration or secrets to the controller.
+  # For example:
+  # extraEnv:
+  #   - name: MY_CUSTOM_VAR
+  #     value: "my_custom_value"
+  #   - name: ANOTHER_VAR
+  #     valueFrom:
+  #       secretKeyRef:
+  #         name: my-secret
+  #         key: my-key
+  extraEnv: []
+
+  # resources allows you to set resources limits and requests for CPU and Memory
+  # resources:
+  #   limits:
+  #     cpu: "200m"
+  #     memory: "200Mi"
+  #   requests:
+  #     cpu: "100m"
+  #     memory: "100Mi"
+  resources: {}
+
+# configuration related to k3s server component in k3k
+server:
+  imagePullSecrets: []
   image:
+    registry:
     repository: "rancher/k3s"
     pullPolicy: ""
+
+# configuration related to the agent component in k3k
+agent:
+  imagePullSecrets: []
+
+  # configuration related to agent in shared mode
+  shared:
+    image:
+      registry: ""
+      repository: "rancher/k3k-kubelet"
+      tag: ""
+      pullPolicy: ""
+
+    # Specifies the port range that will be used for k3k-kubelet api if mirrorHostNodes is enabled
+    kubeletPortRange: "50000-51000"
+    # Specifies the port range that will be used for webhook if mirrorHostNodes is enabled
+    webhookPortRange: "51001-52000"
+
+  # configuration related to agent in virtual mode
+  virtual:
+    image:
+      registry: ""
+      repository: "rancher/k3s"
+      pullPolicy: ""
```
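A sketch of an override file for the reorganized layout, mirroring the image-related values that the Makefile's `install` target passes via `--set`; the repository names and tag below are placeholders, not values taken from the diff:

```yaml
# my-values.yaml (hypothetical)
global:
  imageRegistry: ""                        # set once to prefix every image
controller:
  replicas: 1
  image:
    repository: ttl.sh/example/k3k         # $(REPO)/k3k in the Makefile
    tag: 1h                                # $(VERSION) in the Makefile
  extraEnv:
    - name: DEBUG
      value: "true"
agent:
  shared:
    image:
      repository: ttl.sh/example/k3k-kubelet
      tag: 1h
```

Applied with something like `helm upgrade --install k3k ./charts/k3k --namespace k3k-system --create-namespace -f my-values.yaml`, this matches what `make install` wires up for dev images.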
**k3kcli `cluster create` command (Go)**

```diff
@@ -21,7 +21,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"

-	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
+	"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
 	"github.com/rancher/k3k/pkg/controller"
 	k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
 	"github.com/rancher/k3k/pkg/controller/kubeconfig"
@@ -46,6 +46,7 @@ type CreateConfig struct {
 	policy               string
 	mirrorHostNodes      bool
 	customCertsPath      string
+	timeout              time.Duration
 }

 func NewClusterCreateCmd(appCtx *AppContext) *cobra.Command {
@@ -78,7 +79,7 @@ func createAction(appCtx *AppContext, config *CreateConfig) func(cmd *cobra.Comm
 			return errors.New("invalid cluster name")
 		}

-		if config.mode == string(v1alpha1.SharedClusterMode) && config.agents != 0 {
+		if config.mode == string(v1beta1.SharedClusterMode) && config.agents != 0 {
 			return errors.New("invalid flag, --agents flag is only allowed in virtual mode")
 		}

@@ -114,8 +115,8 @@ func createAction(appCtx *AppContext, config *CreateConfig) func(cmd *cobra.Comm

 		cluster := newCluster(name, namespace, config)

-		cluster.Spec.Expose = &v1alpha1.ExposeConfig{
-			NodePort: &v1alpha1.NodePortConfig{},
+		cluster.Spec.Expose = &v1beta1.ExposeConfig{
+			NodePort: &v1beta1.NodePortConfig{},
 		}

 		// add Host IP address as an extra TLS-SAN to expose the k3k cluster
@@ -141,7 +142,7 @@ func createAction(appCtx *AppContext, config *CreateConfig) func(cmd *cobra.Comm

 		logrus.Infof("Waiting for cluster to be available..")

-		if err := waitForCluster(ctx, client, cluster); err != nil {
+		if err := waitForCluster(ctx, client, cluster, config.timeout); err != nil {
 			return fmt.Errorf("failed to wait for cluster to become ready (status: %s): %w", cluster.Status.Phase, err)
 		}

@@ -169,17 +170,17 @@ func createAction(appCtx *AppContext, config *CreateConfig) func(cmd *cobra.Comm
 	}
 }

-func newCluster(name, namespace string, config *CreateConfig) *v1alpha1.Cluster {
-	cluster := &v1alpha1.Cluster{
+func newCluster(name, namespace string, config *CreateConfig) *v1beta1.Cluster {
+	cluster := &v1beta1.Cluster{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      name,
 			Namespace: namespace,
 		},
 		TypeMeta: metav1.TypeMeta{
 			Kind:       "Cluster",
-			APIVersion: "k3k.io/v1alpha1",
+			APIVersion: "k3k.io/v1beta1",
 		},
-		Spec: v1alpha1.ClusterSpec{
+		Spec: v1beta1.ClusterSpec{
 			Servers:     ptr.To(int32(config.servers)),
 			Agents:      ptr.To(int32(config.agents)),
 			ClusterCIDR: config.clusterCIDR,
@@ -189,9 +190,9 @@ func newCluster(name, namespace string, config *CreateConfig) *v1alpha1.Cluster
 			ServerEnvs:  env(config.serverEnvs),
 			AgentEnvs:   env(config.agentEnvs),
 			Version:     config.version,
-			Mode:        v1alpha1.ClusterMode(config.mode),
-			Persistence: v1alpha1.PersistenceConfig{
-				Type:               v1alpha1.PersistenceMode(config.persistenceType),
+			Mode:        v1beta1.ClusterMode(config.mode),
+			Persistence: v1beta1.PersistenceConfig{
+				Type:               v1beta1.PersistenceMode(config.persistenceType),
 				StorageClassName:   ptr.To(config.storageClassName),
 				StorageRequestSize: config.storageRequestSize,
 			},
@@ -210,25 +211,25 @@ func newCluster(name, namespace string, config *CreateConfig) *v1alpha1.Cluster
 	}

 	if config.customCertsPath != "" {
-		cluster.Spec.CustomCAs = v1alpha1.CustomCAs{
+		cluster.Spec.CustomCAs = &v1beta1.CustomCAs{
 			Enabled: true,
-			Sources: v1alpha1.CredentialSources{
-				ClientCA: v1alpha1.CredentialSource{
+			Sources: v1beta1.CredentialSources{
+				ClientCA: v1beta1.CredentialSource{
 					SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "client-ca"),
 				},
-				ServerCA: v1alpha1.CredentialSource{
+				ServerCA: v1beta1.CredentialSource{
 					SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "server-ca"),
 				},
-				ETCDServerCA: v1alpha1.CredentialSource{
+				ETCDServerCA: v1beta1.CredentialSource{
 					SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "etcd-server-ca"),
 				},
-				ETCDPeerCA: v1alpha1.CredentialSource{
+				ETCDPeerCA: v1beta1.CredentialSource{
 					SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "etcd-peer-ca"),
 				},
-				RequestHeaderCA: v1alpha1.CredentialSource{
+				RequestHeaderCA: v1beta1.CredentialSource{
 					SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "request-header-ca"),
 				},
-				ServiceAccountToken: v1alpha1.CredentialSource{
+				ServiceAccountToken: v1beta1.CredentialSource{
 					SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "service-account-token"),
 				},
 			},
@@ -256,9 +257,8 @@ func env(envSlice []string) []v1.EnvVar {
 	return envVars
 }

-func waitForCluster(ctx context.Context, k8sClient client.Client, cluster *v1alpha1.Cluster) error {
+func waitForCluster(ctx context.Context, k8sClient client.Client, cluster *v1beta1.Cluster, timeout time.Duration) error {
 	interval := 5 * time.Second
-	timeout := 2 * time.Minute

 	return wait.PollUntilContextTimeout(ctx, interval, timeout, true, func(ctx context.Context) (bool, error) {
 		key := client.ObjectKeyFromObject(cluster)
@@ -267,12 +267,12 @@ func waitForCluster(ctx context.Context, k8sClient client.Client, cluster *v1alp
 		}

 		// If resource ready -> stop polling
-		if cluster.Status.Phase == v1alpha1.ClusterReady {
+		if cluster.Status.Phase == v1beta1.ClusterReady {
 			return true, nil
 		}

 		// If resource failed -> stop polling with an error
-		if cluster.Status.Phase == v1alpha1.ClusterFailed {
+		if cluster.Status.Phase == v1beta1.ClusterFailed {
 			return true, fmt.Errorf("cluster creation failed: %s", cluster.Status.Phase)
 		}

```
**k3kcli `cluster create` flags and validation (Go)**

```diff
@@ -2,11 +2,12 @@ package cmds

 import (
 	"errors"
+	"time"

 	"github.com/spf13/cobra"
 	"k8s.io/apimachinery/pkg/api/resource"

-	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
+	"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
 )

 func createFlags(cmd *cobra.Command, cfg *CreateConfig) {
@@ -16,7 +17,7 @@ func createFlags(cmd *cobra.Command, cfg *CreateConfig) {
 	cmd.Flags().StringVar(&cfg.clusterCIDR, "cluster-cidr", "", "cluster CIDR")
 	cmd.Flags().StringVar(&cfg.serviceCIDR, "service-cidr", "", "service CIDR")
 	cmd.Flags().BoolVar(&cfg.mirrorHostNodes, "mirror-host-nodes", false, "Mirror Host Cluster Nodes")
-	cmd.Flags().StringVar(&cfg.persistenceType, "persistence-type", string(v1alpha1.DynamicPersistenceMode), "persistence mode for the nodes (dynamic, ephemeral, static)")
+	cmd.Flags().StringVar(&cfg.persistenceType, "persistence-type", string(v1beta1.DynamicPersistenceMode), "persistence mode for the nodes (dynamic, ephemeral, static)")
 	cmd.Flags().StringVar(&cfg.storageClassName, "storage-class-name", "", "storage class name for dynamic persistence type")
 	cmd.Flags().StringVar(&cfg.storageRequestSize, "storage-request-size", "", "storage size for dynamic persistence type")
 	cmd.Flags().StringSliceVar(&cfg.serverArgs, "server-args", []string{}, "servers extra arguments")
@@ -28,6 +29,7 @@ func createFlags(cmd *cobra.Command, cfg *CreateConfig) {
 	cmd.Flags().StringVar(&cfg.kubeconfigServerHost, "kubeconfig-server", "", "override the kubeconfig server host")
 	cmd.Flags().StringVar(&cfg.policy, "policy", "", "The policy to create the cluster in")
 	cmd.Flags().StringVar(&cfg.customCertsPath, "custom-certs", "", "The path for custom certificate directory")
+	cmd.Flags().DurationVar(&cfg.timeout, "timeout", 3*time.Minute, "The timeout for waiting for the cluster to become ready (e.g., 10s, 5m, 1h).")
 }

 func validateCreateConfig(cfg *CreateConfig) error {
@@ -36,8 +38,8 @@ func validateCreateConfig(cfg *CreateConfig) error {
 	}

 	if cfg.persistenceType != "" {
-		switch v1alpha1.PersistenceMode(cfg.persistenceType) {
-		case v1alpha1.EphemeralPersistenceMode, v1alpha1.DynamicPersistenceMode:
+		switch v1beta1.PersistenceMode(cfg.persistenceType) {
+		case v1beta1.EphemeralPersistenceMode, v1beta1.DynamicPersistenceMode:
 			return nil
 		default:
 			return errors.New(`persistence-type should be one of "dynamic", "ephemeral" or "static"`)
@@ -50,7 +52,7 @@ func validateCreateConfig(cfg *CreateConfig) error {

 	if cfg.mode != "" {
 		switch cfg.mode {
-		case string(v1alpha1.VirtualClusterMode), string(v1alpha1.SharedClusterMode):
+		case string(v1beta1.VirtualClusterMode), string(v1beta1.SharedClusterMode):
 			return nil
 		default:
 			return errors.New(`mode should be one of "shared" or "virtual"`)
```
**k3kcli `cluster delete` command (Go)**

```diff
@@ -14,7 +14,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"

-	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
+	"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
 	k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
 	"github.com/rancher/k3k/pkg/controller/cluster/agent"
 )
@@ -50,7 +50,7 @@ func delete(appCtx *AppContext) func(cmd *cobra.Command, args []string) error {

 		logrus.Infof("Deleting [%s] cluster in namespace [%s]", name, namespace)

-		cluster := v1alpha1.Cluster{
+		cluster := v1beta1.Cluster{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      name,
 				Namespace: namespace,
@@ -86,7 +86,7 @@ func delete(appCtx *AppContext) func(cmd *cobra.Command, args []string) error {
 	}
 }

-func RemoveOwnerReferenceFromSecret(ctx context.Context, name string, cl ctrlclient.Client, cluster v1alpha1.Cluster) error {
+func RemoveOwnerReferenceFromSecret(ctx context.Context, name string, cl ctrlclient.Client, cluster v1beta1.Cluster) error {
 	var secret v1.Secret

 	key := types.NamespacedName{
```
**k3kcli `cluster list` command (Go)**

```diff
@@ -10,7 +10,7 @@ import (
 	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"

-	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
+	"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
 )

 func NewClusterListCmd(appCtx *AppContext) *cobra.Command {
@@ -32,7 +32,7 @@ func list(appCtx *AppContext) func(cmd *cobra.Command, args []string) error {
 		ctx := context.Background()
 		client := appCtx.Client

-		var clusters v1alpha1.ClusterList
+		var clusters v1beta1.ClusterList
 		if err := client.List(ctx, &clusters, ctrlclient.InNamespace(appCtx.namespace)); err != nil {
 			return err
 		}
```
**k3kcli `kubeconfig` command (Go)**

```diff
@@ -18,7 +18,7 @@ import (
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"

-	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
+	"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
 	"github.com/rancher/k3k/pkg/controller"
 	"github.com/rancher/k3k/pkg/controller/certs"
 	"github.com/rancher/k3k/pkg/controller/kubeconfig"
@@ -83,7 +83,7 @@ func generate(appCtx *AppContext, cfg *GenerateKubeconfigConfig) func(cmd *cobra
 			Namespace: appCtx.Namespace(cfg.name),
 		}

-		var cluster v1alpha1.Cluster
+		var cluster v1beta1.Cluster

 		if err := client.Get(ctx, clusterKey, &cluster); err != nil {
 			return err
@@ -128,7 +128,7 @@ func generate(appCtx *AppContext, cfg *GenerateKubeconfigConfig) func(cmd *cobra
 	}
 }

-func writeKubeconfigFile(cluster *v1alpha1.Cluster, kubeconfig *clientcmdapi.Config, configName string) error {
+func writeKubeconfigFile(cluster *v1beta1.Cluster, kubeconfig *clientcmdapi.Config, configName string) error {
 	if configName == "" {
 		configName = cluster.Namespace + "-" + cluster.Name + "-kubeconfig.yaml"
 	}
```
**k3kcli `policy create` command (Go)**

```diff
@@ -13,7 +13,7 @@ import (
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

-	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
+	"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
 	"github.com/rancher/k3k/pkg/controller/policy"
 )

@@ -30,7 +30,7 @@ func NewPolicyCreateCmd(appCtx *AppContext) *cobra.Command {
 		Example: "k3kcli policy create [command options] NAME",
 		PreRunE: func(cmd *cobra.Command, args []string) error {
 			switch config.mode {
-			case string(v1alpha1.VirtualClusterMode), string(v1alpha1.SharedClusterMode):
+			case string(v1beta1.VirtualClusterMode), string(v1beta1.SharedClusterMode):
 				return nil
 			default:
 				return errors.New(`mode should be one of "shared" or "virtual"`)
@@ -51,7 +51,7 @@ func policyCreateAction(appCtx *AppContext, config *VirtualClusterPolicyCreateCo
 		client := appCtx.Client
 		policyName := args[0]

-		_, err := createPolicy(ctx, client, v1alpha1.ClusterMode(config.mode), policyName)
+		_, err := createPolicy(ctx, client, v1beta1.ClusterMode(config.mode), policyName)

 		return err
 	}
@@ -81,18 +81,18 @@ func createNamespace(ctx context.Context, client client.Client, name, policyName
 	return nil
 }

-func createPolicy(ctx context.Context, client client.Client, mode v1alpha1.ClusterMode, policyName string) (*v1alpha1.VirtualClusterPolicy, error) {
+func createPolicy(ctx context.Context, client client.Client, mode v1beta1.ClusterMode, policyName string) (*v1beta1.VirtualClusterPolicy, error) {
 	logrus.Infof("Creating policy [%s]", policyName)

-	policy := &v1alpha1.VirtualClusterPolicy{
+	policy := &v1beta1.VirtualClusterPolicy{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: policyName,
 		},
 		TypeMeta: metav1.TypeMeta{
 			Kind:       "VirtualClusterPolicy",
-			APIVersion: "k3k.io/v1alpha1",
+			APIVersion: "k3k.io/v1beta1",
 		},
-		Spec: v1alpha1.VirtualClusterPolicySpec{
+		Spec: v1beta1.VirtualClusterPolicySpec{
 			AllowedMode: mode,
 		},
 	}

```
**k3kcli `policy delete` command (Go)**

```diff
@@ -8,7 +8,7 @@ import (

 	apierrors "k8s.io/apimachinery/pkg/api/errors"

-	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
+	"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
 )

 func NewPolicyDeleteCmd(appCtx *AppContext) *cobra.Command {
@@ -27,7 +27,7 @@ func policyDeleteAction(appCtx *AppContext) func(cmd *cobra.Command, args []stri
 		client := appCtx.Client
 		name := args[0]

-		policy := &v1alpha1.VirtualClusterPolicy{}
+		policy := &v1beta1.VirtualClusterPolicy{}
 		policy.Name = name

 		if err := client.Delete(ctx, policy); err != nil {
```
**k3kcli `policy list` command (Go)**

```diff
@@ -9,7 +9,7 @@ import (

 	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"

-	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
+	"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
 )

 func NewPolicyListCmd(appCtx *AppContext) *cobra.Command {
@@ -27,7 +27,7 @@ func policyList(appCtx *AppContext) func(cmd *cobra.Command, args []string) erro
 		ctx := context.Background()
 		client := appCtx.Client

-		var policies v1alpha1.VirtualClusterPolicyList
+		var policies v1beta1.VirtualClusterPolicyList
 		if err := client.List(ctx, &policies); err != nil {
 			return err
 		}
```
**k3kcli root command (Go)**

```diff
@@ -16,7 +16,7 @@ import (
 	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"

-	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
+	"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
 	"github.com/rancher/k3k/pkg/buildinfo"
 )

@@ -51,7 +51,7 @@ func NewRootCmd() *cobra.Command {

 	scheme := runtime.NewScheme()
 	_ = clientgoscheme.AddToScheme(scheme)
-	_ = v1alpha1.AddToScheme(scheme)
+	_ = v1beta1.AddToScheme(scheme)
 	_ = apiextensionsv1.AddToScheme(scheme)

 	ctrlClient, err := client.New(restConfig, client.Options{Scheme: scheme})
```
**CRD printer-column helper (Go)**

```diff
@@ -25,7 +25,7 @@ func getPrinterColumnsFromCRD(crd *apiextensionsv1.CustomResourceDefinition) []a
 	}

 	for _, version := range crd.Spec.Versions {
-		if version.Name == "v1alpha1" {
+		if version.Name == "v1beta1" {
 			printerColumns = append(printerColumns, version.AdditionalPrinterColumns...)
 			break
 		}
```
**Advanced-usage documentation**

````diff
@@ -6,7 +6,7 @@ This document provides advanced usage information for k3k, including detailed us

 The `Cluster` resource provides a variety of fields for customizing the behavior of your virtual clusters. You can check the [CRD documentation](./crds/crd-docs.md) for the full specs.

-**Note:** Most of these customization options can also be configured using the `k3kcli` tool. Refer to the [k3kcli](./cli/cli-docs.md) documentation for more details.
+**Note:** Most of these customization options can also be configured using the `k3kcli` tool. Refer to the [k3kcli](./cli/k3kcli.md) documentation for more details.


@@ -22,7 +22,7 @@ This example creates a "shared" mode K3k cluster with:

 ```yaml
-apiVersion: k3k.io/v1alpha1
+apiVersion: k3k.io/v1beta1
 kind: Cluster
 metadata:
   name: my-virtual-cluster
````
@@ -33,6 +33,7 @@ k3kcli cluster create [command options] NAME
|
||||
--service-cidr string service CIDR
|
||||
--storage-class-name string storage class name for dynamic persistence type
|
||||
--storage-request-size string storage size for dynamic persistence type
|
||||
--timeout duration The timeout for waiting for the cluster to become ready (e.g., 10s, 5m, 1h). (default 3m0s)
|
||||
--token string token of the cluster
|
||||
--version string k3s version
|
||||
```
|
||||
|
||||
@@ -1,10 +1,10 @@
# API Reference

## Packages
- [k3k.io/v1alpha1](#k3kiov1alpha1)
- [k3k.io/v1beta1](#k3kiov1beta1)


## k3k.io/v1alpha1
## k3k.io/v1beta1


### Resource Types

@@ -47,7 +47,7 @@ _Appears in:_
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `apiVersion` _string_ | `k3k.io/v1alpha1` | | |
|
||||
| `apiVersion` _string_ | `k3k.io/v1beta1` | | |
|
||||
| `kind` _string_ | `Cluster` | | |
|
||||
| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
|
||||
| `spec` _[ClusterSpec](#clusterspec)_ | Spec defines the desired state of the Cluster. | \{ \} | |
|
||||
@@ -65,7 +65,7 @@ ClusterList is a list of Cluster resources.
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `apiVersion` _string_ | `k3k.io/v1alpha1` | | |
|
||||
| `apiVersion` _string_ | `k3k.io/v1beta1` | | |
|
||||
| `kind` _string_ | `ClusterList` | | |
|
||||
| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
|
||||
| `items` _[Cluster](#cluster) array_ | | | |
|
||||
@@ -134,10 +134,28 @@ _Appears in:_
|
||||
| `workerLimit` _[ResourceList](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core)_ | WorkerLimit specifies resource limits for agent nodes. | | |
|
||||
| `mirrorHostNodes` _boolean_ | MirrorHostNodes controls whether node objects from the host cluster<br />are mirrored into the virtual cluster. | | |
|
||||
| `customCAs` _[CustomCAs](#customcas)_ | CustomCAs specifies the cert/key pairs for custom CA certificates. | | |
|
||||
| `sync` _[SyncConfig](#syncconfig)_ | Sync specifies the resources types that will be synced from virtual cluster to host cluster. | \{ \} | |
|
||||
|
||||
|
||||
|
||||
|
||||
#### ConfigMapSyncConfig
|
||||
|
||||
|
||||
|
||||
ConfigMapSyncConfig specifies the sync options for services.
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [SyncConfig](#syncconfig)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | true | |
|
||||
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
|
||||
|
||||
|
||||
#### CredentialSource
|
||||
|
||||
|
||||
@@ -190,7 +208,7 @@ _Appears in:_
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `enabled` _boolean_ | Enabled toggles this feature on or off. | | |
|
||||
| `enabled` _boolean_ | Enabled toggles this feature on or off. | true | |
|
||||
| `sources` _[CredentialSources](#credentialsources)_ | Sources defines the sources for all required custom CA certificates. | | |
|
||||
|
||||
|
||||
@@ -208,7 +226,7 @@ _Appears in:_
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `ingress` _[IngressConfig](#ingressconfig)_ | Ingress specifies options for exposing the API server through an Ingress. | | |
|
||||
| `loadbalancer` _[LoadBalancerConfig](#loadbalancerconfig)_ | LoadBalancer specifies options for exposing the API server through a LoadBalancer service. | | |
|
||||
| `loadBalancer` _[LoadBalancerConfig](#loadbalancerconfig)_ | LoadBalancer specifies options for exposing the API server through a LoadBalancer service. | | |
|
||||
| `nodePort` _[NodePortConfig](#nodeportconfig)_ | NodePort specifies options for exposing the API server through NodePort. | | |
|
||||
|
||||
|
||||
@@ -229,6 +247,23 @@ _Appears in:_
|
||||
| `ingressClassName` _string_ | IngressClassName specifies the IngressClass to use for the Ingress. | | |
|
||||
|
||||
|
||||
#### IngressSyncConfig
|
||||
|
||||
|
||||
|
||||
IngressSyncConfig specifies the sync options for services.
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [SyncConfig](#syncconfig)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | false | |
|
||||
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
|
||||
|
||||
|
||||
#### LoadBalancerConfig
|
||||
|
||||
|
||||
@@ -278,7 +313,7 @@ _Appears in:_
|
||||
| --- | --- | --- | --- |
|
||||
| `type` _[PersistenceMode](#persistencemode)_ | Type specifies the persistence mode. | dynamic | |
|
||||
| `storageClassName` _string_ | StorageClassName is the name of the StorageClass to use for the PVC.<br />This field is only relevant in "dynamic" mode. | | |
|
||||
| `storageRequestSize` _string_ | StorageRequestSize is the requested size for the PVC.<br />This field is only relevant in "dynamic" mode. | 1G | |
|
||||
| `storageRequestSize` _string_ | StorageRequestSize is the requested size for the PVC.<br />This field is only relevant in "dynamic" mode. | 2G | |
|
||||
|
||||
|
||||
#### PersistenceMode
|
||||
@@ -294,6 +329,23 @@ _Appears in:_
|
||||
|
||||
|
||||
|
||||
#### PersistentVolumeClaimSyncConfig
|
||||
|
||||
|
||||
|
||||
PersistentVolumeClaimSyncConfig specifies the sync options for services.
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [SyncConfig](#syncconfig)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | true | |
|
||||
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
|
||||
|
||||
|
||||
#### PodSecurityAdmissionLevel
|
||||
|
||||
_Underlying type:_ _string_
|
||||
@@ -308,6 +360,79 @@ _Appears in:_
|
||||
|
||||
|
||||
|
||||
#### PriorityClassSyncConfig
|
||||
|
||||
|
||||
|
||||
PriorityClassSyncConfig specifies the sync options for services.
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [SyncConfig](#syncconfig)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | false | |
|
||||
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
|
||||
|
||||
|
||||
#### SecretSyncConfig
|
||||
|
||||
|
||||
|
||||
SecretSyncConfig specifies the sync options for services.
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [SyncConfig](#syncconfig)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | true | |
|
||||
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
|
||||
|
||||
|
||||
#### ServiceSyncConfig
|
||||
|
||||
|
||||
|
||||
ServiceSyncConfig specifies the sync options for services.
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [SyncConfig](#syncconfig)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | true | |
|
||||
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
|
||||
|
||||
|
||||
#### SyncConfig



SyncConfig will contain the resources that should be synced from virtual cluster to host cluster.



_Appears in:_
- [ClusterSpec](#clusterspec)
- [VirtualClusterPolicySpec](#virtualclusterpolicyspec)

| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `services` _[ServiceSyncConfig](#servicesyncconfig)_ | Services resources sync configuration. | \{ enabled:true \} | |
| `configMaps` _[ConfigMapSyncConfig](#configmapsyncconfig)_ | ConfigMaps resources sync configuration. | \{ enabled:true \} | |
| `secrets` _[SecretSyncConfig](#secretsyncconfig)_ | Secrets resources sync configuration. | \{ enabled:true \} | |
| `ingresses` _[IngressSyncConfig](#ingresssyncconfig)_ | Ingresses resources sync configuration. | \{ enabled:false \} | |
| `persistentVolumeClaims` _[PersistentVolumeClaimSyncConfig](#persistentvolumeclaimsyncconfig)_ | PersistentVolumeClaims resources sync configuration. | \{ enabled:true \} | |
| `priorityClasses` _[PriorityClassSyncConfig](#priorityclasssyncconfig)_ | PriorityClasses resources sync configuration. | \{ enabled:false \} | |

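As a quick orientation (not part of the generated reference above), here is a hedged sketch of how these sync options might be set on a `Cluster` manifest; the sync field names come from the table above, while the cluster name and selector labels are hypothetical:

```yaml
# Illustrative sketch only; sync field names are taken from the SyncConfig table above.
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
  name: sync-example          # hypothetical name
spec:
  mode: shared
  sync:
    services:
      enabled: true           # default
    secrets:
      enabled: true
      selector:
        app: demo             # hypothetical label selector; an empty selector syncs all secrets
    ingresses:
      enabled: false          # default
    priorityClasses:
      enabled: false          # default
```
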
#### VirtualClusterPolicy
|
||||
|
||||
|
||||
@@ -322,7 +447,7 @@ _Appears in:_
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `apiVersion` _string_ | `k3k.io/v1alpha1` | | |
|
||||
| `apiVersion` _string_ | `k3k.io/v1beta1` | | |
|
||||
| `kind` _string_ | `VirtualClusterPolicy` | | |
|
||||
| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
|
||||
| `spec` _[VirtualClusterPolicySpec](#virtualclusterpolicyspec)_ | Spec defines the desired state of the VirtualClusterPolicy. | \{ \} | |
|
||||
@@ -340,7 +465,7 @@ VirtualClusterPolicyList is a list of VirtualClusterPolicy resources.
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `apiVersion` _string_ | `k3k.io/v1alpha1` | | |
|
||||
| `apiVersion` _string_ | `k3k.io/v1beta1` | | |
|
||||
| `kind` _string_ | `VirtualClusterPolicyList` | | |
|
||||
| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
|
||||
| `items` _[VirtualClusterPolicy](#virtualclusterpolicy) array_ | | | |
|
||||
@@ -366,6 +491,7 @@ _Appears in:_
|
||||
| `allowedMode` _[ClusterMode](#clustermode)_ | AllowedMode specifies the allowed cluster provisioning mode. Defaults to "shared". | shared | Enum: [shared virtual] <br /> |
|
||||
| `disableNetworkPolicy` _boolean_ | DisableNetworkPolicy indicates whether to disable the creation of a default network policy for cluster isolation. | | |
|
||||
| `podSecurityAdmissionLevel` _[PodSecurityAdmissionLevel](#podsecurityadmissionlevel)_ | PodSecurityAdmissionLevel specifies the pod security admission level applied to the pods in the namespace. | | Enum: [privileged baseline restricted] <br /> |
|
||||
| `sync` _[SyncConfig](#syncconfig)_ | Sync specifies the resources types that will be synced from virtual cluster to host cluster. | \{ \} | |
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -130,7 +130,7 @@ Create then the virtual cluster exposing through NodePort one of the ports that

```bash
cat <<EOF | kubectl apply -f -
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
  name: mycluster

@@ -32,18 +32,27 @@ Load these images into your internal (air-gapped) registry.
Update the `values.yaml` file in the K3k Helm chart with air gap settings:

```yaml
image:
repository: rancher/k3k
tag: "" # Specify the version tag
pullPolicy: "" # Optional: "IfNotPresent", "Always", etc.

sharedAgent:
controller:
imagePullSecrets: [] # Optional
image:
repository: rancher/k3k-kubelet
tag: "" # Specify the version tag
pullPolicy: "" # Optional
repository: rancher/k3k
tag: "" # Specify the version tag
pullPolicy: "" # Optional: "IfNotPresent", "Always", etc.

k3sServer:
agent:
imagePullSecrets: []
virtual:
image:
repository: rancher/k3s
pullPolicy: "" # Optional
shared:
image:
repository: rancher/k3k-kubelet
tag: "" # Specify the version tag
pullPolicy: "" # Optional

server:
imagePullSecrets: [] # Optional
image:
repository: rancher/k3s
pullPolicy: "" # Optional

@@ -17,7 +17,7 @@ This guide walks through the various ways to create and manage virtual clusters
### CRD Method

```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
  name: k3kcluster-ingress

@@ -46,7 +46,7 @@ This will create a virtual cluster in `shared` mode and expose it via an ingress
### CRD Method

```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
  name: k3kcluster-persistent

@@ -80,7 +80,7 @@ k3kcli cluster create \
### CRD Method

```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
  name: k3kcluster-ha

@@ -105,7 +105,7 @@ k3kcli cluster create \
### CRD Method

```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
  name: k3kcluster-virtual

@@ -136,7 +136,7 @@ k3kcli cluster create \
### CRD Method

```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
  name: k3kcluster-ephemeral

@@ -162,7 +162,7 @@ k3kcli cluster create \
### CRD Method

```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
  name: k3kcluster-custom-k8s

@@ -189,7 +189,7 @@ k3kcli cluster create \
### CRD Method

```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
  name: k3kcluster-resourced

@@ -216,7 +216,7 @@ This configures the CPU and memory limit for the virtual cluster.
### CRD Method

```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
  name: k3kcluster-node-placed

@@ -259,7 +259,7 @@ k3kcli cluster create \
### CRD Method

```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
  name: k3kcluster-http-proxy

@@ -37,7 +37,7 @@ If you create a `VirtualClusterPolicy` without specifying any `spec` fields (e.g

```yaml
# Example of a minimal VCP (after creation with defaults)
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: VirtualClusterPolicy
metadata:
  name: my-default-policy

@@ -56,7 +56,7 @@ You can restrict the `mode` (e.g., "shared" or "virtual") in which K3k `Cluster`
**Example:** Allow only "shared" mode clusters.

```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: VirtualClusterPolicy
metadata:
  name: shared-only-policy

@@ -74,7 +74,7 @@ You can define resource consumption limits for bound Namespaces by specifying a
**Example:** Set CPU, memory, and pod limits.

```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: VirtualClusterPolicy
metadata:
  name: quota-policy

@@ -93,7 +93,7 @@ You can define default resource requests/limits and min/max constraints for cont
**Example:** Define default CPU requests/limits and min/max CPU.

```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: VirtualClusterPolicy
metadata:
  name: limit-policy

@@ -118,7 +118,7 @@ By default, K3k creates a `NetworkPolicy` in bound Namespaces to provide network
**Example:** Disable the default NetworkPolicy.

```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: VirtualClusterPolicy
metadata:
  name: no-default-netpol-policy

@@ -133,7 +133,7 @@ You can enforce Pod Security Standards (PSS) by specifying a Pod Security Admiss
**Example:** Enforce the "baseline" PSS level.

```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: VirtualClusterPolicy
metadata:
  name: baseline-psa-policy

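Taken together, a hedged sketch of a `VirtualClusterPolicy` that combines several of the options documented above might look like the following; the policy name is hypothetical and the field names follow the VirtualClusterPolicySpec reference earlier in this diff:

```yaml
# Illustrative sketch only; field names follow the VirtualClusterPolicySpec reference.
apiVersion: k3k.io/v1beta1
kind: VirtualClusterPolicy
metadata:
  name: combined-policy                # hypothetical name
spec:
  allowedMode: shared                  # restrict bound Namespaces to "shared" clusters
  disableNetworkPolicy: false          # keep the default isolation NetworkPolicy
  podSecurityAdmissionLevel: baseline  # enforce the "baseline" PSS level
```
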
@@ -1,19 +0,0 @@
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
  name: example1
spec:
  mode: "shared"
  servers: 1
  agents: 3
  token: test
  version: v1.26.0-k3s2
  clusterCIDR: 10.30.0.0/16
  serviceCIDR: 10.31.0.0/16
  clusterDNS: 10.30.0.10
  serverArgs:
    - "--write-kubeconfig-mode=777"
  expose:
    ingress:
      enabled: true
      ingressClassName: "nginx"
15
examples/shared-multiple-servers.yaml
Normal file
@@ -0,0 +1,15 @@
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
  name: shared-multiple-servers
spec:
  mode: shared
  servers: 3
  agents: 3
  version: v1.33.1-k3s1
  serverArgs:
    - "--write-kubeconfig-mode=777"
  tlsSANs:
    - myserver.app
  expose:
    nodePort: {}
14
examples/shared-single-server.yaml
Normal file
@@ -0,0 +1,14 @@
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
  name: shared-single-server
spec:
  mode: shared
  servers: 1
  version: v1.33.1-k3s1
  serverArgs:
    - "--write-kubeconfig-mode=777"
  tlsSANs:
    - myserver.app
  expose:
    nodePort: {}
@@ -1,19 +0,0 @@
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
  name: single-server
spec:
  mode: "shared"
  servers: 1
  agents: 3
  token: test
  version: v1.26.0-k3s2
  clusterCIDR: 10.30.0.0/16
  serviceCIDR: 10.31.0.0/16
  clusterDNS: 10.30.0.10
  serverArgs:
    - "--write-kubeconfig-mode=777"
  expose:
    ingress:
      enabled: true
      ingressClassName: "nginx"
13
examples/virtual-server.yaml
Normal file
@@ -0,0 +1,13 @@
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
  name: virtual-server
spec:
  mode: virtual
  servers: 3
  agents: 3
  version: v1.33.1-k3s1
  tlsSANs:
    - myserver.app
  expose:
    nodePort: {}
@@ -1,9 +1,9 @@
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: VirtualClusterPolicy
metadata:
  name: policy-example
# spec:
# disableNetworkPolicy: false
# allowedMode: "shared"
spec:
  allowedMode: shared
  disableNetworkPolicy: true
# podSecurityAdmissionLevel: "baseline"
# defaultPriorityClass: "lowpriority"
21
go.mod
@@ -11,6 +11,7 @@ replace (
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/go-logr/logr v1.4.2
|
||||
github.com/go-logr/zapr v1.3.0
|
||||
github.com/google/go-cmp v0.7.0
|
||||
github.com/onsi/ginkgo/v2 v2.21.0
|
||||
@@ -42,16 +43,6 @@ require (
|
||||
sigs.k8s.io/controller-runtime v0.19.4
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
|
||||
github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.2.3 // indirect
|
||||
github.com/sagikazarmark/locafero v0.7.0 // indirect
|
||||
github.com/sourcegraph/conc v0.3.0 // indirect
|
||||
github.com/spf13/afero v1.12.0 // indirect
|
||||
github.com/subosito/gotenv v1.6.0 // indirect
|
||||
)
|
||||
|
||||
require (
|
||||
dario.cat/mergo v1.0.1 // indirect
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
|
||||
@@ -64,6 +55,7 @@ require (
|
||||
github.com/Masterminds/squirrel v1.5.4 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.2 // indirect
|
||||
github.com/NYTimes/gziphandler v1.1.1 // indirect
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
@@ -98,13 +90,13 @@ require (
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
|
||||
github.com/go-errors/errors v1.4.2 // indirect
|
||||
github.com/go-gorp/gorp/v3 v3.1.0 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-ole/go-ole v1.2.6 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.2 // indirect
|
||||
github.com/go-openapi/swag v0.23.0 // indirect
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
|
||||
github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
|
||||
github.com/gobwas/glob v0.2.3 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
@@ -161,6 +153,7 @@ require (
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.0 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.2.3 // indirect
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
@@ -171,13 +164,17 @@ require (
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/rubenv/sql-migrate v1.7.1 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/sagikazarmark/locafero v0.7.0 // indirect
|
||||
github.com/shirou/gopsutil/v3 v3.23.12 // indirect
|
||||
github.com/shoenig/go-m1cpu v0.1.6 // indirect
|
||||
github.com/shopspring/decimal v1.4.0 // indirect
|
||||
github.com/sourcegraph/conc v0.3.0 // indirect
|
||||
github.com/spf13/afero v1.12.0 // indirect
|
||||
github.com/spf13/cast v1.7.1 // indirect
|
||||
github.com/spf13/cobra v1.9.1
|
||||
github.com/spf13/pflag v1.0.6
|
||||
github.com/stoewer/go-strcase v1.3.0 // indirect
|
||||
github.com/subosito/gotenv v1.6.0 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.12 // indirect
|
||||
github.com/tklauser/numcpus v0.6.1 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
@@ -218,7 +215,7 @@ require (
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/klog/v2 v2.130.1 // indirect
|
||||
k8s.io/klog/v2 v2.130.1
|
||||
k8s.io/kms v0.31.4 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
|
||||
oras.land/oras-go v1.2.5 // indirect
|
||||
|
||||
@@ -1,198 +0,0 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
|
||||
"github.com/rancher/k3k/pkg/controller"
|
||||
k3klog "github.com/rancher/k3k/pkg/log"
|
||||
)
|
||||
|
||||
const ConfigMapSyncerName = "configmap-syncer"
|
||||
|
||||
type ConfigMapSyncer struct {
|
||||
mutex sync.RWMutex
|
||||
// VirtualClient is the client for the virtual cluster
|
||||
VirtualClient client.Client
|
||||
// CoreClient is the client for the host cluster
|
||||
HostClient client.Client
|
||||
// TranslateFunc is the function that translates a given resource from it's virtual representation to the host
|
||||
// representation
|
||||
TranslateFunc func(*corev1.ConfigMap) (*corev1.ConfigMap, error)
|
||||
// Logger is the logger that the controller will use
|
||||
Logger *k3klog.Logger
|
||||
// objs are the objects that the syncer should watch/syncronize. Should only be manipulated
|
||||
// through add/remove
|
||||
objs sets.Set[types.NamespacedName]
|
||||
}
|
||||
|
||||
func (c *ConfigMapSyncer) Name() string {
|
||||
return ConfigMapSyncerName
|
||||
}
|
||||
|
||||
// Reconcile implements reconcile.Reconciler and synchronizes the objects in objs to the host cluster
|
||||
func (c *ConfigMapSyncer) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
if !c.isWatching(req.NamespacedName) {
|
||||
// return immediately without re-enqueueing. We aren't watching this resource
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
var virtual corev1.ConfigMap
|
||||
|
||||
if err := c.VirtualClient.Get(ctx, req.NamespacedName, &virtual); err != nil {
|
||||
return reconcile.Result{
|
||||
Requeue: true,
|
||||
}, fmt.Errorf("unable to get configmap %s/%s from virtual cluster: %w", req.Namespace, req.Name, err)
|
||||
}
|
||||
|
||||
translated, err := c.TranslateFunc(&virtual)
|
||||
if err != nil {
|
||||
return reconcile.Result{
|
||||
Requeue: true,
|
||||
}, fmt.Errorf("unable to translate configmap %s/%s from virtual cluster: %w", req.Namespace, req.Name, err)
|
||||
}
|
||||
|
||||
translatedKey := types.NamespacedName{
|
||||
Namespace: translated.Namespace,
|
||||
Name: translated.Name,
|
||||
}
|
||||
|
||||
var host corev1.ConfigMap
|
||||
if err = c.HostClient.Get(ctx, translatedKey, &host); err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
err = c.HostClient.Create(ctx, translated)
|
||||
// for simplicity's sake, we don't check for conflict errors. The existing object will get
|
||||
// picked up on in the next re-enqueue
|
||||
return reconcile.Result{
|
||||
Requeue: true,
|
||||
}, fmt.Errorf("unable to create host configmap %s/%s for virtual configmap %s/%s: %w",
|
||||
translated.Namespace, translated.Name, req.Namespace, req.Name, err)
|
||||
}
|
||||
|
||||
return reconcile.Result{Requeue: true}, fmt.Errorf("unable to get host configmap %s/%s: %w", translated.Namespace, translated.Name, err)
|
||||
}
|
||||
// we are going to use the host in order to avoid conflicts on update
|
||||
host.Data = translated.Data
|
||||
if host.Labels == nil {
|
||||
host.Labels = make(map[string]string, len(translated.Labels))
|
||||
}
|
||||
// we don't want to override labels made on the host cluster by other applications
|
||||
// but we do need to make sure the labels that the kubelet uses to track host cluster values
|
||||
// are being tracked appropriately
|
||||
for key, value := range translated.Labels {
|
||||
host.Labels[key] = value
|
||||
}
|
||||
|
||||
if err = c.HostClient.Update(ctx, &host); err != nil {
|
||||
return reconcile.Result{
|
||||
Requeue: true,
|
||||
}, fmt.Errorf("unable to update host configmap %s/%s for virtual configmap %s/%s: %w",
|
||||
translated.Namespace, translated.Name, req.Namespace, req.Name, err)
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
// isWatching is a utility method to determine if a key is in objs without the caller needing
|
||||
// to handle mutex lock/unlock.
|
||||
func (c *ConfigMapSyncer) isWatching(key types.NamespacedName) bool {
|
||||
c.mutex.RLock()
|
||||
defer c.mutex.RUnlock()
|
||||
|
||||
return c.objs.Has(key)
|
||||
}
|
||||
|
||||
// AddResource adds a given resource to the list of resources that will be synced. Safe to call multiple times for the
|
||||
// same resource.
|
||||
func (c *ConfigMapSyncer) AddResource(ctx context.Context, namespace, name string) error {
|
||||
objKey := types.NamespacedName{
|
||||
Namespace: namespace,
|
||||
Name: name,
|
||||
}
|
||||
|
||||
// if we already sync this object, no need to writelock/add it
|
||||
if c.isWatching(objKey) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// lock in write mode since we are now adding the key
|
||||
c.mutex.Lock()
|
||||
|
||||
if c.objs == nil {
|
||||
c.objs = sets.Set[types.NamespacedName]{}
|
||||
}
|
||||
|
||||
c.objs = c.objs.Insert(objKey)
|
||||
c.mutex.Unlock()
|
||||
|
||||
_, err := c.Reconcile(ctx, reconcile.Request{
|
||||
NamespacedName: objKey,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to reconcile new object %s/%s: %w", objKey.Namespace, objKey.Name, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveResource removes a given resource from the list of resources that will be synced. Safe to call for an already
|
||||
// removed resource.
|
||||
func (c *ConfigMapSyncer) RemoveResource(ctx context.Context, namespace, name string) error {
|
||||
objKey := types.NamespacedName{
|
||||
Namespace: namespace,
|
||||
Name: name,
|
||||
}
|
||||
// if we don't sync this object, no need to writelock/add it
|
||||
if !c.isWatching(objKey) {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := retry.OnError(controller.Backoff, func(err error) bool {
|
||||
return err != nil
|
||||
}, func() error {
|
||||
return c.removeHostConfigMap(ctx, namespace, name)
|
||||
}); err != nil {
|
||||
return fmt.Errorf("unable to remove configmap: %w", err)
|
||||
}
|
||||
|
||||
c.mutex.Lock()
|
||||
|
||||
if c.objs == nil {
|
||||
c.objs = sets.Set[types.NamespacedName]{}
|
||||
}
|
||||
|
||||
c.objs = c.objs.Delete(objKey)
|
||||
c.mutex.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *ConfigMapSyncer) removeHostConfigMap(ctx context.Context, virtualNamespace, virtualName string) error {
|
||||
var vConfigMap corev1.ConfigMap
|
||||
|
||||
key := types.NamespacedName{
|
||||
Namespace: virtualNamespace,
|
||||
Name: virtualName,
|
||||
}
|
||||
|
||||
if err := c.VirtualClient.Get(ctx, key, &vConfigMap); err != nil {
|
||||
return fmt.Errorf("unable to get virtual configmap %s/%s: %w", virtualNamespace, virtualName, err)
|
||||
}
|
||||
|
||||
translated, err := c.TranslateFunc(&vConfigMap)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to translate virtual secret: %s/%s: %w", virtualNamespace, virtualName, err)
|
||||
}
|
||||
|
||||
return c.HostClient.Delete(ctx, translated)
|
||||
}
|
||||
@@ -1,135 +0,0 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
k3klog "github.com/rancher/k3k/pkg/log"
|
||||
)
|
||||
|
||||
type ControllerHandler struct {
|
||||
sync.RWMutex
|
||||
// Mgr is the manager used to run new controllers - from the virtual cluster
|
||||
Mgr manager.Manager
|
||||
// Scheme is the scheme used to run new controllers - from the virtual cluster
|
||||
Scheme runtime.Scheme
|
||||
// HostClient is the client used to communicate with the host cluster
|
||||
HostClient client.Client
|
||||
// VirtualClient is the client used to communicate with the virtual cluster
|
||||
VirtualClient client.Client
|
||||
// Translator is the translator that will be used to adjust objects before they
|
||||
// are made on the host cluster
|
||||
Translator translate.ToHostTranslator
|
||||
// Logger is the logger that the controller will use to log errors
|
||||
Logger *k3klog.Logger
|
||||
// controllers are the controllers which are currently running
|
||||
controllers map[schema.GroupVersionKind]updateableReconciler
|
||||
}
|
||||
|
||||
// updateableReconciler is a reconciler that only syncs specific resources (by name/namespace). This list can
|
||||
// be altered through the Add and Remove methods
|
||||
type updateableReconciler interface {
|
||||
reconcile.Reconciler
|
||||
Name() string
|
||||
AddResource(ctx context.Context, namespace string, name string) error
|
||||
RemoveResource(ctx context.Context, namespace string, name string) error
|
||||
}
|
||||
|
||||
func (c *ControllerHandler) AddResource(ctx context.Context, obj client.Object) error {
|
||||
c.RLock()
|
||||
|
||||
controllers := c.controllers
|
||||
if controllers != nil {
|
||||
if r, ok := c.controllers[obj.GetObjectKind().GroupVersionKind()]; ok {
|
||||
err := r.AddResource(ctx, obj.GetNamespace(), obj.GetName())
|
||||
|
||||
c.RUnlock()
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// we need to manually lock/unlock since we intned on write locking to add a new controller
|
||||
c.RUnlock()
|
||||
|
||||
var r updateableReconciler
|
||||
|
||||
switch obj.(type) {
|
||||
case *v1.Secret:
|
||||
r = &SecretSyncer{
|
||||
HostClient: c.HostClient,
|
||||
VirtualClient: c.VirtualClient,
|
||||
// TODO: Need actual function
|
||||
TranslateFunc: func(s *v1.Secret) (*v1.Secret, error) {
|
||||
// note that this doesn't do any type safety - fix this
|
||||
// when generics work
|
||||
c.Translator.TranslateTo(s)
|
||||
// Remove service-account-token types when synced to the host
|
||||
if s.Type == v1.SecretTypeServiceAccountToken {
|
||||
s.Type = v1.SecretTypeOpaque
|
||||
}
|
||||
return s, nil
|
||||
},
|
||||
Logger: c.Logger,
|
||||
}
|
||||
case *v1.ConfigMap:
|
||||
r = &ConfigMapSyncer{
|
||||
HostClient: c.HostClient,
|
||||
VirtualClient: c.VirtualClient,
|
||||
// TODO: Need actual function
|
||||
TranslateFunc: func(s *v1.ConfigMap) (*v1.ConfigMap, error) {
|
||||
c.Translator.TranslateTo(s)
|
||||
return s, nil
|
||||
},
|
||||
Logger: c.Logger,
|
||||
}
|
||||
default:
|
||||
// TODO: Technically, the configmap/secret syncers are relatively generic, and this
|
||||
// logic could be used for other types.
|
||||
return fmt.Errorf("unrecognized type: %T", obj)
|
||||
}
|
||||
|
||||
err := ctrl.NewControllerManagedBy(c.Mgr).
|
||||
Named(r.Name()).
|
||||
For(&v1.ConfigMap{}).
|
||||
Complete(r)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to start configmap controller: %w", err)
|
||||
}
|
||||
|
||||
c.Lock()
|
||||
|
||||
if c.controllers == nil {
|
||||
c.controllers = map[schema.GroupVersionKind]updateableReconciler{}
|
||||
}
|
||||
|
||||
c.controllers[obj.GetObjectKind().GroupVersionKind()] = r
|
||||
|
||||
c.Unlock()
|
||||
|
||||
return r.AddResource(ctx, obj.GetNamespace(), obj.GetName())
|
||||
}
|
||||
|
||||
func (c *ControllerHandler) RemoveResource(ctx context.Context, obj client.Object) error {
|
||||
// since we aren't adding a new controller, we don't need to lock
|
||||
c.RLock()
|
||||
ctrl, ok := c.controllers[obj.GetObjectKind().GroupVersionKind()]
|
||||
c.RUnlock()
|
||||
|
||||
if !ok {
|
||||
return fmt.Errorf("no controller found for gvk %s", obj.GetObjectKind().GroupVersionKind())
|
||||
}
|
||||
|
||||
return ctrl.RemoveResource(ctx, obj.GetNamespace(), obj.GetName())
|
||||
}
|
||||
@@ -1,120 +0,0 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
)
|
||||
|
||||
const (
|
||||
pvcController = "pvc-syncer-controller"
|
||||
pvcFinalizerName = "pvc.k3k.io/finalizer"
|
||||
)
|
||||
|
||||
type PVCReconciler struct {
|
||||
clusterName string
|
||||
clusterNamespace string
|
||||
|
||||
virtualClient ctrlruntimeclient.Client
|
||||
hostClient ctrlruntimeclient.Client
|
||||
Scheme *runtime.Scheme
|
||||
HostScheme *runtime.Scheme
|
||||
Translator translate.ToHostTranslator
|
||||
}
|
||||
|
||||
// AddPVCSyncer adds persistentvolumeclaims syncer controller to k3k-kubelet
|
||||
func AddPVCSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
|
||||
translator := translate.ToHostTranslator{
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
}
|
||||
|
||||
// initialize a new Reconciler
|
||||
reconciler := PVCReconciler{
|
||||
clusterName: clusterName,
|
||||
clusterNamespace: clusterNamespace,
|
||||
|
||||
virtualClient: virtMgr.GetClient(),
|
||||
hostClient: hostMgr.GetClient(),
|
||||
Scheme: virtMgr.GetScheme(),
|
||||
HostScheme: hostMgr.GetScheme(),
|
||||
Translator: translator,
|
||||
}
|
||||
|
||||
return ctrl.NewControllerManagedBy(virtMgr).
|
||||
Named(pvcController).
|
||||
For(&v1.PersistentVolumeClaim{}).
|
||||
Complete(&reconciler)
|
||||
}
|
||||
|
||||
func (r *PVCReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.clusterName, "clusterNamespace", r.clusterNamespace)
|
||||
ctx = ctrl.LoggerInto(ctx, log)
|
||||
|
||||
var (
|
||||
virtPVC v1.PersistentVolumeClaim
|
||||
cluster v1alpha1.Cluster
|
||||
)
|
||||
|
||||
if err := r.hostClient.Get(ctx, types.NamespacedName{Name: r.clusterName, Namespace: r.clusterNamespace}, &cluster); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
if err := r.virtualClient.Get(ctx, req.NamespacedName, &virtPVC); err != nil {
|
||||
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
syncedPVC := r.pvc(&virtPVC)
|
||||
if err := controllerutil.SetControllerReference(&cluster, syncedPVC, r.HostScheme); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
// handle deletion
|
||||
if !virtPVC.DeletionTimestamp.IsZero() {
|
||||
// deleting the synced service if exists
|
||||
if err := r.hostClient.Delete(ctx, syncedPVC); !apierrors.IsNotFound(err) {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
// remove the finalizer after cleaning up the synced service
|
||||
if controllerutil.RemoveFinalizer(&virtPVC, pvcFinalizerName) {
|
||||
if err := r.virtualClient.Update(ctx, &virtPVC); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
// Add finalizer if it does not exist
|
||||
if controllerutil.AddFinalizer(&virtPVC, pvcFinalizerName) {
|
||||
if err := r.virtualClient.Update(ctx, &virtPVC); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
// create the pvc on host
|
||||
log.Info("creating the persistent volume for the first time on the host cluster")
|
||||
|
||||
// note that we dont need to update the PVC on the host cluster, only syncing the PVC to allow being
|
||||
// handled by the host cluster.
|
||||
return reconcile.Result{}, ctrlruntimeclient.IgnoreAlreadyExists(r.hostClient.Create(ctx, syncedPVC))
|
||||
}
|
||||
|
||||
func (r *PVCReconciler) pvc(obj *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim {
|
||||
hostPVC := obj.DeepCopy()
|
||||
r.Translator.TranslateTo(hostPVC)
|
||||
|
||||
return hostPVC
|
||||
}
|
||||
@@ -1,197 +0,0 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
|
||||
"github.com/rancher/k3k/pkg/controller"
|
||||
k3klog "github.com/rancher/k3k/pkg/log"
|
||||
)
|
||||
|
||||
const SecretSyncerName = "secret-syncer"
|
||||
|
||||
type SecretSyncer struct {
|
||||
mutex sync.RWMutex
|
||||
// VirtualClient is the client for the virtual cluster
|
||||
VirtualClient client.Client
|
||||
// CoreClient is the client for the host cluster
|
||||
HostClient client.Client
|
||||
// TranslateFunc is the function that translates a given resource from it's virtual representation to the host
|
||||
// representation
|
||||
TranslateFunc func(*corev1.Secret) (*corev1.Secret, error)
|
||||
// Logger is the logger that the controller will use
|
||||
Logger *k3klog.Logger
|
||||
// objs are the objects that the syncer should watch/syncronize. Should only be manipulated
|
||||
// through add/remove
|
||||
objs sets.Set[types.NamespacedName]
|
||||
}
|
||||
|
||||
func (s *SecretSyncer) Name() string {
|
||||
return SecretSyncerName
|
||||
}
|
||||
|
||||
// Reconcile implements reconcile.Reconciler and synchronizes the objects in objs to the host cluster
|
||||
func (s *SecretSyncer) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
if !s.isWatching(req.NamespacedName) {
|
||||
// return immediately without re-enqueueing. We aren't watching this resource
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
var virtual corev1.Secret
|
||||
|
||||
if err := s.VirtualClient.Get(ctx, req.NamespacedName, &virtual); err != nil {
|
||||
return reconcile.Result{
|
||||
Requeue: true,
|
||||
}, fmt.Errorf("unable to get secret %s/%s from virtual cluster: %w", req.Namespace, req.Name, err)
|
||||
}
|
||||
|
||||
translated, err := s.TranslateFunc(&virtual)
|
||||
if err != nil {
|
||||
return reconcile.Result{
|
||||
Requeue: true,
|
||||
}, fmt.Errorf("unable to translate secret %s/%s from virtual cluster: %w", req.Namespace, req.Name, err)
|
||||
}
|
||||
|
||||
translatedKey := types.NamespacedName{
|
||||
Namespace: translated.Namespace,
|
||||
Name: translated.Name,
|
||||
}
|
||||
|
||||
var host corev1.Secret
|
||||
if err = s.HostClient.Get(ctx, translatedKey, &host); err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
err = s.HostClient.Create(ctx, translated)
|
||||
// for simplicity's sake, we don't check for conflict errors. The existing object will get
|
||||
// picked up on in the next re-enqueue
|
||||
return reconcile.Result{
|
||||
Requeue: true,
|
||||
}, fmt.Errorf("unable to create host secret %s/%s for virtual secret %s/%s: %w",
|
||||
translated.Namespace, translated.Name, req.Namespace, req.Name, err)
|
||||
}
|
||||
|
||||
return reconcile.Result{Requeue: true}, fmt.Errorf("unable to get host secret %s/%s: %w", translated.Namespace, translated.Name, err)
|
||||
}
|
||||
// we are going to use the host in order to avoid conflicts on update
|
||||
host.Data = translated.Data
|
||||
if host.Labels == nil {
|
||||
host.Labels = make(map[string]string, len(translated.Labels))
|
||||
}
|
||||
// we don't want to override labels made on the host cluster by other applications
|
||||
// but we do need to make sure the labels that the kubelet uses to track host cluster values
|
||||
// are being tracked appropriately
|
||||
for key, value := range translated.Labels {
|
||||
host.Labels[key] = value
|
||||
}
|
||||
|
||||
if err = s.HostClient.Update(ctx, &host); err != nil {
|
||||
return reconcile.Result{
|
||||
Requeue: true,
|
||||
}, fmt.Errorf("unable to update host secret %s/%s for virtual secret %s/%s: %w",
|
||||
translated.Namespace, translated.Name, req.Namespace, req.Name, err)
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
// isWatching is a utility method to determine if a key is in objs without the caller needing
|
||||
// to handle mutex lock/unlock.
|
||||
func (s *SecretSyncer) isWatching(key types.NamespacedName) bool {
|
||||
s.mutex.RLock()
|
||||
defer s.mutex.RUnlock()
|
||||
|
||||
return s.objs.Has(key)
|
||||
}
|
||||
|
||||
// AddResource adds a given resource to the list of resources that will be synced. Safe to call multiple times for the
|
||||
// same resource.
|
||||
func (s *SecretSyncer) AddResource(ctx context.Context, namespace, name string) error {
|
||||
objKey := types.NamespacedName{
|
||||
Namespace: namespace,
|
||||
Name: name,
|
||||
}
|
||||
|
||||
// if we already sync this object, no need to writelock/add it
|
||||
if s.isWatching(objKey) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// lock in write mode since we are now adding the key
|
||||
s.mutex.Lock()
|
||||
|
||||
if s.objs == nil {
|
||||
s.objs = sets.Set[types.NamespacedName]{}
|
||||
}
|
||||
|
||||
s.objs = s.objs.Insert(objKey)
|
||||
s.mutex.Unlock()
|
||||
|
||||
_, err := s.Reconcile(ctx, reconcile.Request{
|
||||
NamespacedName: objKey,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to reconcile new object %s/%s: %w", objKey.Namespace, objKey.Name, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveResource removes a given resource from the list of resources that will be synced. Safe to call for an already
|
||||
// removed resource.
|
||||
func (s *SecretSyncer) RemoveResource(ctx context.Context, namespace, name string) error {
|
||||
objKey := types.NamespacedName{
|
||||
Namespace: namespace,
|
||||
Name: name,
|
||||
}
|
||||
// if we don't sync this object, no need to writelock/add it
|
||||
if !s.isWatching(objKey) {
|
||||
return nil
|
||||
}
|
||||
// lock in write mode since we are now adding the key
|
||||
if err := retry.OnError(controller.Backoff, func(err error) bool {
|
||||
return err != nil
|
||||
}, func() error {
|
||||
return s.removeHostSecret(ctx, namespace, name)
|
||||
}); err != nil {
|
||||
return fmt.Errorf("unable to remove secret: %w", err)
|
||||
}
|
||||
|
||||
s.mutex.Lock()
|
||||
|
||||
if s.objs == nil {
|
||||
s.objs = sets.Set[types.NamespacedName]{}
|
||||
}
|
||||
|
||||
s.objs = s.objs.Delete(objKey)
|
||||
s.mutex.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SecretSyncer) removeHostSecret(ctx context.Context, virtualNamespace, virtualName string) error {
|
||||
var vSecret corev1.Secret
|
||||
|
||||
err := s.VirtualClient.Get(ctx, types.NamespacedName{
|
||||
Namespace: virtualNamespace,
|
||||
Name: virtualName,
|
||||
}, &vSecret)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to get virtual secret %s/%s: %w", virtualNamespace, virtualName, err)
|
||||
}
|
||||
|
||||
translated, err := s.TranslateFunc(&vSecret)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to translate virtual secret: %s/%s: %w", virtualNamespace, virtualName, err)
|
||||
}
|
||||
|
||||
return s.HostClient.Delete(ctx, translated)
|
||||
}
|
||||
154
k3k-kubelet/controller/syncer/configmap.go
Normal file
@@ -0,0 +1,154 @@
|
||||
package syncer
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
|
||||
)
|
||||
|
||||
const (
|
||||
configMapControllerName = "configmap-syncer"
|
||||
configMapFinalizerName = "configmap.k3k.io/finalizer"
|
||||
)
|
||||
|
||||
type ConfigMapSyncer struct {
|
||||
// SyncerContext contains all client information for host and virtual cluster
|
||||
*SyncerContext
|
||||
}
|
||||
|
||||
func (c *ConfigMapSyncer) Name() string {
|
||||
return configMapControllerName
|
||||
}
|
||||
|
||||
// AddConfigMapSyncer adds configmap syncer controller to the manager of the virtual cluster
|
||||
func AddConfigMapSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
|
||||
reconciler := ConfigMapSyncer{
|
||||
SyncerContext: &SyncerContext{
|
||||
VirtualClient: virtMgr.GetClient(),
|
||||
HostClient: hostMgr.GetClient(),
|
||||
Translator: translate.ToHostTranslator{
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
},
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
},
|
||||
}
|
||||
|
||||
name := reconciler.Translator.TranslateName(clusterNamespace, configMapControllerName)
|
||||
|
||||
return ctrl.NewControllerManagedBy(virtMgr).
|
||||
Named(name).
|
||||
For(&corev1.ConfigMap{}).WithEventFilter(predicate.NewPredicateFuncs(reconciler.filterResources)).
|
||||
Complete(&reconciler)
|
||||
}
|
||||
|
||||
func (c *ConfigMapSyncer) filterResources(object client.Object) bool {
|
||||
var cluster v1beta1.Cluster
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
if err := c.HostClient.Get(ctx, types.NamespacedName{Name: c.ClusterName, Namespace: c.ClusterNamespace}, &cluster); err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// check for configMap Sync Config
|
||||
syncConfig := cluster.Spec.Sync.ConfigMaps
|
||||
|
||||
// If syncing is disabled, only process deletions to allow for cleanup.
|
||||
if !syncConfig.Enabled {
|
||||
return object.GetDeletionTimestamp() != nil
|
||||
}
|
||||
|
||||
labelSelector := labels.SelectorFromSet(syncConfig.Selector)
|
||||
if labelSelector.Empty() {
|
||||
return true
|
||||
}
|
||||
|
||||
return labelSelector.Matches(labels.Set(object.GetLabels()))
|
||||
}
|
||||
|
||||
// Reconcile implements reconcile.Reconciler and synchronizes the objects in objs to the host cluster
|
||||
func (c *ConfigMapSyncer) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
log := ctrl.LoggerFrom(ctx).WithValues("cluster", c.ClusterName, "clusterNamespace", c.ClusterName)
|
||||
ctx = ctrl.LoggerInto(ctx, log)
|
||||
|
||||
var cluster v1beta1.Cluster
|
||||
|
||||
if err := c.HostClient.Get(ctx, types.NamespacedName{Name: c.ClusterName, Namespace: c.ClusterNamespace}, &cluster); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
var virtualConfigMap corev1.ConfigMap
|
||||
|
||||
if err := c.VirtualClient.Get(ctx, req.NamespacedName, &virtualConfigMap); err != nil {
|
||||
return reconcile.Result{}, client.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
syncedConfigMap := c.translateConfigMap(&virtualConfigMap)
|
||||
|
||||
if err := controllerutil.SetControllerReference(&cluster, syncedConfigMap, c.HostClient.Scheme()); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
// handle deletion
|
||||
if !virtualConfigMap.DeletionTimestamp.IsZero() {
|
||||
// deleting the synced configMap if exist
|
||||
if err := c.HostClient.Delete(ctx, syncedConfigMap); err != nil && !apierrors.IsNotFound(err) {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
// remove the finalizer after cleaning up the synced configMap
|
||||
if controllerutil.RemoveFinalizer(&virtualConfigMap, configMapFinalizerName) {
|
||||
if err := c.VirtualClient.Update(ctx, &virtualConfigMap); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
// Add finalizer if it does not exist
|
||||
if controllerutil.AddFinalizer(&virtualConfigMap, configMapFinalizerName) {
|
||||
if err := c.VirtualClient.Update(ctx, &virtualConfigMap); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
var hostConfigMap corev1.ConfigMap
|
||||
if err := c.HostClient.Get(ctx, types.NamespacedName{Name: syncedConfigMap.Name, Namespace: syncedConfigMap.Namespace}, &hostConfigMap); err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
log.Info("creating the ConfigMap for the first time on the host cluster")
|
||||
return reconcile.Result{}, c.HostClient.Create(ctx, syncedConfigMap)
|
||||
}
|
||||
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
// TODO: Add option to keep labels/annotation set by the host cluster
|
||||
log.Info("updating ConfigMap on the host cluster")
|
||||
|
||||
return reconcile.Result{}, c.HostClient.Update(ctx, syncedConfigMap)
|
||||
}
|
||||
|
||||
// translateConfigMap will translate a given configMap created in the virtual cluster and
|
||||
// translates it to host cluster object
|
||||
func (c *ConfigMapSyncer) translateConfigMap(configMap *corev1.ConfigMap) *corev1.ConfigMap {
|
||||
hostConfigMap := configMap.DeepCopy()
|
||||
c.Translator.TranslateTo(hostConfigMap)
|
||||
|
||||
return hostConfigMap
|
||||
}
|
||||
236
k3k-kubelet/controller/syncer/configmap_test.go
Normal file
@@ -0,0 +1,236 @@
|
||||
package syncer_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var ConfigMapTests = func() {
|
||||
var (
|
||||
namespace string
|
||||
cluster v1beta1.Cluster
|
||||
)
|
||||
|
||||
BeforeEach(func() {
|
||||
ctx := context.Background()
|
||||
|
||||
ns := v1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-"},
|
||||
}
|
||||
err := hostTestEnv.k8sClient.Create(ctx, &ns)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
namespace = ns.Name
|
||||
|
||||
cluster = v1beta1.Cluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "cluster-",
|
||||
Namespace: namespace,
|
||||
},
|
||||
}
|
||||
err = hostTestEnv.k8sClient.Create(ctx, &cluster)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
err = syncer.AddConfigMapSyncer(ctx, virtManager, hostManager, cluster.Name, cluster.Namespace)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
ns := v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
|
||||
err := hostTestEnv.k8sClient.Delete(context.Background(), &ns)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("creates a ConfigMap on the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
configMap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "cm-",
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Data: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
}
|
||||
|
||||
err := virtTestEnv.k8sClient.Create(ctx, configMap)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created configmap %s in virtual cluster", configMap.Name))
|
||||
|
||||
var hostConfigMap v1.ConfigMap
|
||||
hostConfigMapName := translateName(cluster, configMap.Namespace, configMap.Name)
|
||||
|
||||
Eventually(func() error {
|
||||
key := client.ObjectKey{Name: hostConfigMapName, Namespace: namespace}
|
||||
return hostTestEnv.k8sClient.Get(ctx, key, &hostConfigMap)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Created Configmap %s in host cluster", hostConfigMapName))
|
||||
|
||||
Expect(hostConfigMap.Data).To(Equal(configMap.Data))
|
||||
Expect(hostConfigMap.Labels).To(ContainElement("bar"))
|
||||
|
||||
GinkgoWriter.Printf("labels: %v\n", hostConfigMap.Labels)
|
||||
})
|
||||
|
||||
It("updates a ConfigMap on the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
configMap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "cm-",
|
||||
Namespace: "default",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
}
|
||||
|
||||
err := virtTestEnv.k8sClient.Create(ctx, configMap)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created configmap %s in virtual cluster", configMap.Name))
|
||||
|
||||
var hostConfigMap v1.ConfigMap
|
||||
hostConfigMapName := translateName(cluster, configMap.Namespace, configMap.Name)
|
||||
|
||||
Eventually(func() error {
|
||||
key := client.ObjectKey{Name: hostConfigMapName, Namespace: namespace}
|
||||
return hostTestEnv.k8sClient.Get(ctx, key, &hostConfigMap)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Created configmap %s in host cluster", hostConfigMapName))
|
||||
|
||||
Expect(hostConfigMap.Data).To(Equal(configMap.Data))
|
||||
Expect(hostConfigMap.Labels).NotTo(ContainElement("bar"))
|
||||
|
||||
key := client.ObjectKeyFromObject(configMap)
|
||||
err = virtTestEnv.k8sClient.Get(ctx, key, configMap)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
configMap.Labels = map[string]string{"foo": "bar"}
|
||||
|
||||
// update virtual configmap
|
||||
err = virtTestEnv.k8sClient.Update(ctx, configMap)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(configMap.Labels).To(ContainElement("bar"))
|
||||
|
||||
err = virtTestEnv.k8sClient.Get(ctx, key, configMap)
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// check hostConfigMap
|
||||
Eventually(func() map[string]string {
|
||||
key := client.ObjectKey{Name: hostConfigMapName, Namespace: namespace}
|
||||
err = hostTestEnv.k8sClient.Get(ctx, key, &hostConfigMap)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
return hostConfigMap.Labels
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(ContainElement("bar"))
|
||||
})
|
||||
|
||||
It("deletes a configMap on the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
configMap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "cm-",
|
||||
Namespace: "default",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
}
|
||||
|
||||
err := virtTestEnv.k8sClient.Create(ctx, configMap)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created configmap %s in virtual cluster", configMap.Name))
|
||||
|
||||
var hostConfigMap v1.ConfigMap
|
||||
hostConfigMapName := translateName(cluster, configMap.Namespace, configMap.Name)
|
||||
|
||||
Eventually(func() error {
|
||||
key := client.ObjectKey{Name: hostConfigMapName, Namespace: namespace}
|
||||
return hostTestEnv.k8sClient.Get(ctx, key, &hostConfigMap)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Created configmap %s in host cluster", hostConfigMapName))
|
||||
|
||||
Expect(hostConfigMap.Data).To(Equal(configMap.Data))
|
||||
|
||||
err = virtTestEnv.k8sClient.Delete(ctx, configMap)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
Eventually(func() bool {
|
||||
key := client.ObjectKey{Name: hostConfigMapName, Namespace: namespace}
|
||||
err := hostTestEnv.k8sClient.Get(ctx, key, &hostConfigMap)
|
||||
return apierrors.IsNotFound(err)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeTrue())
|
||||
})
|
||||
It("will not sync a configMap if disabled", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
cluster.Spec.Sync.ConfigMaps.Enabled = false
|
||||
err := hostTestEnv.k8sClient.Update(ctx, &cluster)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
configMap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "cm-",
|
||||
Namespace: "default",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
}
|
||||
|
||||
err = virtTestEnv.k8sClient.Create(ctx, configMap)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created configmap %s in virtual cluster", configMap.Name))
|
||||
|
||||
var hostConfigMap v1.ConfigMap
|
||||
hostConfigMapName := translateName(cluster, configMap.Namespace, configMap.Name)
|
||||
|
||||
Eventually(func() bool {
|
||||
key := client.ObjectKey{Name: hostConfigMapName, Namespace: namespace}
|
||||
err := hostTestEnv.k8sClient.Get(ctx, key, &hostConfigMap)
|
||||
GinkgoWriter.Printf("error: %v", err)
|
||||
return apierrors.IsNotFound(err)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeTrue())
|
||||
})
|
||||
}
|
||||
162
k3k-kubelet/controller/syncer/ingress.go
Normal file
@@ -0,0 +1,162 @@
|
||||
package syncer
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
networkingv1 "k8s.io/api/networking/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
|
||||
)
|
||||
|
||||
const (
|
||||
ingressControllerName = "ingress-syncer-controller"
|
||||
ingressFinalizerName = "ingress.k3k.io/finalizer"
|
||||
)
|
||||
|
||||
type IngressReconciler struct {
|
||||
*SyncerContext
|
||||
}
|
||||
|
||||
// AddIngressSyncer adds ingress syncer controller to the manager of the virtual cluster
|
||||
func AddIngressSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
|
||||
reconciler := IngressReconciler{
|
||||
SyncerContext: &SyncerContext{
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
VirtualClient: virtMgr.GetClient(),
|
||||
HostClient: hostMgr.GetClient(),
|
||||
Translator: translate.ToHostTranslator{
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
name := reconciler.Translator.TranslateName(clusterNamespace, ingressControllerName)
|
||||
|
||||
return ctrl.NewControllerManagedBy(virtMgr).
|
||||
Named(name).
|
||||
For(&networkingv1.Ingress{}).
|
||||
WithEventFilter(predicate.NewPredicateFuncs(reconciler.filterResources)).
|
||||
Complete(&reconciler)
|
||||
}
|
||||
|
||||
func (r *IngressReconciler) filterResources(object ctrlruntimeclient.Object) bool {
|
||||
var cluster v1beta1.Cluster
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// check for ingressConfig
|
||||
syncConfig := cluster.Spec.Sync.Ingresses
|
||||
|
||||
// If syncing is disabled, only process deletions to allow for cleanup.
|
||||
if !syncConfig.Enabled {
|
||||
return object.GetDeletionTimestamp() != nil
|
||||
}
|
||||
|
||||
labelSelector := labels.SelectorFromSet(syncConfig.Selector)
|
||||
if labelSelector.Empty() {
|
||||
return true
|
||||
}
|
||||
|
||||
return labelSelector.Matches(labels.Set(object.GetLabels()))
|
||||
}
|
||||
|
||||
func (r *IngressReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.ClusterName, "clusterNamespace", r.ClusterNamespace)
|
||||
ctx = ctrl.LoggerInto(ctx, log)
|
||||
|
||||
log.Info("reconciling ingress object")
|
||||
|
||||
var (
|
||||
virtIngress networkingv1.Ingress
|
||||
cluster v1beta1.Cluster
|
||||
)
|
||||
|
||||
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
if err := r.VirtualClient.Get(ctx, req.NamespacedName, &virtIngress); err != nil {
|
||||
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
syncedIngress := r.ingress(&virtIngress)
|
||||
|
||||
if err := controllerutil.SetControllerReference(&cluster, syncedIngress, r.HostClient.Scheme()); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
// handle deletion
|
||||
if !virtIngress.DeletionTimestamp.IsZero() {
|
||||
// delete the synced ingress if it exists
|
||||
if err := r.HostClient.Delete(ctx, syncedIngress); err != nil {
|
||||
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
// remove the finalizer after cleaning up the synced ingress
|
||||
if controllerutil.RemoveFinalizer(&virtIngress, ingressFinalizerName) {
|
||||
if err := r.VirtualClient.Update(ctx, &virtIngress); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
// Add finalizer if it does not exist
|
||||
|
||||
if controllerutil.AddFinalizer(&virtIngress, ingressFinalizerName) {
|
||||
if err := r.VirtualClient.Update(ctx, &virtIngress); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
// create or update the ingress on host
|
||||
var hostIngress networkingv1.Ingress
|
||||
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: syncedIngress.Name, Namespace: r.ClusterNamespace}, &hostIngress); err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
log.Info("creating the ingress for the first time on the host cluster")
|
||||
return reconcile.Result{}, r.HostClient.Create(ctx, syncedIngress)
|
||||
}
|
||||
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
log.Info("updating ingress on the host cluster")
|
||||
|
||||
return reconcile.Result{}, r.HostClient.Update(ctx, syncedIngress)
|
||||
}
|
||||
|
||||
func (s *IngressReconciler) ingress(obj *networkingv1.Ingress) *networkingv1.Ingress {
|
||||
hostIngress := obj.DeepCopy()
|
||||
s.Translator.TranslateTo(hostIngress)
|
||||
|
||||
for _, rule := range hostIngress.Spec.Rules {
|
||||
// modify services in rules to point to the synced services
|
||||
if rule.HTTP != nil {
|
||||
for _, path := range rule.HTTP.Paths {
|
||||
if path.Backend.Service != nil {
|
||||
path.Backend.Service.Name = s.Translator.TranslateName(obj.GetNamespace(), path.Backend.Service.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// don't sync finalizers to the host
|
||||
return hostIngress
|
||||
}
|
||||
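The filterResources predicate above relies on plain label-selector matching from apimachinery; as a small standalone illustration (the selector value is invented for the example), an empty selector matches everything, which is why the predicate short-circuits with true when no selector is configured:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// selector as it would come from cluster.Spec.Sync.Ingresses.Selector
	selector := labels.SelectorFromSet(map[string]string{"sync": "true"})

	// labels on an object in the virtual cluster
	objLabels := labels.Set{"sync": "true", "app": "web"}

	fmt.Println(selector.Empty())            // false
	fmt.Println(selector.Matches(objLabels)) // true
}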
349
k3k-kubelet/controller/syncer/ingress_test.go
Normal file
@@ -0,0 +1,349 @@
|
||||
package syncer_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"k8s.io/utils/ptr"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
networkingv1 "k8s.io/api/networking/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var IngressTests = func() {
|
||||
var (
|
||||
namespace string
|
||||
cluster v1beta1.Cluster
|
||||
)
|
||||
|
||||
BeforeEach(func() {
|
||||
ctx := context.Background()
|
||||
|
||||
ns := v1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-"},
|
||||
}
|
||||
err := hostTestEnv.k8sClient.Create(ctx, &ns)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
namespace = ns.Name
|
||||
|
||||
cluster = v1beta1.Cluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "cluster-",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1beta1.ClusterSpec{
|
||||
Sync: &v1beta1.SyncConfig{
|
||||
Ingresses: v1beta1.IngressSyncConfig{
|
||||
Enabled: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
err = hostTestEnv.k8sClient.Create(ctx, &cluster)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
err = syncer.AddIngressSyncer(ctx, virtManager, hostManager, cluster.Name, cluster.Namespace)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
ns := v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
|
||||
err := hostTestEnv.k8sClient.Delete(context.Background(), &ns)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("creates a Ingress on the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
ingress := &networkingv1.Ingress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "ingress-",
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Spec: networkingv1.IngressSpec{
|
||||
Rules: []networkingv1.IngressRule{
|
||||
{
|
||||
Host: "test.com",
|
||||
IngressRuleValue: networkingv1.IngressRuleValue{
|
||||
HTTP: &networkingv1.HTTPIngressRuleValue{
|
||||
Paths: []networkingv1.HTTPIngressPath{
|
||||
{
|
||||
Path: "/",
|
||||
PathType: ptr.To(networkingv1.PathTypePrefix),
|
||||
Backend: networkingv1.IngressBackend{
|
||||
Service: &networkingv1.IngressServiceBackend{
|
||||
Name: "test-service",
|
||||
Port: networkingv1.ServiceBackendPort{
|
||||
Name: "test-port",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := virtTestEnv.k8sClient.Create(ctx, ingress)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created Ingress %s in virtual cluster", ingress.Name))
|
||||
|
||||
var hostIngress networkingv1.Ingress
|
||||
hostIngressName := translateName(cluster, ingress.Namespace, ingress.Name)
|
||||
|
||||
Eventually(func() error {
|
||||
key := client.ObjectKey{Name: hostIngressName, Namespace: namespace}
|
||||
return hostTestEnv.k8sClient.Get(ctx, key, &hostIngress)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Created Ingress %s in host cluster", hostIngressName))
|
||||
|
||||
Expect(len(hostIngress.Spec.Rules)).To(Equal(1))
|
||||
Expect(hostIngress.Spec.Rules[0].Host).To(Equal("test.com"))
|
||||
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Path).To(Equal("/"))
|
||||
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name).To(Equal(translateName(cluster, ingress.Namespace, "test-service")))
|
||||
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Port.Name).To(Equal("test-port"))
|
||||
|
||||
GinkgoWriter.Printf("labels: %v\n", hostIngress.Labels)
|
||||
})
|
||||
|
||||
It("updates a Ingress on the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
ingress := &networkingv1.Ingress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "ingress-",
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Spec: networkingv1.IngressSpec{
|
||||
Rules: []networkingv1.IngressRule{
|
||||
{
|
||||
Host: "test.com",
|
||||
IngressRuleValue: networkingv1.IngressRuleValue{
|
||||
HTTP: &networkingv1.HTTPIngressRuleValue{
|
||||
Paths: []networkingv1.HTTPIngressPath{
|
||||
{
|
||||
Path: "/",
|
||||
PathType: ptr.To(networkingv1.PathTypePrefix),
|
||||
Backend: networkingv1.IngressBackend{
|
||||
Service: &networkingv1.IngressServiceBackend{
|
||||
Name: "test-service",
|
||||
Port: networkingv1.ServiceBackendPort{
|
||||
Name: "test-port",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := virtTestEnv.k8sClient.Create(ctx, ingress)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created Ingress %s in virtual cluster", ingress.Name))
|
||||
|
||||
var hostIngress networkingv1.Ingress
|
||||
hostIngressName := translateName(cluster, ingress.Namespace, ingress.Name)
|
||||
|
||||
Eventually(func() error {
|
||||
key := client.ObjectKey{Name: hostIngressName, Namespace: namespace}
|
||||
return hostTestEnv.k8sClient.Get(ctx, key, &hostIngress)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Created Ingress %s in host cluster", hostIngressName))
|
||||
|
||||
Expect(len(hostIngress.Spec.Rules)).To(Equal(1))
|
||||
Expect(hostIngress.Spec.Rules[0].Host).To(Equal("test.com"))
|
||||
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Path).To(Equal("/"))
|
||||
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name).To(Equal(translateName(cluster, ingress.Namespace, "test-service")))
|
||||
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Port.Name).To(Equal("test-port"))
|
||||
|
||||
key := client.ObjectKeyFromObject(ingress)
|
||||
err = virtTestEnv.k8sClient.Get(ctx, key, ingress)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
ingress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name = "test-service-updated"
|
||||
|
||||
// update virtual ingress
|
||||
err = virtTestEnv.k8sClient.Update(ctx, ingress)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// check hostIngress
|
||||
Eventually(func() string {
|
||||
key := client.ObjectKey{Name: hostIngressName, Namespace: namespace}
|
||||
err = hostTestEnv.k8sClient.Get(ctx, key, &hostIngress)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
return hostIngress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(Equal(translateName(cluster, ingress.Namespace, "test-service-updated")))
|
||||
})
|
||||
|
||||
It("deletes a Ingress on the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
ingress := &networkingv1.Ingress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "ingress-",
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Spec: networkingv1.IngressSpec{
|
||||
Rules: []networkingv1.IngressRule{
|
||||
{
|
||||
Host: "test.com",
|
||||
IngressRuleValue: networkingv1.IngressRuleValue{
|
||||
HTTP: &networkingv1.HTTPIngressRuleValue{
|
||||
Paths: []networkingv1.HTTPIngressPath{
|
||||
{
|
||||
Path: "/",
|
||||
PathType: ptr.To(networkingv1.PathTypePrefix),
|
||||
Backend: networkingv1.IngressBackend{
|
||||
Service: &networkingv1.IngressServiceBackend{
|
||||
Name: "test-service",
|
||||
Port: networkingv1.ServiceBackendPort{
|
||||
Name: "test-port",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := virtTestEnv.k8sClient.Create(ctx, ingress)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created Ingress %s in virtual cluster", ingress.Name))
|
||||
|
||||
var hostIngress networkingv1.Ingress
|
||||
hostIngressName := translateName(cluster, ingress.Namespace, ingress.Name)
|
||||
|
||||
Eventually(func() error {
|
||||
key := client.ObjectKey{Name: hostIngressName, Namespace: namespace}
|
||||
return hostTestEnv.k8sClient.Get(ctx, key, &hostIngress)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Created Ingress %s in host cluster", hostIngressName))
|
||||
|
||||
Expect(len(hostIngress.Spec.Rules)).To(Equal(1))
|
||||
Expect(hostIngress.Spec.Rules[0].Host).To(Equal("test.com"))
|
||||
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Path).To(Equal("/"))
|
||||
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name).To(Equal(translateName(cluster, ingress.Namespace, "test-service")))
|
||||
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Port.Name).To(Equal("test-port"))
|
||||
|
||||
err = virtTestEnv.k8sClient.Delete(ctx, ingress)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
Eventually(func() bool {
|
||||
key := client.ObjectKey{Name: hostIngressName, Namespace: namespace}
|
||||
err := hostTestEnv.k8sClient.Get(ctx, key, &hostIngress)
|
||||
return apierrors.IsNotFound(err)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeTrue())
|
||||
})
|
||||
|
||||
It("will not sync an Ingress if disabled", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
cluster.Spec.Sync.Ingresses.Enabled = false
|
||||
err := hostTestEnv.k8sClient.Update(ctx, &cluster)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
ingress := &networkingv1.Ingress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "ingress-",
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Spec: networkingv1.IngressSpec{
|
||||
Rules: []networkingv1.IngressRule{
|
||||
{
|
||||
Host: "test.com",
|
||||
IngressRuleValue: networkingv1.IngressRuleValue{
|
||||
HTTP: &networkingv1.HTTPIngressRuleValue{
|
||||
Paths: []networkingv1.HTTPIngressPath{
|
||||
{
|
||||
Path: "/",
|
||||
PathType: ptr.To(networkingv1.PathTypePrefix),
|
||||
Backend: networkingv1.IngressBackend{
|
||||
Service: &networkingv1.IngressServiceBackend{
|
||||
Name: "test-service",
|
||||
Port: networkingv1.ServiceBackendPort{
|
||||
Name: "test-port",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err = virtTestEnv.k8sClient.Create(ctx, ingress)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created Ingress %s in virtual cluster", ingress.Name))
|
||||
|
||||
var hostIngress networkingv1.Ingress
|
||||
hostIngressName := translateName(cluster, ingress.Namespace, ingress.Name)
|
||||
|
||||
Eventually(func() bool {
|
||||
key := client.ObjectKey{Name: hostIngressName, Namespace: namespace}
|
||||
err := hostTestEnv.k8sClient.Get(ctx, key, &hostIngress)
|
||||
return apierrors.IsNotFound(err)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeTrue())
|
||||
})
|
||||
}
|
||||
138
k3k-kubelet/controller/syncer/persistentvolumeclaims.go
Normal file
@@ -0,0 +1,138 @@
|
||||
package syncer
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
|
||||
)
|
||||
|
||||
const (
|
||||
pvcControllerName = "pvc-syncer-controller"
|
||||
pvcFinalizerName = "pvc.k3k.io/finalizer"
|
||||
)
|
||||
|
||||
type PVCReconciler struct {
|
||||
*SyncerContext
|
||||
}
|
||||
|
||||
// AddPVCSyncer adds persistentvolumeclaims syncer controller to k3k-kubelet
|
||||
func AddPVCSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
|
||||
reconciler := PVCReconciler{
|
||||
SyncerContext: &SyncerContext{
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
VirtualClient: virtMgr.GetClient(),
|
||||
HostClient: hostMgr.GetClient(),
|
||||
Translator: translate.ToHostTranslator{
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
name := reconciler.Translator.TranslateName(clusterNamespace, pvcControllerName)
|
||||
|
||||
return ctrl.NewControllerManagedBy(virtMgr).
|
||||
Named(name).
|
||||
For(&v1.PersistentVolumeClaim{}).
|
||||
WithEventFilter(predicate.NewPredicateFuncs(reconciler.filterResources)).
|
||||
Complete(&reconciler)
|
||||
}
|
||||
|
||||
func (r *PVCReconciler) filterResources(object ctrlruntimeclient.Object) bool {
|
||||
var cluster v1beta1.Cluster
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// check for pvc config
|
||||
syncConfig := cluster.Spec.Sync.PersistentVolumeClaims
|
||||
|
||||
// If syncing is disabled, only process deletions to allow for cleanup.
|
||||
if !syncConfig.Enabled {
|
||||
return object.GetDeletionTimestamp() != nil
|
||||
}
|
||||
|
||||
labelSelector := labels.SelectorFromSet(syncConfig.Selector)
|
||||
if labelSelector.Empty() {
|
||||
return true
|
||||
}
|
||||
|
||||
return labelSelector.Matches(labels.Set(object.GetLabels()))
|
||||
}
|
||||
|
||||
func (r *PVCReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.ClusterName, "clusterNamespace", r.ClusterNamespace)
|
||||
ctx = ctrl.LoggerInto(ctx, log)
|
||||
|
||||
var (
|
||||
virtPVC v1.PersistentVolumeClaim
|
||||
cluster v1beta1.Cluster
|
||||
)
|
||||
|
||||
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
if err := r.VirtualClient.Get(ctx, req.NamespacedName, &virtPVC); err != nil {
|
||||
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
syncedPVC := r.pvc(&virtPVC)
|
||||
if err := controllerutil.SetControllerReference(&cluster, syncedPVC, r.HostClient.Scheme()); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
// handle deletion
|
||||
if !virtPVC.DeletionTimestamp.IsZero() {
|
||||
// delete the synced PVC if it exists
|
||||
if err := r.HostClient.Delete(ctx, syncedPVC); err != nil && !apierrors.IsNotFound(err) {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
// remove the finalizer after cleaning up the synced pvc
|
||||
if controllerutil.RemoveFinalizer(&virtPVC, pvcFinalizerName) {
|
||||
if err := r.VirtualClient.Update(ctx, &virtPVC); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
// Add finalizer if it does not exist
|
||||
if controllerutil.AddFinalizer(&virtPVC, pvcFinalizerName) {
|
||||
if err := r.VirtualClient.Update(ctx, &virtPVC); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
// create the pvc on host
|
||||
log.Info("creating the persistent volume claim for the first time on the host cluster")
|
||||
|
||||
// note that we don't need to update the PVC on the host cluster; we only sync it so that it can be
// handled by the host cluster.
|
||||
return reconcile.Result{}, ctrlruntimeclient.IgnoreAlreadyExists(r.HostClient.Create(ctx, syncedPVC))
|
||||
}
|
||||
|
||||
func (r *PVCReconciler) pvc(obj *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim {
|
||||
hostPVC := obj.DeepCopy()
|
||||
r.Translator.TranslateTo(hostPVC)
|
||||
|
||||
return hostPVC
|
||||
}
|
||||
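Unlike the other syncers in this changeset, the PVC syncer above is create-only: it never updates the host object after the initial create. For resources that are kept in sync, controller-runtime also offers CreateOrUpdate as an alternative to the explicit Get/Create/Update sequence; a sketch of that pattern follows for comparison (it is not what this changeset uses, and the helper name is invented):

package syncersketch

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// upsertConfigMap shows the CreateOrUpdate style: fetch or create the object,
// then copy the desired state onto it inside the mutate callback.
func upsertConfigMap(ctx context.Context, c client.Client, desired *corev1.ConfigMap) error {
	cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{
		Name:      desired.Name,
		Namespace: desired.Namespace,
	}}

	_, err := controllerutil.CreateOrUpdate(ctx, c, cm, func() error {
		cm.Labels = desired.Labels
		cm.Data = desired.Data
		return nil
	})

	return err
}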
104
k3k-kubelet/controller/syncer/persistentvolumeclaims_test.go
Normal file
@@ -0,0 +1,104 @@
|
||||
package syncer_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/utils/ptr"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var PVCTests = func() {
|
||||
var (
|
||||
namespace string
|
||||
cluster v1beta1.Cluster
|
||||
)
|
||||
|
||||
BeforeEach(func() {
|
||||
ctx := context.Background()
|
||||
|
||||
ns := v1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-"},
|
||||
}
|
||||
err := hostTestEnv.k8sClient.Create(ctx, &ns)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
namespace = ns.Name
|
||||
|
||||
cluster = v1beta1.Cluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "cluster-",
|
||||
Namespace: namespace,
|
||||
},
|
||||
}
|
||||
err = hostTestEnv.k8sClient.Create(ctx, &cluster)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
err = syncer.AddPVCSyncer(ctx, virtManager, hostManager, cluster.Name, cluster.Namespace)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
ns := v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
|
||||
err := hostTestEnv.k8sClient.Delete(context.Background(), &ns)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("creates a pvc on the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
pvc := &v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "pvc-",
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Spec: v1.PersistentVolumeClaimSpec{
|
||||
StorageClassName: ptr.To("test-sc"),
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{
|
||||
v1.ReadOnlyMany,
|
||||
},
|
||||
Resources: v1.VolumeResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
"storage": resource.MustParse("1G"),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := virtTestEnv.k8sClient.Create(ctx, pvc)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created PVC %s in virtual cluster", pvc.Name))
|
||||
|
||||
var hostPVC v1.PersistentVolumeClaim
|
||||
hostPVCName := translateName(cluster, pvc.Namespace, pvc.Name)
|
||||
|
||||
Eventually(func() error {
|
||||
key := client.ObjectKey{Name: hostPVCName, Namespace: namespace}
|
||||
return hostTestEnv.k8sClient.Get(ctx, key, &hostPVC)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Created PVC %s in host cluster", hostPVCName))
|
||||
|
||||
Expect(*hostPVC.Spec.StorageClassName).To(Equal("test-sc"))
|
||||
|
||||
GinkgoWriter.Printf("labels: %v\n", hostPVC.Labels)
|
||||
})
|
||||
}
|
||||
@@ -1,12 +1,13 @@
|
||||
package controller
|
||||
package syncer
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/component-helpers/storage/volume"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
@@ -15,64 +16,70 @@ import (
|
||||
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
|
||||
)
|
||||
|
||||
const (
|
||||
podController = "pod-pvc-controller"
|
||||
pseudoPVLabel = "pod.k3k.io/pseudoPV"
|
||||
podControllerName = "pod-pvc-controller"
|
||||
pseudoPVLabel = "pod.k3k.io/pseudoPV"
|
||||
)
|
||||
|
||||
type PodReconciler struct {
|
||||
clusterName string
|
||||
clusterNamespace string
|
||||
|
||||
virtualClient ctrlruntimeclient.Client
|
||||
hostClient ctrlruntimeclient.Client
|
||||
Scheme *runtime.Scheme
|
||||
HostScheme *runtime.Scheme
|
||||
Translator translate.ToHostTranslator
|
||||
*SyncerContext
|
||||
}
|
||||
|
||||
// AddPodPVCController adds pod controller to k3k-kubelet
|
||||
func AddPodPVCController(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
|
||||
translator := translate.ToHostTranslator{
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
}
|
||||
|
||||
// initialize a new Reconciler
|
||||
reconciler := PodReconciler{
|
||||
clusterName: clusterName,
|
||||
clusterNamespace: clusterNamespace,
|
||||
|
||||
virtualClient: virtMgr.GetClient(),
|
||||
hostClient: hostMgr.GetClient(),
|
||||
Scheme: virtMgr.GetScheme(),
|
||||
HostScheme: hostMgr.GetScheme(),
|
||||
Translator: translator,
|
||||
SyncerContext: &SyncerContext{
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
VirtualClient: virtMgr.GetClient(),
|
||||
HostClient: hostMgr.GetClient(),
|
||||
Translator: translate.ToHostTranslator{},
|
||||
},
|
||||
}
|
||||
|
||||
name := reconciler.Translator.TranslateName(clusterNamespace, podControllerName)
|
||||
|
||||
return ctrl.NewControllerManagedBy(virtMgr).
|
||||
Named(podController).
|
||||
Named(name).
|
||||
For(&v1.Pod{}).
|
||||
WithEventFilter(predicate.NewPredicateFuncs(reconciler.filterResources)).
|
||||
Complete(&reconciler)
|
||||
}
|
||||
|
||||
func (r *PodReconciler) filterResources(object ctrlruntimeclient.Object) bool {
|
||||
var cluster v1beta1.Cluster
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// check for pvc config
|
||||
syncConfig := cluster.Spec.Sync.PersistentVolumeClaims
|
||||
|
||||
// If PVC syncing is disabled, only process deletions to allow for cleanup.
|
||||
return syncConfig.Enabled || object.GetDeletionTimestamp() != nil
|
||||
}
|
||||
|
||||
func (r *PodReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.clusterName, "clusterNamespace", r.clusterNamespace)
|
||||
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.ClusterName, "clusterNamespace", r.ClusterNamespace)
|
||||
ctx = ctrl.LoggerInto(ctx, log)
|
||||
|
||||
var (
|
||||
virtPod v1.Pod
|
||||
cluster v1alpha1.Cluster
|
||||
cluster v1beta1.Cluster
|
||||
)
|
||||
|
||||
if err := r.hostClient.Get(ctx, types.NamespacedName{Name: r.clusterName, Namespace: r.clusterNamespace}, &cluster); err != nil {
|
||||
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
if err := r.virtualClient.Get(ctx, req.NamespacedName, &virtPod); err != nil {
|
||||
if err := r.VirtualClient.Get(ctx, req.NamespacedName, &virtPod); err != nil {
|
||||
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
@@ -103,14 +110,19 @@ func (r *PodReconciler) reconcilePodWithPVC(ctx context.Context, pod *v1.Pod, pv
|
||||
Namespace: pod.Namespace,
|
||||
}
|
||||
|
||||
if err := r.virtualClient.Get(ctx, key, &pvc); err != nil {
|
||||
if err := r.VirtualClient.Get(ctx, key, &pvc); err != nil {
|
||||
return ctrlruntimeclient.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
pv := r.pseudoPV(&pvc)
|
||||
|
||||
if pod.DeletionTimestamp != nil {
|
||||
return r.handlePodDeletion(ctx, pv)
|
||||
}
|
||||
|
||||
log.Info("Creating pseudo Persistent Volume")
|
||||
|
||||
pv := r.pseudoPV(&pvc)
|
||||
if err := r.virtualClient.Create(ctx, pv); err != nil {
|
||||
if err := r.VirtualClient.Create(ctx, pv); err != nil {
|
||||
return ctrlruntimeclient.IgnoreAlreadyExists(err)
|
||||
}
|
||||
|
||||
@@ -119,7 +131,7 @@ func (r *PodReconciler) reconcilePodWithPVC(ctx context.Context, pod *v1.Pod, pv
|
||||
Phase: v1.VolumeBound,
|
||||
}
|
||||
|
||||
if err := r.virtualClient.Status().Patch(ctx, pv, ctrlruntimeclient.MergeFrom(orig)); err != nil {
|
||||
if err := r.VirtualClient.Status().Patch(ctx, pv, ctrlruntimeclient.MergeFrom(orig)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -135,7 +147,7 @@ func (r *PodReconciler) reconcilePodWithPVC(ctx context.Context, pod *v1.Pod, pv
|
||||
pvcPatch.Status.Phase = v1.ClaimBound
|
||||
pvcPatch.Status.AccessModes = pvcPatch.Spec.AccessModes
|
||||
|
||||
return r.virtualClient.Status().Update(ctx, pvcPatch)
|
||||
return r.VirtualClient.Status().Update(ctx, pvcPatch)
|
||||
}
|
||||
|
||||
func (r *PodReconciler) pseudoPV(obj *v1.PersistentVolumeClaim) *v1.PersistentVolume {
|
||||
@@ -182,3 +194,22 @@ func (r *PodReconciler) pseudoPV(obj *v1.PersistentVolumeClaim) *v1.PersistentVo
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (r *PodReconciler) handlePodDeletion(ctx context.Context, pv *v1.PersistentVolume) error {
|
||||
var currentPV v1.PersistentVolume
|
||||
if err := r.VirtualClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(pv), ¤tPV); err != nil {
|
||||
return ctrlruntimeclient.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
pvPatch := currentPV.DeepCopy()
|
||||
pvPatch.Spec.ClaimRef = nil
|
||||
pvPatch.Status.Phase = v1.VolumeReleased
|
||||
|
||||
controllerutil.RemoveFinalizer(pvPatch, "kubernetes.io/pv-protection")
|
||||
|
||||
if err := r.VirtualClient.Status().Update(ctx, pvPatch); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return ctrlruntimeclient.IgnoreNotFound(r.VirtualClient.Delete(ctx, ¤tPV))
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package controller_test
|
||||
package syncer_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -12,9 +12,8 @@ import (
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/controller"
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
@@ -23,7 +22,7 @@ import (
|
||||
var PriorityClassTests = func() {
|
||||
var (
|
||||
namespace string
|
||||
cluster v1alpha1.Cluster
|
||||
cluster v1beta1.Cluster
|
||||
)
|
||||
|
||||
BeforeEach(func() {
|
||||
@@ -37,16 +36,23 @@ var PriorityClassTests = func() {
|
||||
|
||||
namespace = ns.Name
|
||||
|
||||
cluster = v1alpha1.Cluster{
|
||||
cluster = v1beta1.Cluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "cluster-",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1beta1.ClusterSpec{
|
||||
Sync: &v1beta1.SyncConfig{
|
||||
PriorityClasses: v1beta1.PriorityClassSyncConfig{
|
||||
Enabled: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
err = hostTestEnv.k8sClient.Create(ctx, &cluster)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
err = controller.AddPriorityClassReconciler(ctx, virtManager, hostManager, cluster.Name, cluster.Namespace)
|
||||
err = syncer.AddPriorityClassSyncer(ctx, virtManager, hostManager, cluster.Name, cluster.Namespace)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
@@ -215,15 +221,36 @@ var PriorityClassTests = func() {
|
||||
|
||||
Expect(hostPriorityClass.Value).To(Equal(priorityClass.Value))
|
||||
Expect(hostPriorityClass.GlobalDefault).To(BeFalse())
|
||||
Expect(hostPriorityClass.Annotations[controller.PriorityClassGlobalDefaultAnnotation]).To(Equal("true"))
|
||||
Expect(hostPriorityClass.Annotations[syncer.PriorityClassGlobalDefaultAnnotation]).To(Equal("true"))
|
||||
})
|
||||
|
||||
It("will not create a priorityClass on the host cluster if disabled", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
cluster.Spec.Sync.PriorityClasses.Enabled = false
|
||||
err := hostTestEnv.k8sClient.Update(ctx, &cluster)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
priorityClass := &schedulingv1.PriorityClass{
|
||||
ObjectMeta: metav1.ObjectMeta{GenerateName: "pc-"},
|
||||
Value: 1001,
|
||||
}
|
||||
|
||||
err = virtTestEnv.k8sClient.Create(ctx, priorityClass)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created priorityClass %s in virtual cluster", priorityClass.Name))
|
||||
|
||||
var hostPriorityClass schedulingv1.PriorityClass
|
||||
hostPriorityClassName := translateName(cluster, priorityClass.Namespace, priorityClass.Name)
|
||||
|
||||
Eventually(func() bool {
|
||||
key := client.ObjectKey{Name: hostPriorityClassName}
|
||||
err = hostTestEnv.k8sClient.Get(ctx, key, &hostPriorityClass)
|
||||
return apierrors.IsNotFound(err)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeTrue())
|
||||
})
|
||||
}
|
||||
|
||||
func translateName(cluster v1beta1.Cluster, namespace, name string) string {
|
||||
translator := translate.ToHostTranslator{
|
||||
ClusterName: cluster.Name,
|
||||
ClusterNamespace: cluster.Namespace,
|
||||
}
|
||||
|
||||
return translator.TranslateName(namespace, name)
|
||||
}
|
||||
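The translateName test helper above is a thin wrapper around translate.ToHostTranslator, the same type the syncers use to derive host-side object names; a minimal usage sketch (the printed format depends on the translator implementation, which is not part of this diff):

package main

import (
	"fmt"

	"github.com/rancher/k3k/k3k-kubelet/translate"
)

func main() {
	translator := translate.ToHostTranslator{
		ClusterName:      "mycluster",
		ClusterNamespace: "k3k-mycluster",
	}

	// host-side name for the "web-config" object in the virtual "default" namespace
	fmt.Println(translator.TranslateName("default", "web-config"))
}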
@@ -1,10 +1,10 @@
|
||||
package controller
|
||||
package syncer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/event"
|
||||
@@ -18,7 +18,7 @@ import (
|
||||
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -28,42 +28,32 @@ const (
|
||||
priorityClassFinalizerName = "priorityclass.k3k.io/finalizer"
|
||||
)
|
||||
|
||||
type PriorityClassReconciler struct {
|
||||
clusterName string
|
||||
clusterNamespace string
|
||||
|
||||
virtualClient ctrlruntimeclient.Client
|
||||
hostClient ctrlruntimeclient.Client
|
||||
Scheme *runtime.Scheme
|
||||
HostScheme *runtime.Scheme
|
||||
Translator translate.ToHostTranslator
|
||||
type PriorityClassSyncer struct {
|
||||
*SyncerContext
|
||||
}
|
||||
|
||||
// AddPriorityClassReconciler adds a PriorityClass reconciler to k3k-kubelet
|
||||
func AddPriorityClassReconciler(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
|
||||
translator := translate.ToHostTranslator{
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
}
|
||||
|
||||
// AddPriorityClassSyncer adds a PriorityClass syncer to k3k-kubelet
|
||||
func AddPriorityClassSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
|
||||
// initialize a new Reconciler
|
||||
reconciler := PriorityClassReconciler{
|
||||
clusterName: clusterName,
|
||||
clusterNamespace: clusterNamespace,
|
||||
|
||||
virtualClient: virtMgr.GetClient(),
|
||||
hostClient: hostMgr.GetClient(),
|
||||
Scheme: virtMgr.GetScheme(),
|
||||
HostScheme: hostMgr.GetScheme(),
|
||||
Translator: translator,
|
||||
reconciler := PriorityClassSyncer{
|
||||
SyncerContext: &SyncerContext{
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
VirtualClient: virtMgr.GetClient(),
|
||||
HostClient: hostMgr.GetClient(),
|
||||
Translator: translate.ToHostTranslator{
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
name := translator.TranslateName("", priorityClassControllerName)
|
||||
name := reconciler.Translator.TranslateName(clusterNamespace, priorityClassControllerName)
|
||||
|
||||
return ctrl.NewControllerManagedBy(virtMgr).
|
||||
Named(name).
|
||||
For(&schedulingv1.PriorityClass{}).
|
||||
WithEventFilter(ignoreSystemPrefixPredicate).
|
||||
For(&schedulingv1.PriorityClass{}).WithEventFilter(ignoreSystemPrefixPredicate).
|
||||
WithEventFilter(predicate.NewPredicateFuncs(reconciler.filterResources)).
|
||||
Complete(&reconciler)
|
||||
}
|
||||
|
||||
@@ -83,36 +73,65 @@ var ignoreSystemPrefixPredicate = predicate.Funcs{
|
||||
},
|
||||
}
|
||||
|
||||
func (r *PriorityClassReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.clusterName, "clusterNamespace", r.clusterNamespace)
|
||||
func (r *PriorityClassSyncer) filterResources(object ctrlruntimeclient.Object) bool {
|
||||
var cluster v1beta1.Cluster
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// check for priorityClassConfig
|
||||
syncConfig := cluster.Spec.Sync.PriorityClasses
|
||||
|
||||
// If syncing is disabled, only process deletions to allow for cleanup.
|
||||
if !syncConfig.Enabled {
|
||||
return object.GetDeletionTimestamp() != nil
|
||||
}
|
||||
|
||||
labelSelector := labels.SelectorFromSet(syncConfig.Selector)
|
||||
if labelSelector.Empty() {
|
||||
return true
|
||||
}
|
||||
|
||||
return labelSelector.Matches(labels.Set(object.GetLabels()))
|
||||
}
|
||||
|
||||
func (r *PriorityClassSyncer) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.ClusterName, "clusterNamespace", r.ClusterNamespace)
|
||||
ctx = ctrl.LoggerInto(ctx, log)
|
||||
|
||||
var (
|
||||
priorityClass schedulingv1.PriorityClass
|
||||
cluster v1alpha1.Cluster
|
||||
cluster v1beta1.Cluster
|
||||
)
|
||||
|
||||
if err := r.hostClient.Get(ctx, types.NamespacedName{Name: r.clusterName, Namespace: r.clusterNamespace}, &cluster); err != nil {
|
||||
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
if err := r.virtualClient.Get(ctx, req.NamespacedName, &priorityClass); err != nil {
|
||||
if err := r.VirtualClient.Get(ctx, req.NamespacedName, &priorityClass); err != nil {
|
||||
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
hostPriorityClass := r.translatePriorityClass(priorityClass)
|
||||
|
||||
if err := controllerutil.SetControllerReference(&cluster, hostPriorityClass, r.HostClient.Scheme()); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
// handle deletion
|
||||
if !priorityClass.DeletionTimestamp.IsZero() {
|
||||
// delete the synced priorityClass if it exists
|
||||
// TODO add test for previous implementation without err != nil check, and also check the other controllers
|
||||
if err := r.hostClient.Delete(ctx, hostPriorityClass); err != nil && !apierrors.IsNotFound(err) {
|
||||
if err := r.HostClient.Delete(ctx, hostPriorityClass); err != nil && !apierrors.IsNotFound(err) {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
// remove the finalizer after cleaning up the synced priorityClass
|
||||
if controllerutil.RemoveFinalizer(&priorityClass, priorityClassFinalizerName) {
|
||||
if err := r.virtualClient.Update(ctx, &priorityClass); err != nil {
|
||||
if err := r.VirtualClient.Update(ctx, &priorityClass); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
@@ -122,7 +141,7 @@ func (r *PriorityClassReconciler) Reconcile(ctx context.Context, req reconcile.R
|
||||
|
||||
// Add finalizer if it does not exist
|
||||
if controllerutil.AddFinalizer(&priorityClass, priorityClassFinalizerName) {
|
||||
if err := r.virtualClient.Update(ctx, &priorityClass); err != nil {
|
||||
if err := r.VirtualClient.Update(ctx, &priorityClass); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
@@ -130,19 +149,19 @@ func (r *PriorityClassReconciler) Reconcile(ctx context.Context, req reconcile.R
|
||||
// create the priorityClass on the host
|
||||
log.Info("creating the priorityClass for the first time on the host cluster")
|
||||
|
||||
err := r.hostClient.Create(ctx, hostPriorityClass)
|
||||
err := r.HostClient.Create(ctx, hostPriorityClass)
|
||||
if err != nil {
|
||||
if !apierrors.IsAlreadyExists(err) {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
return reconcile.Result{}, r.hostClient.Update(ctx, hostPriorityClass)
|
||||
return reconcile.Result{}, r.HostClient.Update(ctx, hostPriorityClass)
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
func (r *PriorityClassReconciler) translatePriorityClass(priorityClass schedulingv1.PriorityClass) *schedulingv1.PriorityClass {
|
||||
func (r *PriorityClassSyncer) translatePriorityClass(priorityClass schedulingv1.PriorityClass) *schedulingv1.PriorityClass {
|
||||
hostPriorityClass := priorityClass.DeepCopy()
|
||||
r.Translator.TranslateTo(hostPriorityClass)
|
||||
|
||||
159
k3k-kubelet/controller/syncer/secret.go
Normal file
@@ -0,0 +1,159 @@
|
||||
package syncer
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
|
||||
)
|
||||
|
||||
const (
|
||||
secretControllerName = "secret-syncer"
|
||||
secretFinalizerName = "secret.k3k.io/finalizer"
|
||||
)
|
||||
|
||||
type SecretSyncer struct {
|
||||
// SyncerContext contains all client information for host and virtual cluster
|
||||
*SyncerContext
|
||||
}
|
||||
|
||||
func (s *SecretSyncer) Name() string {
|
||||
return secretControllerName
|
||||
}
|
||||
|
||||
// AddSecretSyncer adds secret syncer controller to the manager of the virtual cluster
|
||||
func AddSecretSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
|
||||
reconciler := SecretSyncer{
|
||||
SyncerContext: &SyncerContext{
|
||||
VirtualClient: virtMgr.GetClient(),
|
||||
HostClient: hostMgr.GetClient(),
|
||||
Translator: translate.ToHostTranslator{
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
},
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
},
|
||||
}
|
||||
|
||||
name := reconciler.Translator.TranslateName(clusterNamespace, secretControllerName)
|
||||
|
||||
return ctrl.NewControllerManagedBy(virtMgr).
|
||||
Named(name).
|
||||
For(&v1.Secret{}).WithEventFilter(predicate.NewPredicateFuncs(reconciler.filterResources)).
|
||||
Complete(&reconciler)
|
||||
}
|
||||
|
||||
func (r *SecretSyncer) filterResources(object client.Object) bool {
|
||||
var cluster v1beta1.Cluster
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// check for Secrets Sync Config
|
||||
syncConfig := cluster.Spec.Sync.Secrets
|
||||
|
||||
// If syncing is disabled, only process deletions to allow for cleanup.
|
||||
if !syncConfig.Enabled {
|
||||
return object.GetDeletionTimestamp() != nil
|
||||
}
|
||||
|
||||
labelSelector := labels.SelectorFromSet(syncConfig.Selector)
|
||||
if labelSelector.Empty() {
|
||||
return true
|
||||
}
|
||||
|
||||
return labelSelector.Matches(labels.Set(object.GetLabels()))
|
||||
}
|
||||
|
||||
// Reconcile implements reconcile.Reconciler and synchronizes the objects in objs to the host cluster
|
||||
func (s *SecretSyncer) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
log := ctrl.LoggerFrom(ctx).WithValues("cluster", s.ClusterName, "clusterNamespace", s.ClusterName)
|
||||
ctx = ctrl.LoggerInto(ctx, log)
|
||||
|
||||
var cluster v1beta1.Cluster
|
||||
|
||||
if err := s.HostClient.Get(ctx, types.NamespacedName{Name: s.ClusterName, Namespace: s.ClusterNamespace}, &cluster); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
var virtualSecret v1.Secret
|
||||
|
||||
if err := s.VirtualClient.Get(ctx, req.NamespacedName, &virtualSecret); err != nil {
|
||||
return reconcile.Result{}, client.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
syncedSecret := s.translateSecret(&virtualSecret)
|
||||
|
||||
if err := controllerutil.SetControllerReference(&cluster, syncedSecret, s.HostClient.Scheme()); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
// handle deletion
|
||||
if !virtualSecret.DeletionTimestamp.IsZero() {
|
||||
// delete the synced secret if it exists
|
||||
if err := s.HostClient.Delete(ctx, syncedSecret); err != nil && !apierrors.IsNotFound(err) {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
// remove the finalizer after cleaning up the synced secret
|
||||
if controllerutil.RemoveFinalizer(&virtualSecret, secretFinalizerName) {
|
||||
if err := s.VirtualClient.Update(ctx, &virtualSecret); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
// Add finalizer if it does not exist
|
||||
if controllerutil.AddFinalizer(&virtualSecret, secretFinalizerName) {
|
||||
if err := s.VirtualClient.Update(ctx, &virtualSecret); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
var hostSecret v1.Secret
|
||||
if err := s.HostClient.Get(ctx, types.NamespacedName{Name: syncedSecret.Name, Namespace: syncedSecret.Namespace}, &hostSecret); err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
log.Info("creating the Secret for the first time on the host cluster")
|
||||
return reconcile.Result{}, s.HostClient.Create(ctx, syncedSecret)
|
||||
}
|
||||
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
// TODO: Add option to keep labels/annotations set by the host cluster
|
||||
log.Info("updating Secret on the host cluster")
|
||||
|
||||
return reconcile.Result{}, s.HostClient.Update(ctx, syncedSecret)
|
||||
}
|
||||
|
||||
// translateSecret translates a given Secret created in the virtual cluster
// into its host cluster counterpart
|
||||
func (s *SecretSyncer) translateSecret(secret *v1.Secret) *v1.Secret {
|
||||
hostSecret := secret.DeepCopy()
|
||||
|
||||
if hostSecret.Type == v1.SecretTypeServiceAccountToken {
|
||||
hostSecret.Type = v1.SecretTypeOpaque
|
||||
}
|
||||
|
||||
s.Translator.TranslateTo(hostSecret)
|
||||
|
||||
return hostSecret
|
||||
}
|
||||
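translateSecret above downgrades service-account token Secrets to Opaque before they land on the host, presumably so the host's token controller leaves them alone; a tiny standalone illustration of that type handling (the helper name is invented, translateSecret itself is unexported):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// toHostSecretType mirrors the type handling in translateSecret: service
// account token Secrets become plain Opaque Secrets on the host side.
func toHostSecretType(t corev1.SecretType) corev1.SecretType {
	if t == corev1.SecretTypeServiceAccountToken {
		return corev1.SecretTypeOpaque
	}

	return t
}

func main() {
	fmt.Println(toHostSecretType(corev1.SecretTypeServiceAccountToken)) // Opaque
	fmt.Println(toHostSecretType(corev1.SecretTypeOpaque))              // Opaque
}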
233
k3k-kubelet/controller/syncer/secret_test.go
Normal file
@@ -0,0 +1,233 @@
|
||||
package syncer_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var SecretTests = func() {
|
||||
var (
|
||||
namespace string
|
||||
cluster v1beta1.Cluster
|
||||
)
|
||||
|
||||
BeforeEach(func() {
|
||||
ctx := context.Background()
|
||||
|
||||
ns := v1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-"},
|
||||
}
|
||||
err := hostTestEnv.k8sClient.Create(ctx, &ns)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
namespace = ns.Name
|
||||
|
||||
cluster = v1beta1.Cluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "cluster-",
|
||||
Namespace: namespace,
|
||||
},
|
||||
}
|
||||
err = hostTestEnv.k8sClient.Create(ctx, &cluster)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
err = syncer.AddSecretSyncer(ctx, virtManager, hostManager, cluster.Name, cluster.Namespace)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
ns := v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
|
||||
err := hostTestEnv.k8sClient.Delete(context.Background(), &ns)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("creates a Secret on the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
secret := &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "secret-",
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"foo": []byte("bar"),
|
||||
},
|
||||
}
|
||||
|
||||
err := virtTestEnv.k8sClient.Create(ctx, secret)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created Secret %s in virtual cluster", secret.Name))
|
||||
|
||||
var hostSecret v1.Secret
|
||||
hostSecretName := translateName(cluster, secret.Namespace, secret.Name)
|
||||
|
||||
Eventually(func() error {
|
||||
key := client.ObjectKey{Name: hostSecretName, Namespace: namespace}
|
||||
return hostTestEnv.k8sClient.Get(ctx, key, &hostSecret)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Created Secret %s in host cluster", hostSecretName))
|
||||
|
||||
Expect(hostSecret.Data).To(Equal(secret.Data))
|
||||
Expect(hostSecret.Labels).To(ContainElement("bar"))
|
||||
|
||||
GinkgoWriter.Printf("labels: %v\n", hostSecret.Labels)
|
||||
})
|
||||
|
||||
It("updates a Secret on the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
secret := &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "secret-",
|
||||
Namespace: "default",
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"foo": []byte("bar"),
|
||||
},
|
||||
}
|
||||
|
||||
err := virtTestEnv.k8sClient.Create(ctx, secret)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created secret %s in virtual cluster", secret.Name))
|
||||
|
||||
var hostSecret v1.Secret
|
||||
hostSecretName := translateName(cluster, secret.Namespace, secret.Name)
|
||||
|
||||
Eventually(func() error {
|
||||
key := client.ObjectKey{Name: hostSecretName, Namespace: namespace}
|
||||
return hostTestEnv.k8sClient.Get(ctx, key, &hostSecret)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Created secret %s in host cluster", hostSecretName))
|
||||
|
||||
Expect(hostSecret.Data).To(Equal(secret.Data))
|
||||
Expect(hostSecret.Labels).NotTo(ContainElement("bar"))
|
||||
|
||||
key := client.ObjectKeyFromObject(secret)
|
||||
err = virtTestEnv.k8sClient.Get(ctx, key, secret)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
secret.Labels = map[string]string{"foo": "bar"}
|
||||
|
||||
// update virtual secret
|
||||
err = virtTestEnv.k8sClient.Update(ctx, secret)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(secret.Labels).To(ContainElement("bar"))
|
||||
|
||||
// check hostSecret
|
||||
Eventually(func() map[string]string {
|
||||
key := client.ObjectKey{Name: hostSecretName, Namespace: namespace}
|
||||
err = hostTestEnv.k8sClient.Get(ctx, key, &hostSecret)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
return hostSecret.Labels
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(ContainElement("bar"))
|
||||
})
|
||||
|
||||
It("deletes a secret on the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
secret := &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "secret-",
|
||||
Namespace: "default",
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"foo": []byte("bar"),
|
||||
},
|
||||
}
|
||||
|
||||
err := virtTestEnv.k8sClient.Create(ctx, secret)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created secret %s in virtual cluster", secret.Name))
|
||||
|
||||
var hostSecret v1.Secret
|
||||
hostSecretName := translateName(cluster, secret.Namespace, secret.Name)
|
||||
|
||||
Eventually(func() error {
|
||||
key := client.ObjectKey{Name: hostSecretName, Namespace: namespace}
|
||||
return hostTestEnv.k8sClient.Get(ctx, key, &hostSecret)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Created secret %s in host cluster", hostSecretName))
|
||||
|
||||
Expect(hostSecret.Data).To(Equal(secret.Data))
|
||||
|
||||
err = virtTestEnv.k8sClient.Delete(ctx, secret)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
Eventually(func() bool {
|
||||
key := client.ObjectKey{Name: hostSecretName, Namespace: namespace}
|
||||
err := hostTestEnv.k8sClient.Get(ctx, key, &hostSecret)
|
||||
return apierrors.IsNotFound(err)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeTrue())
|
||||
})
|
||||
It("will not create a secret on the host cluster if disabled", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
cluster.Spec.Sync.Secrets.Enabled = false
|
||||
err := hostTestEnv.k8sClient.Update(ctx, &cluster)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
secret := &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "secret-",
|
||||
Namespace: "default",
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"foo": []byte("bar"),
|
||||
},
|
||||
}
|
||||
|
||||
err = virtTestEnv.k8sClient.Create(ctx, secret)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created secret %s in virtual cluster", secret.Name))
|
||||
|
||||
var hostSecret v1.Secret
|
||||
hostSecretName := translateName(cluster, secret.Namespace, secret.Name)
|
||||
|
||||
Eventually(func() bool {
|
||||
key := client.ObjectKey{Name: hostSecretName, Namespace: namespace}
|
||||
err = hostTestEnv.k8sClient.Get(ctx, key, &hostSecret)
|
||||
return apierrors.IsNotFound(err)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeTrue())
|
||||
})
|
||||
}
|
||||
@@ -1,12 +1,13 @@
|
||||
package controller
|
||||
package syncer
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
@@ -15,23 +16,16 @@ import (
|
||||
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
|
||||
)
|
||||
|
||||
const (
|
||||
serviceSyncerController = "service-syncer-controller"
|
||||
serviceFinalizerName = "service.k3k.io/finalizer"
|
||||
serviceControllerName = "service-syncer-controller"
|
||||
serviceFinalizerName = "service.k3k.io/finalizer"
|
||||
)
|
||||
|
||||
type ServiceReconciler struct {
|
||||
clusterName string
|
||||
clusterNamespace string
|
||||
|
||||
virtualClient ctrlruntimeclient.Client
|
||||
hostClient ctrlruntimeclient.Client
|
||||
Scheme *runtime.Scheme
|
||||
HostScheme *runtime.Scheme
|
||||
Translator translate.ToHostTranslator
|
||||
*SyncerContext
|
||||
}
|
||||
|
||||
// AddServiceSyncer adds the service syncer controller to the manager of the virtual cluster
|
||||
@@ -42,24 +36,25 @@ func AddServiceSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clu
|
||||
}
|
||||
|
||||
reconciler := ServiceReconciler{
|
||||
clusterName: clusterName,
|
||||
clusterNamespace: clusterNamespace,
|
||||
|
||||
virtualClient: virtMgr.GetClient(),
|
||||
hostClient: hostMgr.GetClient(),
|
||||
Scheme: virtMgr.GetScheme(),
|
||||
HostScheme: hostMgr.GetScheme(),
|
||||
Translator: translator,
|
||||
SyncerContext: &SyncerContext{
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
VirtualClient: virtMgr.GetClient(),
|
||||
HostClient: hostMgr.GetClient(),
|
||||
Translator: translator,
|
||||
},
|
||||
}
|
||||
|
||||
name := reconciler.Translator.TranslateName(clusterNamespace, serviceControllerName)
|
||||
|
||||
return ctrl.NewControllerManagedBy(virtMgr).
|
||||
Named(serviceSyncerController).
|
||||
For(&v1.Service{}).
|
||||
Named(name).
|
||||
For(&v1.Service{}).WithEventFilter(predicate.NewPredicateFuncs(reconciler.filterResources)).
|
||||
Complete(&reconciler)
|
||||
}
|
||||
|
||||
func (r *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.clusterName, "clusterNamespace", r.clusterNamespace)
|
||||
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.ClusterName, "clusterNamespace", r.ClusterNamespace)
|
||||
ctx = ctrl.LoggerInto(ctx, log)
|
||||
|
||||
if req.Name == "kubernetes" || req.Name == "kube-dns" {
|
||||
@@ -68,34 +63,33 @@ func (r *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request
|
||||
|
||||
var (
|
||||
virtService v1.Service
|
||||
cluster v1alpha1.Cluster
|
||||
cluster v1beta1.Cluster
|
||||
)
|
||||
|
||||
if err := r.hostClient.Get(ctx, types.NamespacedName{Name: r.clusterName, Namespace: r.clusterNamespace}, &cluster); err != nil {
|
||||
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
if err := r.virtualClient.Get(ctx, req.NamespacedName, &virtService); err != nil {
|
||||
if err := r.VirtualClient.Get(ctx, req.NamespacedName, &virtService); err != nil {
|
||||
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
syncedService := r.service(&virtService)
|
||||
if err := controllerutil.SetControllerReference(&cluster, syncedService, r.HostScheme); err != nil {
|
||||
|
||||
if err := controllerutil.SetControllerReference(&cluster, syncedService, r.HostClient.Scheme()); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
// handle deletion
|
||||
if !virtService.DeletionTimestamp.IsZero() {
|
||||
// delete the synced service if it exists
|
||||
if err := r.hostClient.Delete(ctx, syncedService); err != nil {
|
||||
if err := r.HostClient.Delete(ctx, syncedService); err != nil {
|
||||
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
// remove the finalizer after cleaning up the synced service
|
||||
if controllerutil.ContainsFinalizer(&virtService, serviceFinalizerName) {
|
||||
controllerutil.RemoveFinalizer(&virtService, serviceFinalizerName)
|
||||
|
||||
if err := r.virtualClient.Update(ctx, &virtService); err != nil {
|
||||
if controllerutil.RemoveFinalizer(&virtService, serviceFinalizerName) {
|
||||
if err := r.VirtualClient.Update(ctx, &virtService); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
@@ -104,20 +98,18 @@ func (r *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request
|
||||
}
|
||||
|
||||
// Add finalizer if it does not exist
|
||||
if !controllerutil.ContainsFinalizer(&virtService, serviceFinalizerName) {
|
||||
controllerutil.AddFinalizer(&virtService, serviceFinalizerName)
|
||||
|
||||
if err := r.virtualClient.Update(ctx, &virtService); err != nil {
|
||||
if controllerutil.AddFinalizer(&virtService, serviceFinalizerName) {
|
||||
if err := r.VirtualClient.Update(ctx, &virtService); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
// create or update the service on host
|
||||
var hostService v1.Service
|
||||
if err := r.hostClient.Get(ctx, types.NamespacedName{Name: syncedService.Name, Namespace: r.clusterNamespace}, &hostService); err != nil {
|
||||
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: syncedService.Name, Namespace: r.ClusterNamespace}, &hostService); err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
log.Info("creating the service for the first time on the host cluster")
|
||||
return reconcile.Result{}, r.hostClient.Create(ctx, syncedService)
|
||||
return reconcile.Result{}, r.HostClient.Create(ctx, syncedService)
|
||||
}
|
||||
|
||||
return reconcile.Result{}, err
|
||||
@@ -125,7 +117,32 @@ func (r *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request
|
||||
|
||||
log.Info("updating service on the host cluster")
|
||||
|
||||
return reconcile.Result{}, r.hostClient.Update(ctx, syncedService)
|
||||
return reconcile.Result{}, r.HostClient.Update(ctx, syncedService)
|
||||
}
|
||||
|
||||
func (r *ServiceReconciler) filterResources(object ctrlruntimeclient.Object) bool {
|
||||
var cluster v1beta1.Cluster
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// check for serviceSyncConfig
|
||||
syncConfig := cluster.Spec.Sync.Services
|
||||
|
||||
// If syncing is disabled, only process deletions to allow for cleanup.
|
||||
if !syncConfig.Enabled {
|
||||
return object.GetDeletionTimestamp() != nil
|
||||
}
|
||||
|
||||
labelSelector := labels.SelectorFromSet(syncConfig.Selector)
|
||||
if labelSelector.Empty() {
|
||||
return true
|
||||
}
|
||||
|
||||
return labelSelector.Matches(labels.Set(object.GetLabels()))
|
||||
}
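The filter above leans on standard label-selector semantics: an empty selector matches everything, otherwise only objects whose labels match are passed on to Reconcile. A small stand-alone sketch of that behaviour using only the apimachinery labels package (the values are made up; the Cluster sync field is assumed to be a plain map of labels, as it is used here):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// An empty set yields an empty selector, which is why filterResources
	// short-circuits with `return true` when labelSelector.Empty() holds.
	empty := labels.SelectorFromSet(nil)
	fmt.Println(empty.Empty(), empty.Matches(labels.Set{"any": "thing"})) // true true

	// A non-empty selector only lets matching objects through to Reconcile.
	sel := labels.SelectorFromSet(labels.Set{"sync": "enabled"})
	fmt.Println(sel.Matches(labels.Set{"sync": "enabled", "app": "web"})) // true
	fmt.Println(sel.Matches(labels.Set{"app": "web"}))                    // false
}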
|
||||
|
||||
func (s *ServiceReconciler) service(obj *v1.Service) *v1.Service {
|
||||
k3k-kubelet/controller/syncer/service_test.go (new file, 269 lines)
@@ -0,0 +1,269 @@
|
||||
package syncer_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var ServiceTests = func() {
|
||||
var (
|
||||
namespace string
|
||||
cluster v1beta1.Cluster
|
||||
)
|
||||
|
||||
BeforeEach(func() {
|
||||
ctx := context.Background()
|
||||
|
||||
ns := v1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-"},
|
||||
}
|
||||
err := hostTestEnv.k8sClient.Create(ctx, &ns)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
namespace = ns.Name
|
||||
|
||||
cluster = v1beta1.Cluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "cluster-",
|
||||
Namespace: namespace,
|
||||
},
|
||||
}
|
||||
err = hostTestEnv.k8sClient.Create(ctx, &cluster)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
err = syncer.AddServiceSyncer(ctx, virtManager, hostManager, cluster.Name, cluster.Namespace)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
ns := v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
|
||||
err := hostTestEnv.k8sClient.Delete(context.Background(), &ns)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("creates a service on the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
service := &v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "service-",
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Type: v1.ServiceTypeNodePort,
|
||||
Ports: []v1.ServicePort{
|
||||
{
|
||||
Name: "test-port",
|
||||
Port: 8888,
|
||||
TargetPort: intstr.FromInt32(8888),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := virtTestEnv.k8sClient.Create(ctx, service)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created service %s in virtual cluster", service.Name))
|
||||
|
||||
var hostService v1.Service
|
||||
hostServiceName := translateName(cluster, service.Namespace, service.Name)
|
||||
|
||||
Eventually(func() error {
|
||||
key := client.ObjectKey{Name: hostServiceName, Namespace: namespace}
|
||||
return hostTestEnv.k8sClient.Get(ctx, key, &hostService)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Created Service %s in host cluster", hostServiceName))
|
||||
|
||||
Expect(hostService.Spec.Type).To(Equal(v1.ServiceTypeNodePort))
|
||||
Expect(hostService.Spec.Ports[0].Name).To(Equal("test-port"))
|
||||
Expect(hostService.Spec.Ports[0].Port).To(Equal(int32(8888)))
|
||||
|
||||
GinkgoWriter.Printf("labels: %v\n", hostService.Labels)
|
||||
})
|
||||
|
||||
It("updates a service on the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
service := &v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "service-",
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Type: v1.ServiceTypeNodePort,
|
||||
Ports: []v1.ServicePort{
|
||||
{
|
||||
Name: "test-port",
|
||||
Port: 8888,
|
||||
TargetPort: intstr.FromInt32(8888),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := virtTestEnv.k8sClient.Create(ctx, service)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created service %s in virtual cluster", service.Name))
|
||||
|
||||
var hostService v1.Service
|
||||
hostServiceName := translateName(cluster, service.Namespace, service.Name)
|
||||
|
||||
Eventually(func() error {
|
||||
key := client.ObjectKey{Name: hostServiceName, Namespace: namespace}
|
||||
return hostTestEnv.k8sClient.Get(ctx, key, &hostService)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Created Service %s in host cluster", hostServiceName))
|
||||
|
||||
Expect(hostService.Spec.Type).To(Equal(v1.ServiceTypeNodePort))
|
||||
Expect(hostService.Spec.Ports[0].Name).To(Equal("test-port"))
|
||||
Expect(hostService.Spec.Ports[0].Port).To(Equal(int32(8888)))
|
||||
|
||||
key := client.ObjectKeyFromObject(service)
|
||||
err = virtTestEnv.k8sClient.Get(ctx, key, service)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
service.Spec.Ports[0].Name = "test-port-updated"
|
||||
|
||||
// update virtual service
|
||||
err = virtTestEnv.k8sClient.Update(ctx, service)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// check hostService
|
||||
Eventually(func() string {
|
||||
key := client.ObjectKey{Name: hostServiceName, Namespace: namespace}
|
||||
err = hostTestEnv.k8sClient.Get(ctx, key, &hostService)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
return hostService.Spec.Ports[0].Name
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(Equal("test-port-updated"))
|
||||
})
|
||||
|
||||
It("deletes a service on the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
service := &v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "service-",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Type: v1.ServiceTypeNodePort,
|
||||
Ports: []v1.ServicePort{
|
||||
{
|
||||
Name: "test-port",
|
||||
Port: 8888,
|
||||
TargetPort: intstr.FromInt32(8888),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := virtTestEnv.k8sClient.Create(ctx, service)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created service %s in virtual cluster", service.Name))
|
||||
|
||||
var hostService v1.Service
|
||||
hostServiceName := translateName(cluster, service.Namespace, service.Name)
|
||||
|
||||
Eventually(func() error {
|
||||
key := client.ObjectKey{Name: hostServiceName, Namespace: namespace}
|
||||
return hostTestEnv.k8sClient.Get(ctx, key, &hostService)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Created service %s in host cluster", hostServiceName))
|
||||
|
||||
Expect(hostService.Spec.Type).To(Equal(v1.ServiceTypeNodePort))
|
||||
Expect(hostService.Spec.Ports[0].Name).To(Equal("test-port"))
|
||||
Expect(hostService.Spec.Ports[0].Port).To(Equal(int32(8888)))
|
||||
|
||||
err = virtTestEnv.k8sClient.Delete(ctx, service)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
Eventually(func() bool {
|
||||
key := client.ObjectKey{Name: hostServiceName, Namespace: namespace}
|
||||
err := hostTestEnv.k8sClient.Get(ctx, key, &hostService)
|
||||
return apierrors.IsNotFound(err)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeTrue())
|
||||
})
|
||||
|
||||
It("will not create a service on the host cluster if disabled", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
cluster.Spec.Sync.Services.Enabled = false
|
||||
err := hostTestEnv.k8sClient.Update(ctx, &cluster)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
service := &v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "service-",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Type: v1.ServiceTypeNodePort,
|
||||
Ports: []v1.ServicePort{
|
||||
{
|
||||
Name: "test-port",
|
||||
Port: 8888,
|
||||
TargetPort: intstr.FromInt32(8888),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err = virtTestEnv.k8sClient.Create(ctx, service)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created service %s in virtual cluster", service.Name))
|
||||
|
||||
var hostService v1.Service
|
||||
hostServiceName := translateName(cluster, service.Namespace, service.Name)
|
||||
|
||||
Eventually(func() bool {
|
||||
key := client.ObjectKey{Name: hostServiceName, Namespace: namespace}
|
||||
err = hostTestEnv.k8sClient.Get(ctx, key, &hostService)
|
||||
return apierrors.IsNotFound(err)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeTrue())
|
||||
})
|
||||
}
|
||||
k3k-kubelet/controller/syncer/syncer.go (new file, 15 lines)
@@ -0,0 +1,15 @@
package syncer

import (
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/rancher/k3k/k3k-kubelet/translate"
)

type SyncerContext struct {
	ClusterName      string
	ClusterNamespace string
	VirtualClient    client.Client
	HostClient       client.Client
	Translator       translate.ToHostTranslator
}
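A rough sketch of how this context is meant to be embedded, mirroring the ServiceReconciler change above (widgetSyncer is a hypothetical example type, written as a fragment in the same package so it can reuse the imports shown here):

// widgetSyncer is a hypothetical syncer illustrating the embedding pattern used
// by the reconcilers in this change: shared identity and clients come from
// SyncerContext, behaviour stays in the concrete type.
type widgetSyncer struct {
	*SyncerContext
}

func newWidgetSyncer(virtClient, hostClient client.Client, name, namespace string, tr translate.ToHostTranslator) *widgetSyncer {
	return &widgetSyncer{
		SyncerContext: &SyncerContext{
			ClusterName:      name,
			ClusterNamespace: namespace,
			VirtualClient:    virtClient,
			HostClient:       hostClient,
			Translator:       tr,
		},
	}
}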
@@ -1,4 +1,4 @@
package controller_test
package syncer_test

import (
	"context"
@@ -19,7 +19,8 @@ import (
	ctrl "sigs.k8s.io/controller-runtime"
	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	"github.com/rancher/k3k/k3k-kubelet/translate"
	"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
@@ -91,7 +92,7 @@ func NewTestEnv() *TestEnv {
	By("bootstrapping test environment")

	testEnv := &envtest.Environment{
		CRDDirectoryPaths:     []string{filepath.Join("..", "..", "charts", "k3k", "crds")},
		CRDDirectoryPaths:     []string{filepath.Join("..", "..", "..", "charts", "k3k", "crds")},
		ErrorIfCRDPathMissing: true,
		BinaryAssetsDirectory: tempDir,
		Scheme:                buildScheme(),
@@ -118,7 +119,7 @@ func buildScheme() *runtime.Scheme {

	err := clientgoscheme.AddToScheme(scheme)
	Expect(err).NotTo(HaveOccurred())
	err = v1alpha1.AddToScheme(scheme)
	err = v1beta1.AddToScheme(scheme)
	Expect(err).NotTo(HaveOccurred())

	return scheme
@@ -165,5 +166,19 @@ var _ = Describe("Kubelet Controller", func() {
		cancel()
	})

	Describe("PriorityClass", PriorityClassTests)
	Describe("PriorityClass Syncer", PriorityClassTests)
	Describe("ConfigMap Syncer", ConfigMapTests)
	Describe("Secret Syncer", SecretTests)
	Describe("Service Syncer", ServiceTests)
	Describe("Ingress Syncer", IngressTests)
	Describe("PersistentVolumeClaim Syncer", PVCTests)
})

func translateName(cluster v1beta1.Cluster, namespace, name string) string {
	translator := translate.ToHostTranslator{
		ClusterName:      cluster.Name,
		ClusterNamespace: cluster.Namespace,
	}

	return translator.TranslateName(namespace, name)
}
@@ -7,6 +7,7 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/utils/ptr"
|
||||
@@ -20,11 +21,10 @@ import (
|
||||
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/rancher/k3k/pkg/controller/cluster/agent"
|
||||
"github.com/rancher/k3k/pkg/log"
|
||||
)
|
||||
|
||||
const (
|
||||
webhookName = "podmutator.k3k.io"
|
||||
webhookName = "podmutating.k3k.io"
|
||||
webhookTimeout = int32(10)
|
||||
webhookPath = "/mutate--v1-pod"
|
||||
FieldpathField = "k3k.io/fieldpath"
|
||||
@@ -36,14 +36,14 @@ type webhookHandler struct {
|
||||
serviceName string
|
||||
clusterName string
|
||||
clusterNamespace string
|
||||
logger *log.Logger
|
||||
logger logr.Logger
|
||||
webhookPort int
|
||||
}
|
||||
|
||||
// AddPodMutatorWebhook will add a mutator webhook to the virtual cluster to
|
||||
// AddPodMutatingWebhook will add a mutating webhook to the virtual cluster to
|
||||
// modify the nodeName of the created pods with the name of the virtual kubelet node name
|
||||
// as well as remove any status fields of the downward apis env fields
|
||||
func AddPodMutatorWebhook(ctx context.Context, mgr manager.Manager, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, serviceName string, logger *log.Logger, webhookPort int) error {
|
||||
func AddPodMutatingWebhook(ctx context.Context, mgr manager.Manager, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, serviceName string, logger logr.Logger, webhookPort int) error {
|
||||
handler := webhookHandler{
|
||||
client: mgr.GetClient(),
|
||||
scheme: mgr.GetScheme(),
|
||||
@@ -54,7 +54,7 @@ func AddPodMutatorWebhook(ctx context.Context, mgr manager.Manager, hostClient c
|
||||
webhookPort: webhookPort,
|
||||
}
|
||||
|
||||
// create mutator webhook configuration to the cluster
|
||||
// create mutating webhook configuration to the cluster
|
||||
config, err := handler.configuration(ctx, hostClient)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -75,7 +75,7 @@ func (w *webhookHandler) Default(ctx context.Context, obj runtime.Object) error
|
||||
return fmt.Errorf("invalid request: object was type %t not cluster", obj)
|
||||
}
|
||||
|
||||
w.logger.Infow("mutator webhook request", "Pod", pod.Name, "Namespace", pod.Namespace)
|
||||
w.logger.Info("mutating webhook request", "pod", pod.Name, "namespace", pod.Namespace)
|
||||
// look for status.* fields in the env
|
||||
if pod.Annotations == nil {
|
||||
pod.Annotations = make(map[string]string)
|
||||
@@ -100,7 +100,7 @@ func (w *webhookHandler) Default(ctx context.Context, obj runtime.Object) error
|
||||
}
|
||||
|
||||
func (w *webhookHandler) configuration(ctx context.Context, hostClient ctrlruntimeclient.Client) (*admissionregistrationv1.MutatingWebhookConfiguration, error) {
|
||||
w.logger.Infow("extracting webhook tls from host cluster")
|
||||
w.logger.Info("extracting webhook tls from host cluster")
|
||||
|
||||
var webhookTLSSecret v1.Secret
|
||||
|
||||
|
||||
@@ -11,11 +11,11 @@ import (
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/go-logr/zapr"
|
||||
"github.com/go-logr/logr"
|
||||
"github.com/virtual-kubelet/virtual-kubelet/log"
|
||||
"github.com/virtual-kubelet/virtual-kubelet/log/klogv2"
|
||||
"github.com/virtual-kubelet/virtual-kubelet/node"
|
||||
"github.com/virtual-kubelet/virtual-kubelet/node/nodeutil"
|
||||
"go.uber.org/zap"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apiserver/pkg/authentication/user"
|
||||
@@ -23,6 +23,7 @@ import (
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"k8s.io/klog/v2"
|
||||
"sigs.k8s.io/controller-runtime/pkg/cache"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/webhook"
|
||||
@@ -35,29 +36,25 @@ import (
|
||||
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
ctrlserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
|
||||
|
||||
k3kkubeletcontroller "github.com/rancher/k3k/k3k-kubelet/controller"
|
||||
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
|
||||
k3kwebhook "github.com/rancher/k3k/k3k-kubelet/controller/webhook"
|
||||
"github.com/rancher/k3k/k3k-kubelet/provider"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
|
||||
"github.com/rancher/k3k/pkg/controller"
|
||||
"github.com/rancher/k3k/pkg/controller/certs"
|
||||
"github.com/rancher/k3k/pkg/controller/cluster/server"
|
||||
"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
|
||||
k3klog "github.com/rancher/k3k/pkg/log"
|
||||
)
|
||||
|
||||
var (
|
||||
baseScheme = runtime.NewScheme()
|
||||
k3kKubeletName = "k3k-kubelet"
|
||||
)
|
||||
var baseScheme = runtime.NewScheme()
|
||||
|
||||
func init() {
|
||||
_ = clientgoscheme.AddToScheme(baseScheme)
|
||||
_ = v1alpha1.AddToScheme(baseScheme)
|
||||
_ = v1beta1.AddToScheme(baseScheme)
|
||||
}
|
||||
|
||||
type kubelet struct {
|
||||
virtualCluster v1alpha1.Cluster
|
||||
virtualCluster v1beta1.Cluster
|
||||
|
||||
name string
|
||||
port int
|
||||
@@ -70,11 +67,11 @@ type kubelet struct {
|
||||
hostMgr manager.Manager
|
||||
virtualMgr manager.Manager
|
||||
node *nodeutil.Node
|
||||
logger *k3klog.Logger
|
||||
logger logr.Logger
|
||||
token string
|
||||
}
|
||||
|
||||
func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet, error) {
|
||||
func newKubelet(ctx context.Context, c *config, logger logr.Logger) (*kubelet, error) {
|
||||
hostConfig, err := clientcmd.BuildConfigFromFlags("", c.HostKubeconfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -97,7 +94,7 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ctrl.SetLogger(zapr.NewLogger(logger.Desugar().WithOptions(zap.AddCallerSkip(1))))
|
||||
ctrl.SetLogger(logger)
|
||||
|
||||
hostMetricsBindAddress := ":8083"
|
||||
virtualMetricsBindAddress := ":8084"
|
||||
@@ -150,34 +147,14 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
|
||||
return nil, errors.New("unable to create controller-runtime mgr for virtual cluster: " + err.Error())
|
||||
}
|
||||
|
||||
logger.Info("adding pod mutator webhook")
|
||||
logger.Info("adding pod mutating webhook")
|
||||
|
||||
if err := k3kwebhook.AddPodMutatorWebhook(ctx, virtualMgr, hostClient, c.ClusterName, c.ClusterNamespace, c.ServiceName, logger, c.WebhookPort); err != nil {
|
||||
return nil, errors.New("unable to add pod mutator webhook for virtual cluster: " + err.Error())
|
||||
if err := k3kwebhook.AddPodMutatingWebhook(ctx, virtualMgr, hostClient, c.ClusterName, c.ClusterNamespace, c.ServiceName, logger, c.WebhookPort); err != nil {
|
||||
return nil, errors.New("unable to add pod mutating webhook for virtual cluster: " + err.Error())
|
||||
}
|
||||
|
||||
logger.Info("adding service syncer controller")
|
||||
|
||||
if err := k3kkubeletcontroller.AddServiceSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
|
||||
return nil, errors.New("failed to add service syncer controller: " + err.Error())
|
||||
}
|
||||
|
||||
logger.Info("adding pvc syncer controller")
|
||||
|
||||
if err := k3kkubeletcontroller.AddPVCSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
|
||||
return nil, errors.New("failed to add pvc syncer controller: " + err.Error())
|
||||
}
|
||||
|
||||
logger.Info("adding pod pvc controller")
|
||||
|
||||
if err := k3kkubeletcontroller.AddPodPVCController(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
|
||||
return nil, errors.New("failed to add pod pvc controller: " + err.Error())
|
||||
}
|
||||
|
||||
logger.Info("adding priorityclass controller")
|
||||
|
||||
if err := k3kkubeletcontroller.AddPriorityClassReconciler(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
|
||||
return nil, errors.New("failed to add priorityclass controller: " + err.Error())
|
||||
if err := addControllers(ctx, hostMgr, virtualMgr, c, hostClient); err != nil {
|
||||
return nil, errors.New("failed to add controller: " + err.Error())
|
||||
}
|
||||
|
||||
clusterIP, err := clusterIP(ctx, c.ServiceName, c.ClusterNamespace, hostClient)
|
||||
@@ -193,7 +170,7 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
|
||||
return nil, errors.New("failed to get the DNS service for the cluster: " + err.Error())
|
||||
}
|
||||
|
||||
var virtualCluster v1alpha1.Cluster
|
||||
var virtualCluster v1beta1.Cluster
|
||||
if err := hostClient.Get(ctx, types.NamespacedName{Name: c.ClusterName, Namespace: c.ClusterNamespace}, &virtualCluster); err != nil {
|
||||
return nil, errors.New("failed to get virtualCluster spec: " + err.Error())
|
||||
}
|
||||
@@ -209,7 +186,7 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
|
||||
hostMgr: hostMgr,
|
||||
virtualMgr: virtualMgr,
|
||||
agentIP: clusterIP,
|
||||
logger: logger.Named(k3kKubeletName),
|
||||
logger: logger,
|
||||
token: c.Token,
|
||||
dnsIP: dnsService.Spec.ClusterIP,
|
||||
port: c.KubeletPort,
|
||||
@@ -231,9 +208,9 @@ func clusterIP(ctx context.Context, serviceName, clusterNamespace string, hostCl
|
||||
return service.Spec.ClusterIP, nil
|
||||
}
|
||||
|
||||
func (k *kubelet) registerNode(ctx context.Context, agentIP string, cfg config) error {
|
||||
func (k *kubelet) registerNode(agentIP string, cfg config) error {
|
||||
providerFunc := k.newProviderFunc(cfg)
|
||||
nodeOpts := k.nodeOpts(ctx, cfg.KubeletPort, cfg.ClusterNamespace, cfg.ClusterName, cfg.AgentHostname, agentIP)
|
||||
nodeOpts := k.nodeOpts(cfg.KubeletPort, cfg.ClusterNamespace, cfg.ClusterName, cfg.AgentHostname, agentIP)
|
||||
|
||||
var err error
|
||||
|
||||
@@ -251,34 +228,36 @@ func (k *kubelet) start(ctx context.Context) {
|
||||
go func() {
|
||||
err := k.hostMgr.Start(ctx)
|
||||
if err != nil {
|
||||
k.logger.Fatalw("host manager stopped", zap.Error(err))
|
||||
k.logger.Error(err, "host manager stopped")
|
||||
}
|
||||
}()
|
||||
|
||||
go func() {
|
||||
err := k.virtualMgr.Start(ctx)
|
||||
if err != nil {
|
||||
k.logger.Fatalw("virtual manager stopped", zap.Error(err))
|
||||
k.logger.Error(err, "virtual manager stopped")
|
||||
}
|
||||
}()
|
||||
|
||||
// run the node async so that we can wait for it to be ready in another call
|
||||
|
||||
go func() {
|
||||
ctx = log.WithLogger(ctx, k.logger)
|
||||
klog.SetLogger(k.logger)
|
||||
|
||||
ctx = log.WithLogger(ctx, klogv2.New(nil))
|
||||
if err := k.node.Run(ctx); err != nil {
|
||||
k.logger.Fatalw("node errored when running", zap.Error(err))
|
||||
k.logger.Error(err, "node errored when running")
|
||||
}
|
||||
}()
|
||||
|
||||
if err := k.node.WaitReady(context.Background(), time.Minute*1); err != nil {
|
||||
k.logger.Fatalw("node was not ready within timeout of 1 minute", zap.Error(err))
|
||||
k.logger.Error(err, "node was not ready within timeout of 1 minute")
|
||||
}
|
||||
|
||||
<-k.node.Done()
|
||||
|
||||
if err := k.node.Err(); err != nil {
|
||||
k.logger.Fatalw("node stopped with an error", zap.Error(err))
|
||||
k.logger.Error(err, "node stopped with an error")
|
||||
}
|
||||
|
||||
k.logger.Info("node exited successfully")
|
||||
@@ -297,7 +276,7 @@ func (k *kubelet) newProviderFunc(cfg config) nodeutil.NewProviderFunc {
|
||||
}
|
||||
}
|
||||
|
||||
func (k *kubelet) nodeOpts(ctx context.Context, srvPort int, namespace, name, hostname, agentIP string) nodeutil.NodeOpt {
|
||||
func (k *kubelet) nodeOpts(srvPort int, namespace, name, hostname, agentIP string) nodeutil.NodeOpt {
|
||||
return func(c *nodeutil.NodeConfig) error {
|
||||
c.HTTPListenAddr = fmt.Sprintf(":%d", srvPort)
|
||||
// set up the routes
|
||||
@@ -308,7 +287,7 @@ func (k *kubelet) nodeOpts(ctx context.Context, srvPort int, namespace, name, ho
|
||||
|
||||
c.Handler = mux
|
||||
|
||||
tlsConfig, err := loadTLSConfig(ctx, k.hostClient, name, namespace, k.name, hostname, k.token, agentIP)
|
||||
tlsConfig, err := loadTLSConfig(name, namespace, k.name, hostname, k.token, agentIP)
|
||||
if err != nil {
|
||||
return errors.New("unable to get tls config: " + err.Error())
|
||||
}
|
||||
@@ -319,12 +298,12 @@ func (k *kubelet) nodeOpts(ctx context.Context, srvPort int, namespace, name, ho
|
||||
}
|
||||
}
|
||||
|
||||
func virtRestConfig(ctx context.Context, virtualConfigPath string, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, token string, logger *k3klog.Logger) (*rest.Config, error) {
|
||||
func virtRestConfig(ctx context.Context, virtualConfigPath string, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, token string, logger logr.Logger) (*rest.Config, error) {
|
||||
if virtualConfigPath != "" {
|
||||
return clientcmd.BuildConfigFromFlags("", virtualConfigPath)
|
||||
}
|
||||
// virtual kubeconfig file is empty, trying to fetch the k3k cluster kubeconfig
|
||||
var cluster v1alpha1.Cluster
|
||||
var cluster v1beta1.Cluster
|
||||
if err := hostClient.Get(ctx, types.NamespacedName{Namespace: clusterNamespace, Name: clusterName}, &cluster); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -338,7 +317,7 @@ func virtRestConfig(ctx context.Context, virtualConfigPath string, hostClient ct
|
||||
}, func() error {
|
||||
var err error
|
||||
b, err = bootstrap.DecodedBootstrap(token, endpoint)
|
||||
logger.Infow("decoded bootstrap", zap.Error(err))
|
||||
logger.Error(err, "decoded bootstrap")
|
||||
return err
|
||||
}); err != nil {
|
||||
return nil, errors.New("unable to decode bootstrap: " + err.Error())
|
||||
@@ -389,17 +368,10 @@ func kubeconfigBytes(url string, serverCA, clientCert, clientKey []byte) ([]byte
|
||||
return clientcmd.Write(*config)
|
||||
}
|
||||
|
||||
func loadTLSConfig(ctx context.Context, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, nodeName, hostname, token, agentIP string) (*tls.Config, error) {
|
||||
var (
|
||||
cluster v1alpha1.Cluster
|
||||
b *bootstrap.ControlRuntimeBootstrap
|
||||
)
|
||||
func loadTLSConfig(clusterName, clusterNamespace, nodeName, hostname, token, agentIP string) (*tls.Config, error) {
|
||||
var b *bootstrap.ControlRuntimeBootstrap
|
||||
|
||||
if err := hostClient.Get(ctx, types.NamespacedName{Name: clusterName, Namespace: clusterNamespace}, &cluster); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
endpoint := fmt.Sprintf("%s.%s", server.ServiceName(cluster.Name), cluster.Namespace)
|
||||
endpoint := fmt.Sprintf("%s.%s", server.ServiceName(clusterName), clusterNamespace)
|
||||
|
||||
if err := retry.OnError(controller.Backoff, func(err error) bool {
|
||||
return err != nil
|
||||
@@ -447,3 +419,56 @@ func loadTLSConfig(ctx context.Context, hostClient ctrlruntimeclient.Client, clu
|
||||
Certificates: []tls.Certificate{clientCert},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func addControllers(ctx context.Context, hostMgr, virtualMgr manager.Manager, c *config, hostClient ctrlruntimeclient.Client) error {
|
||||
var cluster v1beta1.Cluster
|
||||
|
||||
objKey := types.NamespacedName{
|
||||
Namespace: c.ClusterNamespace,
|
||||
Name: c.ClusterName,
|
||||
}
|
||||
|
||||
if err := hostClient.Get(ctx, objKey, &cluster); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := syncer.AddConfigMapSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
|
||||
return errors.New("failed to add configmap global syncer: " + err.Error())
|
||||
}
|
||||
|
||||
if err := syncer.AddSecretSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
|
||||
return errors.New("failed to add secret global syncer: " + err.Error())
|
||||
}
|
||||
|
||||
logger.Info("adding service syncer controller")
|
||||
|
||||
if err := syncer.AddServiceSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
|
||||
return errors.New("failed to add service syncer controller: " + err.Error())
|
||||
}
|
||||
|
||||
logger.Info("adding ingress syncer controller")
|
||||
|
||||
if err := syncer.AddIngressSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
|
||||
return errors.New("failed to add ingress syncer controller: " + err.Error())
|
||||
}
|
||||
|
||||
logger.Info("adding pvc syncer controller")
|
||||
|
||||
if err := syncer.AddPVCSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
|
||||
return errors.New("failed to add pvc syncer controller: " + err.Error())
|
||||
}
|
||||
|
||||
logger.Info("adding pod pvc controller")
|
||||
|
||||
if err := syncer.AddPodPVCController(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
|
||||
return errors.New("failed to add pod pvc controller: " + err.Error())
|
||||
}
|
||||
|
||||
logger.Info("adding priorityclass controller")
|
||||
|
||||
if err := syncer.AddPriorityClassSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
|
||||
return errors.New("failed to add priorityclass controller: " + err.Error())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
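Most of the remaining hunks in this changeset swap the sugared zap logger (Infow/Debugw/Fatalw) for logr. A short sketch of the mapping, using only logr and zapr APIs; the messages echo the surrounding diff and the logger construction is illustrative (note that logr has no Fatal, so the old Fatalw call sites no longer exit the process):

package main

import (
	"errors"

	"github.com/go-logr/logr"
	"github.com/go-logr/zapr"
	"go.uber.org/zap"
)

func main() {
	var logger logr.Logger = zapr.NewLogger(zap.NewExample())

	// logger.Infow("creating pod", "host_name", name)  ->  key/value pairs move to Info.
	logger.Info("creating pod", "host_name", "example")

	// logger.Debugw("got a request for update pod")    ->  debug becomes a verbosity level.
	logger.V(1).Info("got a request for update pod")

	// logger.Fatalw("node stopped", zap.Error(err))    ->  errors are reported via Error.
	logger.Error(errors.New("boom"), "node stopped with an error")
}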
|
||||
|
||||
@@ -7,12 +7,12 @@ import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
"github.com/go-logr/zapr"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
"github.com/spf13/viper"
|
||||
"go.uber.org/zap"
|
||||
|
||||
ctrlruntimelog "sigs.k8s.io/controller-runtime/pkg/log"
|
||||
|
||||
@@ -22,8 +22,9 @@ import (
|
||||
var (
|
||||
configFile string
|
||||
cfg config
|
||||
logger *log.Logger
|
||||
logger logr.Logger
|
||||
debug bool
|
||||
logFormat string
|
||||
)
|
||||
|
||||
func main() {
|
||||
@@ -34,13 +35,16 @@ func main() {
|
||||
if err := InitializeConfig(cmd); err != nil {
|
||||
return err
|
||||
}
|
||||
logger = log.New(debug)
|
||||
ctrlruntimelog.SetLogger(zapr.NewLogger(logger.Desugar().WithOptions(zap.AddCallerSkip(1))))
|
||||
|
||||
logger = zapr.NewLogger(log.New(debug, logFormat))
|
||||
ctrlruntimelog.SetLogger(logger)
|
||||
return nil
|
||||
},
|
||||
RunE: run,
|
||||
}
|
||||
|
||||
rootCmd.PersistentFlags().BoolVarP(&debug, "debug", "", false, "Enable debug logging")
|
||||
rootCmd.PersistentFlags().StringVar(&logFormat, "log-format", "text", "Log format (text or json)")
|
||||
rootCmd.PersistentFlags().StringVar(&cfg.ClusterName, "cluster-name", "", "Name of the k3k cluster")
|
||||
rootCmd.PersistentFlags().StringVar(&cfg.ClusterNamespace, "cluster-namespace", "", "Namespace of the k3k cluster")
|
||||
rootCmd.PersistentFlags().StringVar(&cfg.Token, "token", "", "K3S token of the k3k cluster")
|
||||
@@ -53,7 +57,6 @@ func main() {
|
||||
rootCmd.PersistentFlags().StringVar(&cfg.ServerIP, "server-ip", "", "Server IP used for registering the virtual kubelet to the cluster")
|
||||
rootCmd.PersistentFlags().StringVar(&cfg.Version, "version", "", "Version of kubernetes server")
|
||||
rootCmd.PersistentFlags().StringVar(&configFile, "config", "/opt/rancher/k3k/config.yaml", "Path to k3k-kubelet config file")
|
||||
rootCmd.PersistentFlags().BoolVar(&debug, "debug", false, "Enable debug logging")
|
||||
rootCmd.PersistentFlags().BoolVar(&cfg.MirrorHostNodes, "mirror-host-nodes", false, "Mirror real node objects from host cluster")
|
||||
|
||||
if err := rootCmd.Execute(); err != nil {
|
||||
@@ -73,7 +76,7 @@ func run(cmd *cobra.Command, args []string) error {
|
||||
return fmt.Errorf("failed to create new virtual kubelet instance: %w", err)
|
||||
}
|
||||
|
||||
if err := k.registerNode(ctx, k.agentIP, cfg); err != nil {
|
||||
if err := k.registerNode(k.agentIP, cfg); err != nil {
|
||||
return fmt.Errorf("failed to register new node: %w", err)
|
||||
}
|
||||
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
@@ -12,16 +13,15 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
typedv1 "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
k3klog "github.com/rancher/k3k/pkg/log"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
|
||||
)
|
||||
|
||||
func ConfigureNode(logger *k3klog.Logger, node *corev1.Node, hostname string, servicePort int, ip string, coreClient typedv1.CoreV1Interface, virtualClient client.Client, virtualCluster v1alpha1.Cluster, version string, mirrorHostNodes bool) {
|
||||
func ConfigureNode(logger logr.Logger, node *corev1.Node, hostname string, servicePort int, ip string, coreClient typedv1.CoreV1Interface, virtualClient client.Client, virtualCluster v1beta1.Cluster, version string, mirrorHostNodes bool) {
|
||||
ctx := context.Background()
|
||||
if mirrorHostNodes {
|
||||
hostNode, err := coreClient.Nodes().Get(ctx, node.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
logger.Fatal("error getting host node for mirroring", err)
|
||||
logger.Error(err, "error getting host node for mirroring", err)
|
||||
}
|
||||
|
||||
node.Spec = *hostNode.Spec.DeepCopy()
|
||||
@@ -56,7 +56,7 @@ func ConfigureNode(logger *k3klog.Logger, node *corev1.Node, hostname string, se
|
||||
go func() {
|
||||
for range ticker.C {
|
||||
if err := updateNodeCapacity(ctx, coreClient, virtualClient, node.Name, virtualCluster.Spec.NodeSelector); err != nil {
|
||||
logger.Error("error updating node capacity", err)
|
||||
logger.Error(err, "error updating node capacity")
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
@@ -12,13 +12,13 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/virtual-kubelet/virtual-kubelet/node/api"
|
||||
"github.com/virtual-kubelet/virtual-kubelet/node/nodeutil"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/selection"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/rest"
|
||||
@@ -38,13 +38,11 @@ import (
|
||||
compbasemetrics "k8s.io/component-base/metrics"
|
||||
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/controller"
|
||||
"github.com/rancher/k3k/k3k-kubelet/controller/webhook"
|
||||
"github.com/rancher/k3k/k3k-kubelet/provider/collectors"
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
|
||||
k3kcontroller "github.com/rancher/k3k/pkg/controller"
|
||||
k3klog "github.com/rancher/k3k/pkg/log"
|
||||
)
|
||||
|
||||
// check at compile time if the Provider implements the nodeutil.Provider interface
|
||||
@@ -53,22 +51,22 @@ var _ nodeutil.Provider = (*Provider)(nil)
|
||||
// Provider implements nodetuil.Provider from virtual Kubelet.
|
||||
// TODO: Implement NotifyPods and the required usage so that this can be an async provider
|
||||
type Provider struct {
|
||||
Handler controller.ControllerHandler
|
||||
Translator translate.ToHostTranslator
|
||||
HostClient client.Client
|
||||
VirtualClient client.Client
|
||||
VirtualManager manager.Manager
|
||||
ClientConfig rest.Config
|
||||
CoreClient cv1.CoreV1Interface
|
||||
ClusterNamespace string
|
||||
ClusterName string
|
||||
serverIP string
|
||||
dnsIP string
|
||||
logger *k3klog.Logger
|
||||
logger logr.Logger
|
||||
}
|
||||
|
||||
var ErrRetryTimeout = errors.New("provider timed out")
|
||||
|
||||
func New(hostConfig rest.Config, hostMgr, virtualMgr manager.Manager, logger *k3klog.Logger, namespace, name, serverIP, dnsIP string) (*Provider, error) {
|
||||
func New(hostConfig rest.Config, hostMgr, virtualMgr manager.Manager, logger logr.Logger, namespace, name, serverIP, dnsIP string) (*Provider, error) {
|
||||
coreClient, err := cv1.NewForConfig(&hostConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -80,16 +78,9 @@ func New(hostConfig rest.Config, hostMgr, virtualMgr manager.Manager, logger *k3
|
||||
}
|
||||
|
||||
p := Provider{
|
||||
Handler: controller.ControllerHandler{
|
||||
Mgr: virtualMgr,
|
||||
Scheme: *virtualMgr.GetScheme(),
|
||||
HostClient: hostMgr.GetClient(),
|
||||
VirtualClient: virtualMgr.GetClient(),
|
||||
Translator: translator,
|
||||
Logger: logger,
|
||||
},
|
||||
HostClient: hostMgr.GetClient(),
|
||||
VirtualClient: virtualMgr.GetClient(),
|
||||
VirtualManager: virtualMgr,
|
||||
Translator: translator,
|
||||
ClientConfig: hostConfig,
|
||||
CoreClient: coreClient,
|
||||
@@ -134,7 +125,7 @@ func (p *Provider) GetContainerLogs(ctx context.Context, namespace, podName, con
|
||||
}
|
||||
|
||||
closer, err := p.CoreClient.Pods(p.ClusterNamespace).GetLogs(hostPodName, &options).Stream(ctx)
|
||||
p.logger.Infof("got error %s when getting logs for %s in %s", err, hostPodName, p.ClusterNamespace)
|
||||
p.logger.Error(err, fmt.Sprintf("got error when getting logs for %s in %s", hostPodName, p.ClusterNamespace))
|
||||
|
||||
return closer, err
|
||||
}
|
||||
@@ -208,7 +199,7 @@ func (p *Provider) AttachToContainer(ctx context.Context, namespace, podName, co
|
||||
|
||||
// GetStatsSummary gets the stats for the node, including running pods
|
||||
func (p *Provider) GetStatsSummary(ctx context.Context) (*stats.Summary, error) {
|
||||
p.logger.Debug("GetStatsSummary")
|
||||
p.logger.V(1).Info("GetStatsSummary")
|
||||
|
||||
nodeList := &corev1.NodeList{}
|
||||
if err := p.CoreClient.RESTClient().Get().Resource("nodes").Do(ctx).Into(nodeList); err != nil {
|
||||
@@ -340,7 +331,14 @@ func (p *Provider) CreatePod(ctx context.Context, pod *corev1.Pod) error {
|
||||
|
||||
// createPod takes a Kubernetes Pod and deploys it within the provider.
|
||||
func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
|
||||
tPod := pod.DeepCopy()
|
||||
// fieldPath envs are not being translated correctly using the virtual kubelet pod controller
|
||||
// as a workaround we will try to fetch the pod from the virtual cluster and copy over the envSource
|
||||
var sourcePod corev1.Pod
|
||||
if err := p.VirtualClient.Get(ctx, types.NamespacedName{Name: pod.Name, Namespace: pod.Namespace}, &sourcePod); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tPod := sourcePod.DeepCopy()
|
||||
p.Translator.TranslateTo(tPod)
|
||||
|
||||
// get Cluster definition
|
||||
@@ -349,7 +347,7 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
|
||||
Name: p.ClusterName,
|
||||
}
|
||||
|
||||
var cluster v1alpha1.Cluster
|
||||
var cluster v1beta1.Cluster
|
||||
|
||||
if err := p.HostClient.Get(ctx, clusterKey, &cluster); err != nil {
|
||||
return fmt.Errorf("unable to get cluster %s in namespace %s: %w", p.ClusterName, p.ClusterNamespace, err)
|
||||
@@ -385,12 +383,12 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
|
||||
}
|
||||
|
||||
// fieldpath annotations
|
||||
if err := p.configureFieldPathEnv(pod, tPod); err != nil {
|
||||
if err := p.configureFieldPathEnv(&sourcePod, tPod); err != nil {
|
||||
return fmt.Errorf("unable to fetch fieldpath annotations for pod %s/%s: %w", pod.Namespace, pod.Name, err)
|
||||
}
|
||||
// volumes will often refer to resources in the virtual cluster, but instead need to refer to the sync'd
|
||||
// host cluster version
|
||||
if err := p.transformVolumes(ctx, pod.Namespace, tPod.Spec.Volumes); err != nil {
|
||||
if err := p.transformVolumes(pod.Namespace, tPod.Spec.Volumes); err != nil {
|
||||
return fmt.Errorf("unable to sync volumes for pod %s/%s: %w", pod.Namespace, pod.Name, err)
|
||||
}
|
||||
// sync the serviceaccount token to the host cluster
|
||||
@@ -398,10 +396,14 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
|
||||
return fmt.Errorf("unable to transform tokens for pod %s/%s: %w", pod.Namespace, pod.Name, err)
|
||||
}
|
||||
|
||||
for i, imagePullSecret := range tPod.Spec.ImagePullSecrets {
|
||||
tPod.Spec.ImagePullSecrets[i].Name = p.Translator.TranslateName(pod.Namespace, imagePullSecret.Name)
|
||||
}
|
||||
|
||||
// inject networking information to the pod including the virtual cluster controlplane endpoint
|
||||
configureNetworking(tPod, pod.Name, pod.Namespace, p.serverIP, p.dnsIP)
|
||||
|
||||
p.logger.Infow("creating pod",
|
||||
p.logger.Info("creating pod",
|
||||
"host_namespace", tPod.Namespace, "host_name", tPod.Name,
|
||||
"virtual_namespace", pod.Namespace, "virtual_name", pod.Name,
|
||||
)
|
||||
@@ -443,58 +445,22 @@ func (p *Provider) withRetry(ctx context.Context, f func(context.Context, *corev
|
||||
|
||||
// transformVolumes changes the volumes to the representation in the host cluster. Will return an error
|
||||
// if one/more volumes couldn't be transformed
|
||||
func (p *Provider) transformVolumes(ctx context.Context, podNamespace string, volumes []corev1.Volume) error {
|
||||
func (p *Provider) transformVolumes(podNamespace string, volumes []corev1.Volume) error {
|
||||
for _, volume := range volumes {
|
||||
var optional bool
|
||||
|
||||
if strings.HasPrefix(volume.Name, kubeAPIAccessPrefix) {
|
||||
continue
|
||||
}
|
||||
// note: this needs to handle downward api volumes as well, but more thought is needed on how to do that
|
||||
if volume.ConfigMap != nil {
|
||||
if volume.ConfigMap.Optional != nil {
|
||||
optional = *volume.ConfigMap.Optional
|
||||
}
|
||||
|
||||
if err := p.syncConfigmap(ctx, podNamespace, volume.ConfigMap.Name, optional); err != nil {
|
||||
return fmt.Errorf("unable to sync configmap volume %s: %w", volume.Name, err)
|
||||
}
|
||||
|
||||
volume.ConfigMap.Name = p.Translator.TranslateName(podNamespace, volume.ConfigMap.Name)
|
||||
} else if volume.Secret != nil {
|
||||
if volume.Secret.Optional != nil {
|
||||
optional = *volume.Secret.Optional
|
||||
}
|
||||
|
||||
if err := p.syncSecret(ctx, podNamespace, volume.Secret.SecretName, optional); err != nil {
|
||||
return fmt.Errorf("unable to sync secret volume %s: %w", volume.Name, err)
|
||||
}
|
||||
|
||||
volume.Secret.SecretName = p.Translator.TranslateName(podNamespace, volume.Secret.SecretName)
|
||||
} else if volume.Projected != nil {
|
||||
for _, source := range volume.Projected.Sources {
|
||||
if source.ConfigMap != nil {
|
||||
if source.ConfigMap.Optional != nil {
|
||||
optional = *source.ConfigMap.Optional
|
||||
}
|
||||
|
||||
configMapName := source.ConfigMap.Name
|
||||
if err := p.syncConfigmap(ctx, podNamespace, configMapName, optional); err != nil {
|
||||
return fmt.Errorf("unable to sync projected configmap %s: %w", configMapName, err)
|
||||
}
|
||||
|
||||
source.ConfigMap.Name = p.Translator.TranslateName(podNamespace, configMapName)
|
||||
source.ConfigMap.Name = p.Translator.TranslateName(podNamespace, source.ConfigMap.Name)
|
||||
} else if source.Secret != nil {
|
||||
if source.Secret.Optional != nil {
|
||||
optional = *source.Secret.Optional
|
||||
}
|
||||
|
||||
secretName := source.Secret.Name
|
||||
if err := p.syncSecret(ctx, podNamespace, secretName, optional); err != nil {
|
||||
return fmt.Errorf("unable to sync projected secret %s: %w", secretName, err)
|
||||
}
|
||||
|
||||
source.Secret.Name = p.Translator.TranslateName(podNamespace, secretName)
|
||||
source.Secret.Name = p.Translator.TranslateName(podNamespace, source.Secret.Name)
|
||||
}
|
||||
}
|
||||
} else if volume.PersistentVolumeClaim != nil {
|
||||
@@ -517,64 +483,13 @@ func (p *Provider) transformVolumes(ctx context.Context, podNamespace string, vo
|
||||
return nil
|
||||
}
|
||||
|
||||
// syncConfigmap will add the configmap object to the queue of the syncer controller to be synced to the host cluster
|
||||
func (p *Provider) syncConfigmap(ctx context.Context, podNamespace string, configMapName string, optional bool) error {
|
||||
var configMap corev1.ConfigMap
|
||||
|
||||
nsName := types.NamespacedName{
|
||||
Namespace: podNamespace,
|
||||
Name: configMapName,
|
||||
}
|
||||
|
||||
if err := p.VirtualClient.Get(ctx, nsName, &configMap); err != nil {
|
||||
// check if it's an optional configmap
|
||||
if apierrors.IsNotFound(err) && optional {
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("unable to get configmap to sync %s/%s: %w", nsName.Namespace, nsName.Name, err)
|
||||
}
|
||||
|
||||
if err := p.Handler.AddResource(ctx, &configMap); err != nil {
|
||||
return fmt.Errorf("unable to add configmap to sync %s/%s: %w", nsName.Namespace, nsName.Name, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// syncSecret will add the secret object to the queue of the syncer controller to be synced to the host cluster
|
||||
func (p *Provider) syncSecret(ctx context.Context, podNamespace string, secretName string, optional bool) error {
|
||||
p.logger.Infow("Syncing secret", "Name", secretName, "Namespace", podNamespace, "optional", optional)
|
||||
|
||||
var secret corev1.Secret
|
||||
|
||||
nsName := types.NamespacedName{
|
||||
Namespace: podNamespace,
|
||||
Name: secretName,
|
||||
}
|
||||
|
||||
if err := p.VirtualClient.Get(ctx, nsName, &secret); err != nil {
|
||||
if apierrors.IsNotFound(err) && optional {
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("unable to get secret to sync %s/%s: %w", nsName.Namespace, nsName.Name, err)
|
||||
}
|
||||
|
||||
if err := p.Handler.AddResource(ctx, &secret); err != nil {
|
||||
return fmt.Errorf("unable to add secret to sync %s/%s: %w", nsName.Namespace, nsName.Name, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdatePod executes updatePod with retry
|
||||
func (p *Provider) UpdatePod(ctx context.Context, pod *corev1.Pod) error {
|
||||
return p.withRetry(ctx, p.updatePod, pod)
|
||||
}
|
||||
|
||||
func (p *Provider) updatePod(ctx context.Context, pod *corev1.Pod) error {
|
||||
p.logger.Debugw("got a request for update pod")
|
||||
p.logger.V(1).Info("got a request for update pod")
|
||||
|
||||
// Once scheduled a Pod cannot update other fields than the image of the containers, initcontainers and a few others
|
||||
// See: https://kubernetes.io/docs/concepts/workloads/pods/#pod-update-and-replacement
|
||||
@@ -604,13 +519,18 @@ func (p *Provider) updatePod(ctx context.Context, pod *corev1.Pod) error {
|
||||
currentHostPod.Spec.EphemeralContainers = pod.Spec.EphemeralContainers
|
||||
|
||||
if _, err := p.CoreClient.Pods(p.ClusterNamespace).UpdateEphemeralContainers(ctx, currentHostPod.Name, ¤tHostPod, metav1.UpdateOptions{}); err != nil {
|
||||
p.logger.Errorf("error when updating ephemeral containers: %v", err)
|
||||
p.logger.Error(err, "error when updating ephemeral containers")
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// fieldpath annotations
|
||||
if err := p.configureFieldPathEnv(¤tVirtualPod, ¤tHostPod); err != nil {
|
||||
return fmt.Errorf("unable to fetch fieldpath annotations for pod %s/%s: %w", pod.Namespace, pod.Name, err)
|
||||
}
|
||||
|
||||
currentVirtualPod.Spec.Containers = updateContainerImages(currentVirtualPod.Spec.Containers, pod.Spec.Containers)
|
||||
currentVirtualPod.Spec.InitContainers = updateContainerImages(currentVirtualPod.Spec.InitContainers, pod.Spec.InitContainers)
|
||||
|
||||
@@ -670,85 +590,20 @@ func (p *Provider) DeletePod(ctx context.Context, pod *corev1.Pod) error {
|
||||
// expected to call the NotifyPods callback with a terminal pod status where all the containers are in a terminal
|
||||
// state, as well as the pod. DeletePod may be called multiple times for the same pod.
|
||||
func (p *Provider) deletePod(ctx context.Context, pod *corev1.Pod) error {
|
||||
p.logger.Infof("Got request to delete pod %s", pod.Name)
|
||||
p.logger.Info(fmt.Sprintf("got request to delete pod %s/%s", pod.Namespace, pod.Name))
|
||||
hostName := p.Translator.TranslateName(pod.Namespace, pod.Name)
|
||||
|
||||
err := p.CoreClient.Pods(p.ClusterNamespace).Delete(ctx, hostName, metav1.DeleteOptions{})
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
p.logger.Info(fmt.Sprintf("pod %s/%s already deleted from host cluster", p.ClusterNamespace, hostName))
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("unable to delete pod %s/%s: %w", pod.Namespace, pod.Name, err)
|
||||
}
|
||||
|
||||
if err = p.pruneUnusedVolumes(ctx, pod); err != nil {
|
||||
// note that we don't return an error here. The pod was successfully deleted; another process
// should clean this up without affecting the user
|
||||
p.logger.Errorf("failed to prune leftover volumes for %s/%s: %w, resources may be left", pod.Namespace, pod.Name, err)
|
||||
}
|
||||
|
||||
p.logger.Infof("Deleted pod %s", pod.Name)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// pruneUnusedVolumes removes volumes in use by the pod that aren't used by any other pods
|
||||
func (p *Provider) pruneUnusedVolumes(ctx context.Context, pod *corev1.Pod) error {
|
||||
rawSecrets, rawConfigMaps := getSecretsAndConfigmaps(pod)
|
||||
// since this pod was removed, initially mark all of the secrets/configmaps it uses as eligible
// for pruning
|
||||
pruneSecrets := sets.Set[string]{}.Insert(rawSecrets...)
|
||||
pruneConfigMap := sets.Set[string]{}.Insert(rawConfigMaps...)
|
||||
|
||||
var pods corev1.PodList
|
||||
// only pods in the same namespace could be using secrets/configmaps that this pod is using
|
||||
err := p.VirtualClient.List(ctx, &pods, &client.ListOptions{
|
||||
Namespace: pod.Namespace,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to list pods: %w", err)
|
||||
}
|
||||
|
||||
for _, vPod := range pods.Items {
|
||||
if vPod.Name == pod.Name {
|
||||
continue
|
||||
}
|
||||
|
||||
secrets, configMaps := getSecretsAndConfigmaps(&vPod)
|
||||
pruneSecrets.Delete(secrets...)
|
||||
pruneConfigMap.Delete(configMaps...)
|
||||
}
|
||||
|
||||
for _, secretName := range pruneSecrets.UnsortedList() {
|
||||
var secret corev1.Secret
|
||||
|
||||
key := types.NamespacedName{
|
||||
Name: secretName,
|
||||
Namespace: pod.Namespace,
|
||||
}
|
||||
|
||||
if err := p.VirtualClient.Get(ctx, key, &secret); err != nil {
|
||||
return fmt.Errorf("unable to get secret %s/%s for pod volume: %w", pod.Namespace, secretName, err)
|
||||
}
|
||||
|
||||
if err = p.Handler.RemoveResource(ctx, &secret); err != nil {
|
||||
return fmt.Errorf("unable to remove secret %s/%s for pod volume: %w", pod.Namespace, secretName, err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, configMapName := range pruneConfigMap.UnsortedList() {
|
||||
var configMap corev1.ConfigMap
|
||||
|
||||
key := types.NamespacedName{
|
||||
Name: configMapName,
|
||||
Namespace: pod.Namespace,
|
||||
}
|
||||
|
||||
if err := p.VirtualClient.Get(ctx, key, &configMap); err != nil {
|
||||
return fmt.Errorf("unable to get configMap %s/%s for pod volume: %w", pod.Namespace, configMapName, err)
|
||||
}
|
||||
|
||||
if err = p.Handler.RemoveResource(ctx, &configMap); err != nil {
|
||||
return fmt.Errorf("unable to remove configMap %s/%s for pod volume: %w", pod.Namespace, configMapName, err)
|
||||
}
|
||||
}
|
||||
p.logger.Info(fmt.Sprintf("pod %s/%s deleted from host cluster", p.ClusterNamespace, hostName))
|
||||
|
||||
return nil
|
||||
}
|
||||
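To make the prune calculation above concrete, here is a minimal standalone sketch (not the provider code itself) of the set arithmetic it performs with k8s.io/apimachinery/pkg/util/sets; the secret names are invented for illustration.

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/util/sets"
)

func main() {
    // Secrets referenced by the pod that was just deleted (illustrative names).
    deletedPodSecrets := []string{"db-creds", "tls-cert", "registry-auth"}

    // Secrets still referenced by the other pods in the same namespace.
    otherPodSecrets := []string{"tls-cert"}

    // Start by marking everything the deleted pod used as prune-eligible...
    pruneSecrets := sets.Set[string]{}.Insert(deletedPodSecrets...)

    // ...then drop anything a surviving pod still uses.
    pruneSecrets.Delete(otherPodSecrets...)

    // Only "db-creds" and "registry-auth" remain to be removed from the host cluster.
    fmt.Println(pruneSecrets.UnsortedList())
}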
@@ -758,7 +613,7 @@ func (p *Provider) pruneUnusedVolumes(ctx context.Context, pod *corev1.Pod) erro
|
||||
// concurrently outside of the calling goroutine. Therefore it is recommended
|
||||
// to return a version after DeepCopy.
|
||||
func (p *Provider) GetPod(ctx context.Context, namespace, name string) (*corev1.Pod, error) {
|
||||
p.logger.Debugw("got a request for get pod", "Namespace", namespace, "Name", name)
|
||||
p.logger.V(1).Info("got a request for get pod", "namespace", namespace, "name", name)
|
||||
hostNamespaceName := types.NamespacedName{
|
||||
Namespace: p.ClusterNamespace,
|
||||
Name: p.Translator.TranslateName(namespace, name),
|
||||
@@ -780,14 +635,14 @@ func (p *Provider) GetPod(ctx context.Context, namespace, name string) (*corev1.
|
||||
// concurrently outside of the calling goroutine. Therefore it is recommended
|
||||
// to return a version after DeepCopy.
|
||||
func (p *Provider) GetPodStatus(ctx context.Context, namespace, name string) (*corev1.PodStatus, error) {
|
||||
p.logger.Debugw("got a request for pod status", "Namespace", namespace, "Name", name)
|
||||
p.logger.V(1).Info("got a request for pod status", "namespace", namespace, "name", name)
|
||||
|
||||
pod, err := p.GetPod(ctx, namespace, name)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to get pod for status: %w", err)
|
||||
}
|
||||
|
||||
p.logger.Debugw("got pod status", "Namespace", namespace, "Name", name, "Status", pod.Status)
|
||||
p.logger.V(1).Info("got pod status", "namespace", namespace, "name", name, "status", pod.Status)
|
||||
|
||||
return pod.Status.DeepCopy(), nil
|
||||
}
|
||||
@@ -869,22 +724,22 @@ func configureNetworking(pod *corev1.Pod, podName, podNamespace, serverIP, dnsIP
|
||||
|
||||
// inject networking information to the pod's environment variables
|
||||
for i := range pod.Spec.Containers {
|
||||
pod.Spec.Containers[i].Env = overrideEnvVars(pod.Spec.Containers[i].Env, updatedEnvVars)
|
||||
pod.Spec.Containers[i].Env = mergeEnvVars(pod.Spec.Containers[i].Env, updatedEnvVars)
|
||||
}
|
||||
|
||||
// handle init containers as well
|
||||
for i := range pod.Spec.InitContainers {
|
||||
pod.Spec.InitContainers[i].Env = overrideEnvVars(pod.Spec.InitContainers[i].Env, updatedEnvVars)
|
||||
pod.Spec.InitContainers[i].Env = mergeEnvVars(pod.Spec.InitContainers[i].Env, updatedEnvVars)
|
||||
}
|
||||
|
||||
// handle ephemeral containers as well
|
||||
for i := range pod.Spec.EphemeralContainers {
|
||||
pod.Spec.EphemeralContainers[i].Env = overrideEnvVars(pod.Spec.EphemeralContainers[i].Env, updatedEnvVars)
|
||||
pod.Spec.EphemeralContainers[i].Env = mergeEnvVars(pod.Spec.EphemeralContainers[i].Env, updatedEnvVars)
|
||||
}
|
||||
}
|
||||
|
||||
// overrideEnvVars will override the orig environment variables if found in the updated list
|
||||
func overrideEnvVars(orig, updated []corev1.EnvVar) []corev1.EnvVar {
|
||||
// mergeEnvVars will override the orig environment variables if found in the updated list and will add them to the list if not found
|
||||
func mergeEnvVars(orig, updated []corev1.EnvVar) []corev1.EnvVar {
|
||||
if len(updated) == 0 {
|
||||
return orig
|
||||
}
|
||||
@@ -895,43 +750,23 @@ func overrideEnvVars(orig, updated []corev1.EnvVar) []corev1.EnvVar {
|
||||
updatedEnvVarMap[updatedEnvVar.Name] = updatedEnvVar
|
||||
}
|
||||
|
||||
for i, origEnvVar := range orig {
|
||||
if updatedEnvVar, found := updatedEnvVarMap[origEnvVar.Name]; found {
|
||||
orig[i] = updatedEnvVar
|
||||
for i, env := range orig {
|
||||
if updatedEnv, ok := updatedEnvVarMap[env.Name]; ok {
|
||||
orig[i] = updatedEnv
|
||||
// Remove the updated variable from the map
|
||||
delete(updatedEnvVarMap, env.Name)
|
||||
}
|
||||
}
|
||||
|
||||
// Any variables remaining in the map are new and should be appended to the original slice.
|
||||
for _, env := range updatedEnvVarMap {
|
||||
orig = append(orig, env)
|
||||
}
|
||||
|
||||
return orig
|
||||
}
|
||||
|
||||
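Since the rename from overrideEnvVars to mergeEnvVars changes behaviour (leftover updated variables are now appended instead of dropped), the following standalone sketch restates the merge semantics with the FOO_2 case from the updated test table; it is an illustration, not the kubelet code itself.

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
)

// merge restates the mergeEnvVars logic: updated values override matching
// names in orig, and any remaining updated variables are appended at the end.
func merge(orig, updated []corev1.EnvVar) []corev1.EnvVar {
    if len(updated) == 0 {
        return orig
    }

    byName := map[string]corev1.EnvVar{}
    for _, e := range updated {
        byName[e.Name] = e
    }

    for i, e := range orig {
        if u, ok := byName[e.Name]; ok {
            orig[i] = u
            delete(byName, e.Name)
        }
    }

    for _, e := range byName {
        orig = append(orig, e)
    }

    return orig
}

func main() {
    orig := []corev1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "old_val_1"}}
    updated := []corev1.EnvVar{{Name: "FOO_1", Value: "new_val_1"}, {Name: "FOO_2", Value: "val_1"}}

    // FOO_1 is overridden in place and FOO_2 is appended.
    fmt.Println(merge(orig, updated))
}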
// getSecretsAndConfigmaps retrieves a list of all secrets/configmaps that are in use by a given pod. Useful
// for determining which virtual cluster resources need to exist in the host cluster.
|
||||
func getSecretsAndConfigmaps(pod *corev1.Pod) ([]string, []string) {
|
||||
var (
|
||||
secrets []string
|
||||
configMaps []string
|
||||
)
|
||||
|
||||
for _, volume := range pod.Spec.Volumes {
|
||||
if volume.Secret != nil {
|
||||
secrets = append(secrets, volume.Secret.SecretName)
|
||||
} else if volume.ConfigMap != nil {
|
||||
configMaps = append(configMaps, volume.ConfigMap.Name)
|
||||
} else if volume.Projected != nil {
|
||||
for _, source := range volume.Projected.Sources {
|
||||
if source.ConfigMap != nil {
|
||||
configMaps = append(configMaps, source.ConfigMap.Name)
|
||||
} else if source.Secret != nil {
|
||||
secrets = append(secrets, source.Secret.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return secrets, configMaps
|
||||
}
|
||||
|
||||
// configureFieldPathEnv will retrieve all annotations created by the pod mutator webhook
|
||||
// configureFieldPathEnv will retrieve all annotations created by the pod mutating webhook
|
||||
// to assign env fieldpaths to pods; it will also make sure to change the metadata.name and metadata.namespace to the
// assigned annotations
|
||||
func (p *Provider) configureFieldPathEnv(pod, tPod *corev1.Pod) error {
|
||||
|
||||
@@ -7,7 +7,7 @@ import (
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
func Test_overrideEnvVars(t *testing.T) {
|
||||
func Test_mergeEnvVars(t *testing.T) {
|
||||
type args struct {
|
||||
orig []corev1.EnvVar
|
||||
new []corev1.EnvVar
|
||||
@@ -32,7 +32,7 @@ func Test_overrideEnvVars(t *testing.T) {
|
||||
orig: []corev1.EnvVar{},
|
||||
new: []corev1.EnvVar{{Name: "FOO", Value: "new_val"}},
|
||||
},
|
||||
want: []corev1.EnvVar{},
|
||||
want: []corev1.EnvVar{{Name: "FOO", Value: "new_val"}},
|
||||
},
|
||||
{
|
||||
name: "orig has a matching element",
|
||||
@@ -56,14 +56,14 @@ func Test_overrideEnvVars(t *testing.T) {
|
||||
orig: []corev1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "old_val_1"}},
|
||||
new: []corev1.EnvVar{{Name: "FOO_1", Value: "new_val_1"}, {Name: "FOO_2", Value: "val_1"}},
|
||||
},
|
||||
want: []corev1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "new_val_1"}},
|
||||
want: []corev1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "new_val_1"}, {Name: "FOO_2", Value: "val_1"}},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := overrideEnvVars(tt.args.orig, tt.args.new); !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("overrideEnvVars() = %v, want %v", got, tt.want)
|
||||
if got := mergeEnvVars(tt.args.orig, tt.args.new); !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("mergeEnvVars() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -23,7 +23,7 @@ const (
|
||||
// transformTokens copies the serviceaccount tokens used by the pod's serviceaccount to a secret on the host cluster and mounts it
// to look like the serviceaccount token
|
||||
func (p *Provider) transformTokens(ctx context.Context, pod, tPod *corev1.Pod) error {
|
||||
p.logger.Infow("transforming token", "Pod", pod.Name, "Namespace", pod.Namespace, "serviceAccountName", pod.Spec.ServiceAccountName)
|
||||
p.logger.Info("transforming token", "pod", pod.Name, "namespace", pod.Namespace, "serviceAccountName", pod.Spec.ServiceAccountName)
|
||||
|
||||
// skip this process if the kube-api-access is already removed from the pod
// this is needed in case users already add their own custom tokens, as in rancher imported clusters
|
||||
|
||||
@@ -4,8 +4,10 @@ import (
|
||||
"encoding/hex"
|
||||
"strings"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
|
||||
"github.com/rancher/k3k/pkg/controller"
|
||||
)
|
||||
|
||||
@@ -34,6 +36,13 @@ type ToHostTranslator struct {
|
||||
ClusterNamespace string
|
||||
}
|
||||
|
||||
func NewHostTranslator(cluster *v1beta1.Cluster) *ToHostTranslator {
|
||||
return &ToHostTranslator{
|
||||
ClusterName: cluster.Name,
|
||||
ClusterNamespace: cluster.Namespace,
|
||||
}
|
||||
}
|
||||
|
||||
// Translate translates a virtual cluster object to a host cluster object. This should only be used for
|
||||
// static resources such as configmaps/secrets, and not for things like pods (which can reference other
|
||||
// objects). Note that this won't set host-cluster values (like resource version) so when updating you
|
||||
@@ -125,3 +134,11 @@ func (t *ToHostTranslator) TranslateName(namespace string, name string) string {
|
||||
|
||||
return controller.SafeConcatName(namePrefix, nameSuffix)
|
||||
}
|
||||
|
||||
// NamespacedName returns the types.NamespacedName of the resource in the host cluster
|
||||
func (t *ToHostTranslator) NamespacedName(obj client.Object) types.NamespacedName {
|
||||
return types.NamespacedName{
|
||||
Namespace: t.ClusterNamespace,
|
||||
Name: t.TranslateName(obj.GetNamespace(), obj.GetName()),
|
||||
}
|
||||
}
|
||||
|
||||
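A hedged usage sketch of the translator additions above (NewHostTranslator plus the new NamespacedName helper), assuming a Secret from the virtual cluster needs to be located in the host cluster; the cluster and secret names are illustrative.

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    "github.com/rancher/k3k/k3k-kubelet/translate"
    "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)

func main() {
    cluster := &v1beta1.Cluster{
        ObjectMeta: metav1.ObjectMeta{Name: "mycluster", Namespace: "ns-1"},
    }

    t := translate.NewHostTranslator(cluster)

    // A secret that lives in the virtual cluster...
    secret := &corev1.Secret{
        ObjectMeta: metav1.ObjectMeta{Name: "db-creds", Namespace: "default"},
    }

    // ...is addressed in the host cluster under the cluster namespace and a
    // translated, collision-safe name.
    fmt.Println(t.NamespacedName(secret))
}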
93 main.go
@@ -5,10 +5,13 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
"github.com/go-logr/zapr"
|
||||
"github.com/spf13/cobra"
|
||||
"go.uber.org/zap"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
@@ -19,7 +22,7 @@ import (
|
||||
ctrlruntimelog "sigs.k8s.io/controller-runtime/pkg/log"
|
||||
|
||||
"github.com/rancher/k3k/cli/cmds"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
|
||||
"github.com/rancher/k3k/pkg/buildinfo"
|
||||
"github.com/rancher/k3k/pkg/controller/cluster"
|
||||
"github.com/rancher/k3k/pkg/controller/cluster/agent"
|
||||
@@ -28,23 +31,20 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
scheme = runtime.NewScheme()
|
||||
clusterCIDR string
|
||||
sharedAgentImage string
|
||||
sharedAgentImagePullPolicy string
|
||||
kubeconfig string
|
||||
k3SImage string
|
||||
k3SImagePullPolicy string
|
||||
kubeletPortRange string
|
||||
webhookPortRange string
|
||||
maxConcurrentReconciles int
|
||||
debug bool
|
||||
logger *log.Logger
|
||||
scheme = runtime.NewScheme()
|
||||
config cluster.Config
|
||||
kubeconfig string
|
||||
kubeletPortRange string
|
||||
webhookPortRange string
|
||||
maxConcurrentReconciles int
|
||||
debug bool
|
||||
logFormat string
|
||||
logger logr.Logger
|
||||
)
|
||||
|
||||
func init() {
|
||||
_ = clientgoscheme.AddToScheme(scheme)
|
||||
_ = v1alpha1.AddToScheme(scheme)
|
||||
_ = v1beta1.AddToScheme(scheme)
|
||||
}
|
||||
|
||||
func main() {
|
||||
@@ -57,31 +57,38 @@ func main() {
|
||||
},
|
||||
PersistentPreRun: func(cmd *cobra.Command, args []string) {
|
||||
cmds.InitializeConfig(cmd)
|
||||
logger = log.New(debug)
|
||||
logger = zapr.NewLogger(log.New(debug, logFormat))
|
||||
},
|
||||
RunE: run,
|
||||
}
|
||||
|
||||
rootCmd.PersistentFlags().BoolVar(&debug, "debug", false, "Debug level logging")
|
||||
rootCmd.PersistentFlags().BoolVarP(&debug, "debug", "", false, "Debug level logging")
|
||||
rootCmd.PersistentFlags().StringVar(&logFormat, "log-format", "text", "Log format (text or json)")
|
||||
rootCmd.PersistentFlags().StringVar(&kubeconfig, "kubeconfig", "", "kubeconfig path")
|
||||
rootCmd.PersistentFlags().StringVar(&clusterCIDR, "cluster-cidr", "", "Cluster CIDR to be added to the networkpolicy")
|
||||
rootCmd.PersistentFlags().StringVar(&sharedAgentImage, "shared-agent-image", "", "K3K Virtual Kubelet image")
|
||||
rootCmd.PersistentFlags().StringVar(&sharedAgentImagePullPolicy, "shared-agent-pull-policy", "", "K3K Virtual Kubelet image pull policy must be one of Always, IfNotPresent or Never")
|
||||
rootCmd.PersistentFlags().StringVar(&config.ClusterCIDR, "cluster-cidr", "", "Cluster CIDR to be added to the networkpolicy")
|
||||
rootCmd.PersistentFlags().StringVar(&config.SharedAgentImage, "agent-shared-image", "rancher/k3k-kubelet", "K3K Virtual Kubelet image")
|
||||
rootCmd.PersistentFlags().StringVar(&config.SharedAgentImagePullPolicy, "agent-shared-image-pull-policy", "", "K3K Virtual Kubelet image pull policy must be one of Always, IfNotPresent or Never")
|
||||
rootCmd.PersistentFlags().StringVar(&config.VirtualAgentImage, "agent-virtual-image", "rancher/k3s", "K3S Virtual Agent image")
|
||||
rootCmd.PersistentFlags().StringVar(&config.VirtualAgentImagePullPolicy, "agent-virtual-image-pull-policy", "", "K3S Virtual Agent image pull policy must be one of Always, IfNotPresent or Never")
|
||||
rootCmd.PersistentFlags().StringVar(&kubeletPortRange, "kubelet-port-range", "50000-51000", "Port Range for k3k kubelet in shared mode")
|
||||
rootCmd.PersistentFlags().StringVar(&webhookPortRange, "webhook-port-range", "51001-52000", "Port Range for k3k kubelet webhook in shared mode")
|
||||
rootCmd.PersistentFlags().StringVar(&k3SImage, "k3s-image", "rancher/k3k", "K3K server image")
|
||||
rootCmd.PersistentFlags().StringVar(&k3SImagePullPolicy, "k3s-image-pull-policy", "", "K3K server image pull policy")
|
||||
rootCmd.PersistentFlags().StringVar(&config.K3SServerImage, "k3s-server-image", "rancher/k3s", "K3K server image")
|
||||
rootCmd.PersistentFlags().StringVar(&config.K3SServerImagePullPolicy, "k3s-server-image-pull-policy", "", "K3K server image pull policy")
|
||||
rootCmd.PersistentFlags().StringSliceVar(&config.ServerImagePullSecrets, "server-image-pull-secret", nil, "Image pull secret used for servers")
rootCmd.PersistentFlags().StringSliceVar(&config.AgentImagePullSecrets, "agent-image-pull-secret", nil, "Image pull secret used for agents")
|
||||
rootCmd.PersistentFlags().IntVar(&maxConcurrentReconciles, "max-concurrent-reconciles", 50, "maximum number of concurrent reconciles")
|
||||
|
||||
if err := rootCmd.Execute(); err != nil {
|
||||
logger.Fatalw("failed to run k3k controller", zap.Error(err))
|
||||
logger.Error(err, "failed to run k3k controller")
|
||||
}
|
||||
}
|
||||
|
||||
func run(cmd *cobra.Command, args []string) error {
|
||||
ctx := context.Background()
|
||||
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
|
||||
defer stop()
|
||||
|
||||
logger.Info("Starting k3k - Version: " + buildinfo.Version)
|
||||
ctrlruntimelog.SetLogger(logger)
|
||||
|
||||
restConfig, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
|
||||
if err != nil {
|
||||
@@ -95,8 +102,6 @@ func run(cmd *cobra.Command, args []string) error {
|
||||
return fmt.Errorf("failed to create new controller runtime manager: %v", err)
|
||||
}
|
||||
|
||||
ctrlruntimelog.SetLogger(zapr.NewLogger(logger.Desugar().WithOptions(zap.AddCallerSkip(1))))
|
||||
|
||||
logger.Info("adding cluster controller")
|
||||
|
||||
portAllocator, err := agent.NewPortAllocator(ctx, mgr.GetClient())
|
||||
@@ -109,34 +114,48 @@ func run(cmd *cobra.Command, args []string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := cluster.Add(ctx, mgr, sharedAgentImage, sharedAgentImagePullPolicy, k3SImage, k3SImagePullPolicy, maxConcurrentReconciles, portAllocator, nil); err != nil {
|
||||
return fmt.Errorf("failed to add the new cluster controller: %v", err)
|
||||
if err := cluster.Add(ctx, mgr, &config, maxConcurrentReconciles, portAllocator, nil); err != nil {
|
||||
return fmt.Errorf("failed to add cluster controller: %v", err)
|
||||
}
|
||||
|
||||
logger.Info("adding etcd pod controller")
|
||||
logger.Info("adding statefulset controller")
|
||||
|
||||
if err := cluster.AddStatefulSetController(ctx, mgr, maxConcurrentReconciles); err != nil {
|
||||
return fmt.Errorf("failed to add statefulset controller: %v", err)
|
||||
}
|
||||
|
||||
logger.Info("adding service controller")
|
||||
|
||||
if err := cluster.AddServiceController(ctx, mgr, maxConcurrentReconciles); err != nil {
|
||||
return fmt.Errorf("failed to add service controller: %v", err)
|
||||
}
|
||||
|
||||
logger.Info("adding pod controller")
|
||||
|
||||
if err := cluster.AddPodController(ctx, mgr, maxConcurrentReconciles); err != nil {
|
||||
return fmt.Errorf("failed to add the new cluster controller: %v", err)
|
||||
return fmt.Errorf("failed to add pod controller: %v", err)
|
||||
}
|
||||
|
||||
logger.Info("adding clusterpolicy controller")
|
||||
|
||||
if err := policy.Add(mgr, clusterCIDR, maxConcurrentReconciles); err != nil {
|
||||
return fmt.Errorf("failed to add the clusterpolicy controller: %v", err)
|
||||
if err := policy.Add(mgr, config.ClusterCIDR, maxConcurrentReconciles); err != nil {
|
||||
return fmt.Errorf("failed to add clusterpolicy controller: %v", err)
|
||||
}
|
||||
|
||||
if err := mgr.Start(ctx); err != nil {
|
||||
return fmt.Errorf("failed to start the manager: %v", err)
|
||||
return fmt.Errorf("failed to start manager: %v", err)
|
||||
}
|
||||
|
||||
logger.Info("controller manager stopped")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
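The switch from a bare context.Background() to signal.NotifyContext means the manager's context is cancelled on SIGINT/SIGTERM; below is a minimal standalone sketch of that shutdown pattern, independent of the k3k wiring.

package main

import (
    "context"
    "fmt"
    "os"
    "os/signal"
    "syscall"
    "time"
)

func main() {
    // ctx is cancelled when the process receives SIGINT or SIGTERM, which lets
    // long-running loops (such as a controller manager) stop cleanly.
    ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
    defer stop()

    select {
    case <-ctx.Done():
        fmt.Println("signal received, shutting down")
    case <-time.After(5 * time.Second):
        fmt.Println("no signal within 5s, exiting anyway")
    }
}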
func validate() error {
|
||||
if sharedAgentImagePullPolicy != "" {
|
||||
if sharedAgentImagePullPolicy != string(v1.PullAlways) &&
|
||||
sharedAgentImagePullPolicy != string(v1.PullIfNotPresent) &&
|
||||
sharedAgentImagePullPolicy != string(v1.PullNever) {
|
||||
if config.SharedAgentImagePullPolicy != "" {
|
||||
if config.SharedAgentImagePullPolicy != string(v1.PullAlways) &&
|
||||
config.SharedAgentImagePullPolicy != string(v1.PullIfNotPresent) &&
|
||||
config.SharedAgentImagePullPolicy != string(v1.PullNever) {
|
||||
return errors.New("invalid value for shared agent image policy")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
// +k8s:deepcopy-gen=package
|
||||
// +groupName=k3k.io
|
||||
package v1alpha1
|
||||
package v1beta1
|
||||
@@ -1,4 +1,4 @@
|
||||
package v1alpha1
|
||||
package v1beta1
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
@@ -10,7 +10,7 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
SchemeGroupVersion = schema.GroupVersion{Group: k3k.GroupName, Version: "v1alpha1"}
|
||||
SchemeGroupVersion = schema.GroupVersion{Group: k3k.GroupName, Version: "v1beta1"}
|
||||
SchemBuilder = runtime.NewSchemeBuilder(addKnownTypes)
|
||||
AddToScheme = SchemBuilder.AddToScheme
|
||||
)
|
||||
@@ -1,4 +1,4 @@
|
||||
package v1alpha1
|
||||
package v1beta1
|
||||
|
||||
import (
|
||||
v1 "k8s.io/api/core/v1"
|
||||
@@ -103,6 +103,7 @@ type ClusterSpec struct {
|
||||
// Expose specifies options for exposing the API server.
|
||||
// By default, it's only exposed as a ClusterIP.
|
||||
//
|
||||
// +kubebuilder:validation:XValidation:rule="[has(self.ingress), has(self.loadBalancer), has(self.nodePort)].filter(x, x).size() <= 1",message="ingress, loadbalancer and nodePort are mutually exclusive; only one can be set"
|
||||
// +optional
|
||||
Expose *ExposeConfig `json:"expose,omitempty"`
|
||||
|
||||
@@ -175,7 +176,137 @@ type ClusterSpec struct {
|
||||
// CustomCAs specifies the cert/key pairs for custom CA certificates.
|
||||
//
|
||||
// +optional
|
||||
CustomCAs CustomCAs `json:"customCAs,omitempty"`
|
||||
CustomCAs *CustomCAs `json:"customCAs,omitempty"`
|
||||
|
||||
// Sync specifies the resource types that will be synced from virtual cluster to host cluster.
|
||||
//
|
||||
// +kubebuilder:default={}
|
||||
// +optional
|
||||
Sync *SyncConfig `json:"sync,omitempty"`
|
||||
}
|
||||
|
||||
// SyncConfig will contain the resources that should be synced from virtual cluster to host cluster.
|
||||
type SyncConfig struct {
|
||||
// Services resources sync configuration.
|
||||
//
|
||||
// +kubebuilder:default={"enabled": true}
|
||||
// +optional
|
||||
Services ServiceSyncConfig `json:"services"`
|
||||
// ConfigMaps resources sync configuration.
|
||||
//
|
||||
// +kubebuilder:default={"enabled": true}
|
||||
// +optional
|
||||
ConfigMaps ConfigMapSyncConfig `json:"configMaps"`
|
||||
// Secrets resources sync configuration.
|
||||
//
|
||||
// +kubebuilder:default={"enabled": true}
|
||||
// +optional
|
||||
Secrets SecretSyncConfig `json:"secrets"`
|
||||
// Ingresses resources sync configuration.
|
||||
//
|
||||
// +kubebuilder:default={"enabled": false}
|
||||
// +optional
|
||||
Ingresses IngressSyncConfig `json:"ingresses"`
|
||||
// PersistentVolumeClaims resources sync configuration.
|
||||
//
|
||||
// +kubebuilder:default={"enabled": true}
|
||||
// +optional
|
||||
PersistentVolumeClaims PersistentVolumeClaimSyncConfig `json:"persistentVolumeClaims"`
|
||||
// PriorityClasses resources sync configuration.
|
||||
//
|
||||
// +kubebuilder:default={"enabled": false}
|
||||
// +optional
|
||||
PriorityClasses PriorityClassSyncConfig `json:"priorityClasses"`
|
||||
}
|
||||
|
||||
// SecretSyncConfig specifies the sync options for secrets.
|
||||
type SecretSyncConfig struct {
|
||||
// Enabled is an on/off switch for syncing resources.
|
||||
//
|
||||
// +kubebuilder:default=true
|
||||
// +optional
|
||||
Enabled bool `json:"enabled,omitempty"`
|
||||
|
||||
// Selector specifies the set of labels of the resources that will be synced; if empty,
// then all resources of the given type will be synced.
|
||||
//
|
||||
// +optional
|
||||
Selector map[string]string `json:"selector,omitempty"`
|
||||
}
|
||||
|
||||
// ServiceSyncConfig specifies the sync options for services.
|
||||
type ServiceSyncConfig struct {
|
||||
// Enabled is an on/off switch for syncing resources.
|
||||
//
|
||||
// +kubebuilder:default=true
|
||||
// +required
|
||||
Enabled bool `json:"enabled"`
|
||||
|
||||
// Selector specifies the set of labels of the resources that will be synced; if empty,
// then all resources of the given type will be synced.
|
||||
//
|
||||
// +optional
|
||||
Selector map[string]string `json:"selector,omitempty"`
|
||||
}
|
||||
|
||||
// ConfigMapSyncConfig specifies the sync options for config maps.
|
||||
type ConfigMapSyncConfig struct {
|
||||
// Enabled is an on/off switch for syncing resources.
|
||||
//
|
||||
// +kubebuilder:default=true
|
||||
// +required
|
||||
Enabled bool `json:"enabled"`
|
||||
|
||||
// Selector specifies the set of labels of the resources that will be synced; if empty,
// then all resources of the given type will be synced.
|
||||
//
|
||||
// +optional
|
||||
Selector map[string]string `json:"selector,omitempty"`
|
||||
}
|
||||
|
||||
// IngressSyncConfig specifies the sync options for ingresses.
|
||||
type IngressSyncConfig struct {
|
||||
// Enabled is an on/off switch for syncing resources.
|
||||
//
|
||||
// +kubebuilder:default=false
|
||||
// +required
|
||||
Enabled bool `json:"enabled"`
|
||||
|
||||
// Selector specifies the set of labels of the resources that will be synced; if empty,
// then all resources of the given type will be synced.
|
||||
//
|
||||
// +optional
|
||||
Selector map[string]string `json:"selector,omitempty"`
|
||||
}
|
||||
|
||||
// PersistentVolumeClaimSyncConfig specifies the sync options for persistent volume claims.
|
||||
type PersistentVolumeClaimSyncConfig struct {
|
||||
// Enabled is an on/off switch for syncing resources.
|
||||
//
|
||||
// +kubebuilder:default=true
|
||||
// +required
|
||||
Enabled bool `json:"enabled"`
|
||||
|
||||
// Selector specifies the set of labels of the resources that will be synced; if empty,
// then all resources of the given type will be synced.
|
||||
//
|
||||
// +optional
|
||||
Selector map[string]string `json:"selector,omitempty"`
|
||||
}
|
||||
|
||||
// PriorityClassSyncConfig specifies the sync options for priority classes.
|
||||
type PriorityClassSyncConfig struct {
|
||||
// Enabled is an on/off switch for syncing resources.
|
||||
//
|
||||
// +kubebuilder:default=false
|
||||
// +required
|
||||
Enabled bool `json:"enabled"`
|
||||
|
||||
// Selector specifies the set of labels of the resources that will be synced; if empty,
// then all resources of the given type will be synced.
|
||||
//
|
||||
// +optional
|
||||
Selector map[string]string `json:"selector,omitempty"`
|
||||
}
|
||||
|
||||
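To illustrate the new Sync block, here is a hedged sketch of a ClusterSpec that keeps the enabled-by-default types and opts into ingress syncing for a labelled subset only; the label key and value are invented for the example.

package main

import (
    "fmt"

    "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)

func main() {
    spec := v1beta1.ClusterSpec{
        Sync: &v1beta1.SyncConfig{
            // Services, ConfigMaps, Secrets and PVCs default to enabled.
            Services:               v1beta1.ServiceSyncConfig{Enabled: true},
            ConfigMaps:             v1beta1.ConfigMapSyncConfig{Enabled: true},
            Secrets:                v1beta1.SecretSyncConfig{Enabled: true},
            PersistentVolumeClaims: v1beta1.PersistentVolumeClaimSyncConfig{Enabled: true},

            // Ingresses are off by default; enable them only for labelled resources.
            Ingresses: v1beta1.IngressSyncConfig{
                Enabled:  true,
                Selector: map[string]string{"k3k.io/sync": "true"},
            },
        },
    }

    fmt.Printf("%+v\n", spec.Sync.Ingresses)
}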
// ClusterMode is the possible provisioning mode of a Cluster.
|
||||
@@ -230,7 +361,7 @@ type PersistenceConfig struct {
|
||||
// StorageRequestSize is the requested size for the PVC.
|
||||
// This field is only relevant in "dynamic" mode.
|
||||
//
|
||||
// +kubebuilder:default="1G"
|
||||
// +kubebuilder:default="2G"
|
||||
// +optional
|
||||
StorageRequestSize string `json:"storageRequestSize,omitempty"`
|
||||
}
|
||||
@@ -245,7 +376,7 @@ type ExposeConfig struct {
|
||||
// LoadBalancer specifies options for exposing the API server through a LoadBalancer service.
|
||||
//
|
||||
// +optional
|
||||
LoadBalancer *LoadBalancerConfig `json:"loadbalancer,omitempty"`
|
||||
LoadBalancer *LoadBalancerConfig `json:"loadBalancer,omitempty"`
|
||||
|
||||
// NodePort specifies options for exposing the API server through NodePort.
|
||||
//
|
||||
@@ -303,32 +434,34 @@ type NodePortConfig struct {
|
||||
// CustomCAs specifies the cert/key pairs for custom CA certificates.
|
||||
type CustomCAs struct {
|
||||
// Enabled toggles this feature on or off.
|
||||
Enabled bool `json:"enabled,omitempty"`
|
||||
//
|
||||
// +kubebuilder:default=true
|
||||
Enabled bool `json:"enabled"`
|
||||
|
||||
// Sources defines the sources for all required custom CA certificates.
|
||||
Sources CredentialSources `json:"sources,omitempty"`
|
||||
Sources CredentialSources `json:"sources"`
|
||||
}
|
||||
|
||||
// CredentialSources lists all the required credentials, including both
|
||||
// TLS key pairs and single signing keys.
|
||||
type CredentialSources struct {
|
||||
// ServerCA specifies the server-ca cert/key pair.
|
||||
ServerCA CredentialSource `json:"serverCA,omitempty"`
|
||||
ServerCA CredentialSource `json:"serverCA"`
|
||||
|
||||
// ClientCA specifies the client-ca cert/key pair.
|
||||
ClientCA CredentialSource `json:"clientCA,omitempty"`
|
||||
ClientCA CredentialSource `json:"clientCA"`
|
||||
|
||||
// RequestHeaderCA specifies the request-header-ca cert/key pair.
|
||||
RequestHeaderCA CredentialSource `json:"requestHeaderCA,omitempty"`
|
||||
RequestHeaderCA CredentialSource `json:"requestHeaderCA"`
|
||||
|
||||
// ETCDServerCA specifies the etcd-server-ca cert/key pair.
|
||||
ETCDServerCA CredentialSource `json:"etcdServerCA,omitempty"`
|
||||
ETCDServerCA CredentialSource `json:"etcdServerCA"`
|
||||
|
||||
// ETCDPeerCA specifies the etcd-peer-ca cert/key pair.
|
||||
ETCDPeerCA CredentialSource `json:"etcdPeerCA,omitempty"`
|
||||
ETCDPeerCA CredentialSource `json:"etcdPeerCA"`
|
||||
|
||||
// ServiceAccountToken specifies the service-account-token key.
|
||||
ServiceAccountToken CredentialSource `json:"serviceAccountToken,omitempty"`
|
||||
ServiceAccountToken CredentialSource `json:"serviceAccountToken"`
|
||||
}
|
||||
|
||||
// CredentialSource defines where to get a credential from.
|
||||
@@ -338,8 +471,7 @@ type CredentialSource struct {
|
||||
// The controller expects specific keys inside based on the credential type:
|
||||
// - For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
|
||||
// - For ServiceAccountTokenKey: 'tls.key'.
|
||||
// +optional
|
||||
SecretName string `json:"secretName,omitempty"`
|
||||
SecretName string `json:"secretName"`
|
||||
}
|
||||
|
||||
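Since CustomCAs is now an optional pointer whose Sources and secret names are required, the sketch below shows a spec that points at pre-created CA secrets; the secret names are illustrative, and each secret is expected to hold tls.crt/tls.key (or just tls.key for the service-account token).

package main

import (
    "fmt"

    "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)

func main() {
    spec := v1beta1.ClusterSpec{
        CustomCAs: &v1beta1.CustomCAs{
            Enabled: true,
            Sources: v1beta1.CredentialSources{
                ServerCA:            v1beta1.CredentialSource{SecretName: "mycluster-server-ca"},
                ClientCA:            v1beta1.CredentialSource{SecretName: "mycluster-client-ca"},
                RequestHeaderCA:     v1beta1.CredentialSource{SecretName: "mycluster-request-header-ca"},
                ETCDServerCA:        v1beta1.CredentialSource{SecretName: "mycluster-etcd-server-ca"},
                ETCDPeerCA:          v1beta1.CredentialSource{SecretName: "mycluster-etcd-peer-ca"},
                ServiceAccountToken: v1beta1.CredentialSource{SecretName: "mycluster-sa-token"},
            },
        },
    }

    fmt.Println(spec.CustomCAs.Sources.ServerCA.SecretName)
}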
// ClusterStatus reflects the observed state of a Cluster.
|
||||
@@ -484,6 +616,12 @@ type VirtualClusterPolicySpec struct {
|
||||
//
|
||||
// +optional
|
||||
PodSecurityAdmissionLevel *PodSecurityAdmissionLevel `json:"podSecurityAdmissionLevel,omitempty"`
|
||||
|
||||
// Sync specifies the resource types that will be synced from virtual cluster to host cluster.
|
||||
//
|
||||
// +kubebuilder:default={}
|
||||
// +optional
|
||||
Sync *SyncConfig `json:"sync,omitempty"`
|
||||
}
|
||||
|
||||
// PodSecurityAdmissionLevel is the policy level applied to the pods in the namespace.
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
// Code generated by controller-gen. DO NOT EDIT.
|
||||
|
||||
package v1alpha1
|
||||
package v1beta1
|
||||
|
||||
import (
|
||||
"k8s.io/api/core/v1"
|
||||
@@ -163,7 +163,16 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
|
||||
(*out)[key] = val.DeepCopy()
|
||||
}
|
||||
}
|
||||
out.CustomCAs = in.CustomCAs
|
||||
if in.CustomCAs != nil {
|
||||
in, out := &in.CustomCAs, &out.CustomCAs
|
||||
*out = new(CustomCAs)
|
||||
**out = **in
|
||||
}
|
||||
if in.Sync != nil {
|
||||
in, out := &in.Sync, &out.Sync
|
||||
*out = new(SyncConfig)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec.
|
||||
@@ -203,6 +212,28 @@ func (in *ClusterStatus) DeepCopy() *ClusterStatus {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ConfigMapSyncConfig) DeepCopyInto(out *ConfigMapSyncConfig) {
|
||||
*out = *in
|
||||
if in.Selector != nil {
|
||||
in, out := &in.Selector, &out.Selector
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapSyncConfig.
|
||||
func (in *ConfigMapSyncConfig) DeepCopy() *ConfigMapSyncConfig {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ConfigMapSyncConfig)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *CredentialSource) DeepCopyInto(out *CredentialSource) {
|
||||
*out = *in
|
||||
@@ -307,6 +338,28 @@ func (in *IngressConfig) DeepCopy() *IngressConfig {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *IngressSyncConfig) DeepCopyInto(out *IngressSyncConfig) {
|
||||
*out = *in
|
||||
if in.Selector != nil {
|
||||
in, out := &in.Selector, &out.Selector
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressSyncConfig.
|
||||
func (in *IngressSyncConfig) DeepCopy() *IngressSyncConfig {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(IngressSyncConfig)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *LoadBalancerConfig) DeepCopyInto(out *LoadBalancerConfig) {
|
||||
*out = *in
|
||||
@@ -377,6 +430,115 @@ func (in *PersistenceConfig) DeepCopy() *PersistenceConfig {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PersistentVolumeClaimSyncConfig) DeepCopyInto(out *PersistentVolumeClaimSyncConfig) {
|
||||
*out = *in
|
||||
if in.Selector != nil {
|
||||
in, out := &in.Selector, &out.Selector
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimSyncConfig.
|
||||
func (in *PersistentVolumeClaimSyncConfig) DeepCopy() *PersistentVolumeClaimSyncConfig {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PersistentVolumeClaimSyncConfig)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PriorityClassSyncConfig) DeepCopyInto(out *PriorityClassSyncConfig) {
|
||||
*out = *in
|
||||
if in.Selector != nil {
|
||||
in, out := &in.Selector, &out.Selector
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityClassSyncConfig.
|
||||
func (in *PriorityClassSyncConfig) DeepCopy() *PriorityClassSyncConfig {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PriorityClassSyncConfig)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *SecretSyncConfig) DeepCopyInto(out *SecretSyncConfig) {
|
||||
*out = *in
|
||||
if in.Selector != nil {
|
||||
in, out := &in.Selector, &out.Selector
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretSyncConfig.
|
||||
func (in *SecretSyncConfig) DeepCopy() *SecretSyncConfig {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(SecretSyncConfig)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ServiceSyncConfig) DeepCopyInto(out *ServiceSyncConfig) {
|
||||
*out = *in
|
||||
if in.Selector != nil {
|
||||
in, out := &in.Selector, &out.Selector
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceSyncConfig.
|
||||
func (in *ServiceSyncConfig) DeepCopy() *ServiceSyncConfig {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ServiceSyncConfig)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *SyncConfig) DeepCopyInto(out *SyncConfig) {
|
||||
*out = *in
|
||||
in.Services.DeepCopyInto(&out.Services)
|
||||
in.ConfigMaps.DeepCopyInto(&out.ConfigMaps)
|
||||
in.Secrets.DeepCopyInto(&out.Secrets)
|
||||
in.Ingresses.DeepCopyInto(&out.Ingresses)
|
||||
in.PersistentVolumeClaims.DeepCopyInto(&out.PersistentVolumeClaims)
|
||||
in.PriorityClasses.DeepCopyInto(&out.PriorityClasses)
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncConfig.
|
||||
func (in *SyncConfig) DeepCopy() *SyncConfig {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(SyncConfig)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *VirtualClusterPolicy) DeepCopyInto(out *VirtualClusterPolicy) {
|
||||
*out = *in
|
||||
@@ -461,6 +623,11 @@ func (in *VirtualClusterPolicySpec) DeepCopyInto(out *VirtualClusterPolicySpec)
|
||||
*out = new(PodSecurityAdmissionLevel)
|
||||
**out = **in
|
||||
}
|
||||
if in.Sync != nil {
|
||||
in, out := &in.Sync, &out.Sync
|
||||
*out = new(SyncConfig)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualClusterPolicySpec.
|
||||
@@ -11,7 +11,7 @@ import (
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
|
||||
"github.com/rancher/k3k/pkg/controller"
|
||||
)
|
||||
|
||||
@@ -24,12 +24,12 @@ type ResourceEnsurer interface {
|
||||
}
|
||||
|
||||
type Config struct {
|
||||
cluster *v1alpha1.Cluster
|
||||
cluster *v1beta1.Cluster
|
||||
client ctrlruntimeclient.Client
|
||||
scheme *runtime.Scheme
|
||||
}
|
||||
|
||||
func NewConfig(cluster *v1alpha1.Cluster, client ctrlruntimeclient.Client, scheme *runtime.Scheme) *Config {
|
||||
func NewConfig(cluster *v1beta1.Cluster, client ctrlruntimeclient.Client, scheme *runtime.Scheme) *Config {
|
||||
return &Config{
|
||||
cluster: cluster,
|
||||
client: client,
|
||||
@@ -42,11 +42,8 @@ func configSecretName(clusterName string) string {
|
||||
}
|
||||
|
||||
func ensureObject(ctx context.Context, cfg *Config, obj ctrlruntimeclient.Object) error {
|
||||
log := ctrl.LoggerFrom(ctx)
|
||||
|
||||
key := ctrlruntimeclient.ObjectKeyFromObject(obj)
|
||||
|
||||
log.Info(fmt.Sprintf("ensuring %T", obj), "key", key)
|
||||
log := ctrl.LoggerFrom(ctx).WithValues("key", key)
|
||||
|
||||
if err := controllerutil.SetControllerReference(cfg.cluster, obj, cfg.scheme); err != nil {
|
||||
return err
|
||||
@@ -54,11 +51,15 @@ func ensureObject(ctx context.Context, cfg *Config, obj ctrlruntimeclient.Object
|
||||
|
||||
if err := cfg.client.Create(ctx, obj); err != nil {
|
||||
if apierrors.IsAlreadyExists(err) {
|
||||
log.V(1).Info(fmt.Sprintf("Resource %T already exists, updating.", obj))
|
||||
|
||||
return cfg.client.Update(ctx, obj)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
log.V(1).Info(fmt.Sprintf("Creating %T.", obj))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -19,7 +19,7 @@ import (
|
||||
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
|
||||
"github.com/rancher/k3k/pkg/controller"
|
||||
"github.com/rancher/k3k/pkg/controller/certs"
|
||||
)
|
||||
@@ -31,23 +31,26 @@ const (
|
||||
|
||||
type SharedAgent struct {
|
||||
*Config
|
||||
serviceIP string
|
||||
image string
|
||||
imagePullPolicy string
|
||||
token string
|
||||
kubeletPort int
|
||||
webhookPort int
|
||||
serviceIP string
|
||||
image string
|
||||
imagePullPolicy string
|
||||
imageRegistry string
|
||||
token string
|
||||
kubeletPort int
|
||||
webhookPort int
|
||||
imagePullSecrets []string
|
||||
}
|
||||
|
||||
func NewSharedAgent(config *Config, serviceIP, image, imagePullPolicy, token string, kubeletPort, webhookPort int) *SharedAgent {
|
||||
func NewSharedAgent(config *Config, serviceIP, image, imagePullPolicy, token string, kubeletPort, webhookPort int, imagePullSecrets []string) *SharedAgent {
|
||||
return &SharedAgent{
|
||||
Config: config,
|
||||
serviceIP: serviceIP,
|
||||
image: image,
|
||||
imagePullPolicy: imagePullPolicy,
|
||||
token: token,
|
||||
kubeletPort: kubeletPort,
|
||||
webhookPort: webhookPort,
|
||||
Config: config,
|
||||
serviceIP: serviceIP,
|
||||
image: image,
|
||||
imagePullPolicy: imagePullPolicy,
|
||||
token: token,
|
||||
kubeletPort: kubeletPort,
|
||||
webhookPort: webhookPort,
|
||||
imagePullSecrets: imagePullSecrets,
|
||||
}
|
||||
}
|
||||
|
||||
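The new imagePullSecrets parameter ends up on the agent pod spec as LocalObjectReference entries; here is a minimal standalone sketch of that wiring, with invented secret names.

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
)

func main() {
    imagePullSecrets := []string{"regcred", "internal-registry"}

    podSpec := v1.PodSpec{}

    // Same pattern the shared and virtual agents now use: each configured
    // secret name becomes a LocalObjectReference on the pod spec.
    for _, name := range imagePullSecrets {
        podSpec.ImagePullSecrets = append(podSpec.ImagePullSecrets, v1.LocalObjectReference{Name: name})
    }

    fmt.Println(podSpec.ImagePullSecrets)
}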
@@ -96,7 +99,7 @@ func (s *SharedAgent) config(ctx context.Context) error {
|
||||
return s.ensureObject(ctx, configSecret)
|
||||
}
|
||||
|
||||
func sharedAgentData(cluster *v1alpha1.Cluster, serviceName, token, ip string, kubeletPort, webhookPort int) string {
|
||||
func sharedAgentData(cluster *v1beta1.Cluster, serviceName, token, ip string, kubeletPort, webhookPort int) string {
|
||||
version := cluster.Spec.Version
|
||||
if cluster.Spec.Version == "" {
|
||||
version = cluster.Status.HostVersion
|
||||
@@ -156,7 +159,13 @@ func (s *SharedAgent) podSpec() v1.PodSpec {
|
||||
dnsPolicy = v1.DNSClusterFirstWithHostNet
|
||||
}
|
||||
|
||||
return v1.PodSpec{
|
||||
image := s.image
|
||||
|
||||
if s.imageRegistry != "" {
|
||||
image = s.imageRegistry + "/" + s.image
|
||||
}
|
||||
|
||||
podSpec := v1.PodSpec{
|
||||
HostNetwork: hostNetwork,
|
||||
DNSPolicy: dnsPolicy,
|
||||
ServiceAccountName: s.Name(),
|
||||
@@ -202,7 +211,7 @@ func (s *SharedAgent) podSpec() v1.PodSpec {
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: s.Name(),
|
||||
Image: s.image,
|
||||
Image: image,
|
||||
ImagePullPolicy: v1.PullPolicy(s.imagePullPolicy),
|
||||
Resources: v1.ResourceRequirements{
|
||||
Limits: v1.ResourceList{},
|
||||
@@ -254,6 +263,11 @@ func (s *SharedAgent) podSpec() v1.PodSpec {
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, imagePullSecret := range s.imagePullSecrets {
|
||||
podSpec.ImagePullSecrets = append(podSpec.ImagePullSecrets, v1.LocalObjectReference{Name: imagePullSecret})
|
||||
}
|
||||
|
||||
return podSpec
|
||||
}
|
||||
|
||||
func (s *SharedAgent) service(ctx context.Context) error {
|
||||
@@ -367,6 +381,11 @@ func (s *SharedAgent) role(ctx context.Context) error {
|
||||
Resources: []string{"persistentvolumeclaims", "pods", "pods/log", "pods/attach", "pods/exec", "pods/ephemeralcontainers", "secrets", "configmaps", "services"},
|
||||
Verbs: []string{"*"},
|
||||
},
|
||||
{
|
||||
APIGroups: []string{"networking.k8s.io"},
|
||||
Resources: []string{"ingresses"},
|
||||
Verbs: []string{"*"},
|
||||
},
|
||||
{
|
||||
APIGroups: []string{"k3k.io"},
|
||||
Resources: []string{"clusters"},
|
||||
|
||||
@@ -8,12 +8,12 @@ import (
|
||||
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
|
||||
)
|
||||
|
||||
func Test_sharedAgentData(t *testing.T) {
|
||||
type args struct {
|
||||
cluster *v1alpha1.Cluster
|
||||
cluster *v1beta1.Cluster
|
||||
serviceName string
|
||||
ip string
|
||||
kubeletPort int
|
||||
@@ -29,12 +29,12 @@ func Test_sharedAgentData(t *testing.T) {
|
||||
{
|
||||
name: "simple config",
|
||||
args: args{
|
||||
cluster: &v1alpha1.Cluster{
|
||||
cluster: &v1beta1.Cluster{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "mycluster",
|
||||
Namespace: "ns-1",
|
||||
},
|
||||
Spec: v1alpha1.ClusterSpec{
|
||||
Spec: v1beta1.ClusterSpec{
|
||||
Version: "v1.2.3",
|
||||
},
|
||||
},
|
||||
@@ -59,15 +59,15 @@ func Test_sharedAgentData(t *testing.T) {
|
||||
{
|
||||
name: "version in status",
|
||||
args: args{
|
||||
cluster: &v1alpha1.Cluster{
|
||||
cluster: &v1beta1.Cluster{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "mycluster",
|
||||
Namespace: "ns-1",
|
||||
},
|
||||
Spec: v1alpha1.ClusterSpec{
|
||||
Spec: v1beta1.ClusterSpec{
|
||||
Version: "v1.2.3",
|
||||
},
|
||||
Status: v1alpha1.ClusterStatus{
|
||||
Status: v1beta1.ClusterStatus{
|
||||
HostVersion: "v1.3.3",
|
||||
},
|
||||
},
|
||||
@@ -92,12 +92,12 @@ func Test_sharedAgentData(t *testing.T) {
|
||||
{
|
||||
name: "missing version in spec",
|
||||
args: args{
|
||||
cluster: &v1alpha1.Cluster{
|
||||
cluster: &v1beta1.Cluster{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "mycluster",
|
||||
Namespace: "ns-1",
|
||||
},
|
||||
Status: v1alpha1.ClusterStatus{
|
||||
Status: v1beta1.ClusterStatus{
|
||||
HostVersion: "v1.3.3",
|
||||
},
|
||||
},
|
||||
|
||||
@@ -22,19 +22,22 @@ const (
|
||||
|
||||
type VirtualAgent struct {
|
||||
*Config
|
||||
serviceIP string
|
||||
token string
|
||||
k3SImage string
|
||||
k3SImagePullPolicy string
|
||||
serviceIP string
|
||||
token string
|
||||
Image string
|
||||
ImagePullPolicy string
|
||||
ImageRegistry string
|
||||
imagePullSecrets []string
|
||||
}
|
||||
|
||||
func NewVirtualAgent(config *Config, serviceIP, token string, k3SImage string, k3SImagePullPolicy string) *VirtualAgent {
|
||||
func NewVirtualAgent(config *Config, serviceIP, token, Image, ImagePullPolicy string, imagePullSecrets []string) *VirtualAgent {
|
||||
return &VirtualAgent{
|
||||
Config: config,
|
||||
serviceIP: serviceIP,
|
||||
token: token,
|
||||
k3SImage: k3SImage,
|
||||
k3SImagePullPolicy: k3SImagePullPolicy,
|
||||
Config: config,
|
||||
serviceIP: serviceIP,
|
||||
token: token,
|
||||
Image: Image,
|
||||
ImagePullPolicy: ImagePullPolicy,
|
||||
imagePullSecrets: imagePullSecrets,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -84,7 +87,7 @@ with-node-id: true`, serviceIP, token)
|
||||
}
|
||||
|
||||
func (v *VirtualAgent) deployment(ctx context.Context) error {
|
||||
image := controller.K3SImage(v.cluster, v.k3SImage)
|
||||
image := controller.K3SImage(v.cluster, v.Image)
|
||||
|
||||
const name = "k3k-agent"
|
||||
|
||||
@@ -183,7 +186,7 @@ func (v *VirtualAgent) podSpec(image, name string, args []string, affinitySelect
|
||||
{
|
||||
Name: name,
|
||||
Image: image,
|
||||
ImagePullPolicy: v1.PullPolicy(v.k3SImagePullPolicy),
|
||||
ImagePullPolicy: v1.PullPolicy(v.ImagePullPolicy),
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
Privileged: ptr.To(true),
|
||||
},
|
||||
@@ -243,5 +246,9 @@ func (v *VirtualAgent) podSpec(image, name string, args []string, affinitySelect
|
||||
}
|
||||
}
|
||||
|
||||
for _, imagePullSecret := range v.imagePullSecrets {
|
||||
podSpec.ImagePullSecrets = append(podSpec.ImagePullSecrets, v1.LocalObjectReference{Name: imagePullSecret})
|
||||
}
|
||||
|
||||
return podSpec
|
||||
}
|
||||
|
||||
35 pkg/controller/cluster/client.go Normal file
@@ -0,0 +1,35 @@
|
||||
package cluster
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/rancher/k3k/pkg/controller"
|
||||
)
|
||||
|
||||
// newVirtualClient creates a new Client that can be used to interact with the virtual cluster
|
||||
func newVirtualClient(ctx context.Context, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace string) (ctrlruntimeclient.Client, error) {
|
||||
var clusterKubeConfig v1.Secret
|
||||
|
||||
kubeconfigSecretName := types.NamespacedName{
|
||||
Name: controller.SafeConcatNameWithPrefix(clusterName, "kubeconfig"),
|
||||
Namespace: clusterNamespace,
|
||||
}
|
||||
|
||||
if err := hostClient.Get(ctx, kubeconfigSecretName, &clusterKubeConfig); err != nil {
|
||||
return nil, fmt.Errorf("failed to get kubeconfig secret: %w", err)
|
||||
}
|
||||
|
||||
restConfig, err := clientcmd.RESTConfigFromKubeConfig(clusterKubeConfig.Data["kubeconfig.yaml"])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create config from kubeconfig file: %w", err)
|
||||
}
|
||||
|
||||
return ctrlruntimeclient.New(restConfig, ctrlruntimeclient.Options{})
|
||||
}
|
||||
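Because newVirtualClient is unexported, the sketch below restates its flow (kubeconfig secret, then REST config, then controller-runtime client) as it might be reused outside the cluster package; the data key follows the code above, while the helper name and call shape are invented for illustration.

package main

import (
    "context"
    "fmt"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/client-go/tools/clientcmd"
    ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
)

// virtualClientFor mirrors the unexported newVirtualClient helper: fetch the
// kubeconfig secret written for the cluster, then build a client from its
// "kubeconfig.yaml" key.
func virtualClientFor(ctx context.Context, hostClient ctrlruntimeclient.Client, secretName, namespace string) (ctrlruntimeclient.Client, error) {
    var kubeconfigSecret v1.Secret

    key := types.NamespacedName{Name: secretName, Namespace: namespace}
    if err := hostClient.Get(ctx, key, &kubeconfigSecret); err != nil {
        return nil, fmt.Errorf("failed to get kubeconfig secret: %w", err)
    }

    restConfig, err := clientcmd.RESTConfigFromKubeConfig(kubeconfigSecret.Data["kubeconfig.yaml"])
    if err != nil {
        return nil, fmt.Errorf("failed to build rest config: %w", err)
    }

    return ctrlruntimeclient.New(restConfig, ctrlruntimeclient.Options{})
}

func main() {
    // Wiring a real host client needs a live cluster; this only shows the shape of the call.
    _ = virtualClientFor
}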
@@ -33,7 +33,7 @@ import (
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
ctrlcontroller "sigs.k8s.io/controller-runtime/pkg/controller"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
|
||||
"github.com/rancher/k3k/pkg/controller"
|
||||
"github.com/rancher/k3k/pkg/controller/cluster/agent"
|
||||
"github.com/rancher/k3k/pkg/controller/cluster/server"
|
||||
@@ -46,7 +46,6 @@ const (
|
||||
namePrefix = "k3k"
|
||||
clusterController = "k3k-cluster-controller"
|
||||
clusterFinalizerName = "cluster.k3k.io/finalizer"
|
||||
etcdPodFinalizerName = "etcdpod.k3k.io/finalizer"
|
||||
ClusterInvalidName = "system"
|
||||
|
||||
defaultVirtualClusterCIDR = "10.52.0.0/16"
|
||||
@@ -61,26 +60,36 @@ var (
|
||||
ErrCustomCACertSecretMissing = errors.New("custom CA certificate secret is missing")
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
ClusterCIDR string
|
||||
SharedAgentImage string
|
||||
SharedAgentImagePullPolicy string
|
||||
VirtualAgentImage string
|
||||
VirtualAgentImagePullPolicy string
|
||||
K3SServerImage string
|
||||
K3SServerImagePullPolicy string
|
||||
ServerImagePullSecrets []string
|
||||
AgentImagePullSecrets []string
|
||||
}
|
||||
|
||||
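With the image and pull-secret flags now grouped into cluster.Config, the controller wiring looks roughly like the sketch below; mgr and portAllocator are assumed to be set up as in main.go, and the registry secret name is invented.

package main

import (
    "context"
    "fmt"

    "sigs.k8s.io/controller-runtime/pkg/manager"

    "github.com/rancher/k3k/pkg/controller/cluster"
    "github.com/rancher/k3k/pkg/controller/cluster/agent"
)

// addClusterController shows the shape of the new cluster.Add call: the image
// and pull-secret settings travel together in a single Config value.
func addClusterController(ctx context.Context, mgr manager.Manager, portAllocator *agent.PortAllocator) error {
    config := cluster.Config{
        SharedAgentImage:       "rancher/k3k-kubelet",
        VirtualAgentImage:      "rancher/k3s",
        K3SServerImage:         "rancher/k3s",
        ServerImagePullSecrets: []string{"regcred"}, // illustrative secret name
        AgentImagePullSecrets:  []string{"regcred"},
    }

    if err := cluster.Add(ctx, mgr, &config, 50, portAllocator, nil); err != nil {
        return fmt.Errorf("failed to add cluster controller: %w", err)
    }

    return nil
}

func main() {
    fmt.Println("see addClusterController for the wiring sketch")
}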
type ClusterReconciler struct {
|
||||
DiscoveryClient *discovery.DiscoveryClient
|
||||
Client client.Client
|
||||
Scheme *runtime.Scheme
|
||||
PortAllocator *agent.PortAllocator
|
||||
|
||||
record.EventRecorder
|
||||
SharedAgentImage string
|
||||
SharedAgentImagePullPolicy string
|
||||
K3SImage string
|
||||
K3SImagePullPolicy string
|
||||
PortAllocator *agent.PortAllocator
|
||||
Config
|
||||
}
|
||||
|
||||
// Add adds a new controller to the manager
|
||||
func Add(ctx context.Context, mgr manager.Manager, sharedAgentImage, sharedAgentImagePullPolicy, k3SImage string, k3SImagePullPolicy string, maxConcurrentReconciles int, portAllocator *agent.PortAllocator, eventRecorder record.EventRecorder) error {
|
||||
func Add(ctx context.Context, mgr manager.Manager, config *Config, maxConcurrentReconciles int, portAllocator *agent.PortAllocator, eventRecorder record.EventRecorder) error {
|
||||
discoveryClient, err := discovery.NewDiscoveryClientForConfig(mgr.GetConfig())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if sharedAgentImage == "" {
|
||||
if config.SharedAgentImage == "" {
|
||||
return errors.New("missing shared agent image")
|
||||
}
|
||||
|
||||
@@ -90,19 +99,25 @@ func Add(ctx context.Context, mgr manager.Manager, sharedAgentImage, sharedAgent
|
||||
|
||||
// initialize a new Reconciler
|
||||
reconciler := ClusterReconciler{
|
||||
DiscoveryClient: discoveryClient,
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
EventRecorder: eventRecorder,
|
||||
SharedAgentImage: sharedAgentImage,
|
||||
SharedAgentImagePullPolicy: sharedAgentImagePullPolicy,
|
||||
K3SImage: k3SImage,
|
||||
K3SImagePullPolicy: k3SImagePullPolicy,
|
||||
PortAllocator: portAllocator,
|
||||
DiscoveryClient: discoveryClient,
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
EventRecorder: eventRecorder,
|
||||
PortAllocator: portAllocator,
|
||||
Config: Config{
|
||||
SharedAgentImage: config.SharedAgentImage,
|
||||
SharedAgentImagePullPolicy: config.SharedAgentImagePullPolicy,
|
||||
VirtualAgentImage: config.VirtualAgentImage,
|
||||
VirtualAgentImagePullPolicy: config.VirtualAgentImagePullPolicy,
|
||||
K3SServerImage: config.K3SServerImage,
|
||||
K3SServerImagePullPolicy: config.K3SServerImagePullPolicy,
|
||||
ServerImagePullSecrets: config.ServerImagePullSecrets,
|
||||
AgentImagePullSecrets: config.AgentImagePullSecrets,
|
||||
},
|
||||
}
|
||||
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
For(&v1alpha1.Cluster{}).
|
||||
For(&v1beta1.Cluster{}).
|
||||
Watches(&v1.Namespace{}, namespaceEventHandler(&reconciler)).
|
||||
Owns(&apps.StatefulSet{}).
|
||||
Owns(&v1.Service{}).
|
||||
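
A minimal sketch of what a call site could look like after the switch to the Config-based Add signature; the helper name, image values and pull policies below are placeholders (the updated envtest suite later in this compare builds a similar cluster.Config).

package setup

import (
    "context"

    "k8s.io/client-go/tools/record"
    "sigs.k8s.io/controller-runtime/pkg/manager"

    "github.com/rancher/k3k/pkg/controller/cluster"
    "github.com/rancher/k3k/pkg/controller/cluster/agent"
)

// registerClusterController is a hypothetical wiring helper; mgr, portAllocator
// and eventRecorder come from the caller's manager setup.
func registerClusterController(ctx context.Context, mgr manager.Manager, portAllocator *agent.PortAllocator, eventRecorder record.EventRecorder) error {
    clusterConfig := &cluster.Config{
        SharedAgentImage: "rancher/k3k-kubelet:latest", // placeholder image
        SharedAgentImagePullPolicy: "IfNotPresent",     // placeholder policy
        VirtualAgentImage: "rancher/k3s",               // placeholder image
        K3SServerImage: "rancher/k3s",                  // placeholder image
    }

    return cluster.Add(ctx, mgr, clusterConfig, 50, portAllocator, eventRecorder)
}
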
@@ -133,7 +148,7 @@ func namespaceEventHandler(r *ClusterReconciler) handler.Funcs {
|
||||
}
|
||||
|
||||
// Enqueue all the Cluster in the namespace
|
||||
var clusterList v1alpha1.ClusterList
|
||||
var clusterList v1beta1.ClusterList
|
||||
if err := r.Client.List(ctx, &clusterList, client.InNamespace(oldNs.Name)); err != nil {
|
||||
return
|
||||
}
|
||||
@@ -146,12 +161,10 @@ func namespaceEventHandler(r *ClusterReconciler) handler.Funcs {
|
||||
}
|
||||
|
||||
func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
log := ctrl.LoggerFrom(ctx).WithValues("cluster", req.NamespacedName)
|
||||
ctx = ctrl.LoggerInto(ctx, log) // enrich the current logger
|
||||
log := ctrl.LoggerFrom(ctx)
|
||||
log.Info("Reconciling Cluster")
|
||||
|
||||
log.Info("reconciling cluster")
|
||||
|
||||
var cluster v1alpha1.Cluster
|
||||
var cluster v1beta1.Cluster
|
||||
if err := c.Client.Get(ctx, req.NamespacedName, &cluster); err != nil {
|
||||
return reconcile.Result{}, client.IgnoreNotFound(err)
|
||||
}
|
||||
@@ -162,8 +175,10 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
|
||||
}
|
||||
|
||||
// Set initial status if not already set
|
||||
if cluster.Status.Phase == "" || cluster.Status.Phase == v1alpha1.ClusterUnknown {
|
||||
cluster.Status.Phase = v1alpha1.ClusterProvisioning
|
||||
if cluster.Status.Phase == "" || cluster.Status.Phase == v1beta1.ClusterUnknown {
|
||||
log.V(1).Info("Updating Cluster status phase")
|
||||
|
||||
cluster.Status.Phase = v1beta1.ClusterProvisioning
|
||||
meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
|
||||
Type: ConditionReady,
|
||||
Status: metav1.ConditionFalse,
|
||||
@@ -180,6 +195,8 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
|
||||
|
||||
// add finalizer
|
||||
if controllerutil.AddFinalizer(&cluster, clusterFinalizerName) {
|
||||
log.V(1).Info("Updating Cluster adding finalizer")
|
||||
|
||||
if err := c.Client.Update(ctx, &cluster); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
@@ -192,6 +209,8 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
|
||||
reconcilerErr := c.reconcileCluster(ctx, &cluster)
|
||||
|
||||
if !equality.Semantic.DeepEqual(orig.Status, cluster.Status) {
|
||||
log.Info("Updating Cluster status")
|
||||
|
||||
if err := c.Client.Status().Update(ctx, &cluster); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
@@ -200,7 +219,7 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
|
||||
// if there was an error during the reconciliation, return
|
||||
if reconcilerErr != nil {
|
||||
if errors.Is(reconcilerErr, bootstrap.ErrServerNotReady) {
|
||||
log.Info("server not ready, requeueing")
|
||||
log.V(1).Info("Server not ready, requeueing")
|
||||
return reconcile.Result{RequeueAfter: time.Second * 10}, nil
|
||||
}
|
||||
|
||||
@@ -209,6 +228,8 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
|
||||
|
||||
// update Cluster if needed
|
||||
if !equality.Semantic.DeepEqual(orig.Spec, cluster.Spec) {
|
||||
log.Info("Updating Cluster")
|
||||
|
||||
if err := c.Client.Update(ctx, &cluster); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
@@ -217,14 +238,14 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
func (c *ClusterReconciler) reconcileCluster(ctx context.Context, cluster *v1alpha1.Cluster) error {
|
||||
func (c *ClusterReconciler) reconcileCluster(ctx context.Context, cluster *v1beta1.Cluster) error {
|
||||
err := c.reconcile(ctx, cluster)
|
||||
c.updateStatus(cluster, err)
|
||||
c.updateStatus(ctx, cluster, err)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1alpha1.Cluster) error {
|
||||
func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1beta1.Cluster) error {
|
||||
log := ctrl.LoggerFrom(ctx)
|
||||
|
||||
var ns v1.Namespace
|
||||
@@ -236,7 +257,7 @@ func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1alpha1.Clu
|
||||
cluster.Status.PolicyName = policyName
|
||||
|
||||
if found && policyName != "" {
|
||||
var policy v1alpha1.VirtualClusterPolicy
|
||||
var policy v1beta1.VirtualClusterPolicy
|
||||
if err := c.Client.Get(ctx, client.ObjectKey{Name: policyName}, &policy); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -249,7 +270,7 @@ func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1alpha1.Clu
|
||||
// if the Version is not specified we will try to use the same Kubernetes version of the host.
|
||||
// This version is stored in the Status object, and it will not be updated if already set.
|
||||
if cluster.Spec.Version == "" && cluster.Status.HostVersion == "" {
|
||||
log.Info("cluster version not set")
|
||||
log.V(1).Info("Cluster version not set. Using host version.")
|
||||
|
||||
hostVersion, err := c.DiscoveryClient.ServerVersion()
|
||||
if err != nil {
|
||||
@@ -266,12 +287,12 @@ func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1alpha1.Clu
|
||||
return err
|
||||
}
|
||||
|
||||
s := server.New(cluster, c.Client, token, c.K3SImage, c.K3SImagePullPolicy)
|
||||
s := server.New(cluster, c.Client, token, c.K3SServerImage, c.K3SServerImagePullPolicy, c.ServerImagePullSecrets)
|
||||
|
||||
cluster.Status.ClusterCIDR = cluster.Spec.ClusterCIDR
|
||||
if cluster.Status.ClusterCIDR == "" {
|
||||
cluster.Status.ClusterCIDR = defaultVirtualClusterCIDR
|
||||
if cluster.Spec.Mode == v1alpha1.SharedClusterMode {
|
||||
if cluster.Spec.Mode == v1beta1.SharedClusterMode {
|
||||
cluster.Status.ClusterCIDR = defaultSharedClusterCIDR
|
||||
}
|
||||
}
|
||||
@@ -279,8 +300,8 @@ func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1alpha1.Clu
|
||||
cluster.Status.ServiceCIDR = cluster.Spec.ServiceCIDR
|
||||
if cluster.Status.ServiceCIDR == "" {
|
||||
// in shared mode try to lookup the serviceCIDR
|
||||
if cluster.Spec.Mode == v1alpha1.SharedClusterMode {
|
||||
log.Info("looking up Service CIDR for shared mode")
|
||||
if cluster.Spec.Mode == v1beta1.SharedClusterMode {
|
||||
log.V(1).Info("Looking up Service CIDR for shared mode")
|
||||
|
||||
cluster.Status.ServiceCIDR, err = c.lookupServiceCIDR(ctx)
|
||||
if err != nil {
|
||||
@@ -291,8 +312,8 @@ func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1alpha1.Clu
|
||||
}
|
||||
|
||||
// in virtual mode assign a default serviceCIDR
|
||||
if cluster.Spec.Mode == v1alpha1.VirtualClusterMode {
|
||||
log.Info("assign default service CIDR for virtual mode")
|
||||
if cluster.Spec.Mode == v1beta1.VirtualClusterMode {
|
||||
log.V(1).Info("assign default service CIDR for virtual mode")
|
||||
|
||||
cluster.Status.ServiceCIDR = defaultVirtualServiceCIDR
|
||||
}
|
||||
@@ -337,9 +358,9 @@ func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1alpha1.Clu
|
||||
}
|
||||
|
||||
// ensureBootstrapSecret will create or update the Secret containing the bootstrap data from the k3s server
|
||||
func (c *ClusterReconciler) ensureBootstrapSecret(ctx context.Context, cluster *v1alpha1.Cluster, serviceIP, token string) error {
|
||||
func (c *ClusterReconciler) ensureBootstrapSecret(ctx context.Context, cluster *v1beta1.Cluster, serviceIP, token string) error {
|
||||
log := ctrl.LoggerFrom(ctx)
|
||||
log.Info("ensuring bootstrap secret")
|
||||
log.V(1).Info("Ensuring bootstrap secret")
|
||||
|
||||
bootstrapData, err := bootstrap.GenerateBootstrapData(ctx, cluster, serviceIP, token)
|
||||
if err != nil {
|
||||
@@ -369,9 +390,9 @@ func (c *ClusterReconciler) ensureBootstrapSecret(ctx context.Context, cluster *
|
||||
}
|
||||
|
||||
// ensureKubeconfigSecret will create or update the Secret containing the kubeconfig data from the k3s server
|
||||
func (c *ClusterReconciler) ensureKubeconfigSecret(ctx context.Context, cluster *v1alpha1.Cluster, serviceIP string, port int) error {
|
||||
func (c *ClusterReconciler) ensureKubeconfigSecret(ctx context.Context, cluster *v1beta1.Cluster, serviceIP string, port int) error {
|
||||
log := ctrl.LoggerFrom(ctx)
|
||||
log.Info("ensuring kubeconfig secret")
|
||||
log.V(1).Info("Ensuring Kubeconfig Secret")
|
||||
|
||||
adminKubeconfig := kubeconfig.New()
|
||||
|
||||
@@ -407,7 +428,7 @@ func (c *ClusterReconciler) ensureKubeconfigSecret(ctx context.Context, cluster
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *ClusterReconciler) createClusterConfigs(ctx context.Context, cluster *v1alpha1.Cluster, server *server.Server, serviceIP string) error {
|
||||
func (c *ClusterReconciler) createClusterConfigs(ctx context.Context, cluster *v1beta1.Cluster, server *server.Server, serviceIP string) error {
|
||||
// create init node config
|
||||
initServerConfig, err := server.Config(true, serviceIP)
|
||||
if err != nil {
|
||||
@@ -443,9 +464,9 @@ func (c *ClusterReconciler) createClusterConfigs(ctx context.Context, cluster *v
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *ClusterReconciler) ensureNetworkPolicy(ctx context.Context, cluster *v1alpha1.Cluster) error {
|
||||
func (c *ClusterReconciler) ensureNetworkPolicy(ctx context.Context, cluster *v1beta1.Cluster) error {
|
||||
log := ctrl.LoggerFrom(ctx)
|
||||
log.Info("ensuring network policy")
|
||||
log.V(1).Info("Ensuring network policy")
|
||||
|
||||
networkPolicyName := controller.SafeConcatNameWithPrefix(cluster.Name)
|
||||
|
||||
@@ -529,15 +550,15 @@ func (c *ClusterReconciler) ensureNetworkPolicy(ctx context.Context, cluster *v1
|
||||
|
||||
key := client.ObjectKeyFromObject(currentNetworkPolicy)
|
||||
if result != controllerutil.OperationResultNone {
|
||||
log.Info("cluster network policy updated", "key", key, "result", result)
|
||||
log.V(1).Info("Cluster network policy updated", "key", key, "result", result)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *ClusterReconciler) ensureClusterService(ctx context.Context, cluster *v1alpha1.Cluster) (*v1.Service, error) {
|
||||
func (c *ClusterReconciler) ensureClusterService(ctx context.Context, cluster *v1beta1.Cluster) (*v1.Service, error) {
|
||||
log := ctrl.LoggerFrom(ctx)
|
||||
log.Info("ensuring cluster service")
|
||||
log.V(1).Info("Ensuring Cluster Service")
|
||||
|
||||
expectedService := server.Service(cluster)
|
||||
currentService := expectedService.DeepCopy()
|
||||
@@ -557,15 +578,15 @@ func (c *ClusterReconciler) ensureClusterService(ctx context.Context, cluster *v
|
||||
|
||||
key := client.ObjectKeyFromObject(currentService)
|
||||
if result != controllerutil.OperationResultNone {
|
||||
log.Info("cluster service updated", "key", key, "result", result)
|
||||
log.V(1).Info("Cluster service updated", "key", key, "result", result)
|
||||
}
|
||||
|
||||
return currentService, nil
|
||||
}
|
||||
|
||||
func (c *ClusterReconciler) ensureIngress(ctx context.Context, cluster *v1alpha1.Cluster) error {
|
||||
func (c *ClusterReconciler) ensureIngress(ctx context.Context, cluster *v1beta1.Cluster) error {
|
||||
log := ctrl.LoggerFrom(ctx)
|
||||
log.Info("ensuring cluster ingress")
|
||||
log.V(1).Info("Ensuring cluster ingress")
|
||||
|
||||
expectedServerIngress := server.Ingress(ctx, cluster)
|
||||
|
||||
@@ -593,13 +614,13 @@ func (c *ClusterReconciler) ensureIngress(ctx context.Context, cluster *v1alpha1
|
||||
|
||||
key := client.ObjectKeyFromObject(currentServerIngress)
|
||||
if result != controllerutil.OperationResultNone {
|
||||
log.Info("cluster ingress updated", "key", key, "result", result)
|
||||
log.V(1).Info("Cluster ingress updated", "key", key, "result", result)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *ClusterReconciler) server(ctx context.Context, cluster *v1alpha1.Cluster, server *server.Server) error {
|
||||
func (c *ClusterReconciler) server(ctx context.Context, cluster *v1beta1.Cluster, server *server.Server) error {
|
||||
log := ctrl.LoggerFrom(ctx)
|
||||
|
||||
// create headless service for the statefulset
|
||||
@@ -619,6 +640,9 @@ func (c *ClusterReconciler) server(ctx context.Context, cluster *v1alpha1.Cluste
|
||||
return err
|
||||
}
|
||||
|
||||
// Add the finalizer to the StatefulSet so the statefulset controller can handle cleanup.
|
||||
controllerutil.AddFinalizer(expectedServerStatefulSet, etcdPodFinalizerName)
|
||||
|
||||
currentServerStatefulSet := expectedServerStatefulSet.DeepCopy()
|
||||
result, err := controllerutil.CreateOrUpdate(ctx, c.Client, currentServerStatefulSet, func() error {
|
||||
if err := controllerutil.SetControllerReference(cluster, currentServerStatefulSet, c.Scheme); err != nil {
|
||||
@@ -632,13 +656,13 @@ func (c *ClusterReconciler) server(ctx context.Context, cluster *v1alpha1.Cluste
|
||||
|
||||
if result != controllerutil.OperationResultNone {
|
||||
key := client.ObjectKeyFromObject(currentServerStatefulSet)
|
||||
log.Info("ensuring serverStatefulSet", "key", key, "result", result)
|
||||
log.V(1).Info("Ensuring server StatefulSet", "key", key, "result", result)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *ClusterReconciler) bindClusterRoles(ctx context.Context, cluster *v1alpha1.Cluster) error {
|
||||
func (c *ClusterReconciler) bindClusterRoles(ctx context.Context, cluster *v1beta1.Cluster) error {
|
||||
clusterRoles := []string{"k3k-kubelet-node", "k3k-priorityclass"}
|
||||
|
||||
var err error
|
||||
@@ -668,12 +692,12 @@ func (c *ClusterReconciler) bindClusterRoles(ctx context.Context, cluster *v1alp
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *ClusterReconciler) ensureAgent(ctx context.Context, cluster *v1alpha1.Cluster, serviceIP, token string) error {
|
||||
func (c *ClusterReconciler) ensureAgent(ctx context.Context, cluster *v1beta1.Cluster, serviceIP, token string) error {
|
||||
config := agent.NewConfig(cluster, c.Client, c.Scheme)
|
||||
|
||||
var agentEnsurer agent.ResourceEnsurer
|
||||
if cluster.Spec.Mode == agent.VirtualNodeMode {
|
||||
agentEnsurer = agent.NewVirtualAgent(config, serviceIP, token, c.K3SImage, c.K3SImagePullPolicy)
|
||||
agentEnsurer = agent.NewVirtualAgent(config, serviceIP, token, c.VirtualAgentImage, c.VirtualAgentImagePullPolicy, c.AgentImagePullSecrets)
|
||||
} else {
|
||||
// Assign port from pool if shared agent enabled mirroring of host nodes
|
||||
kubeletPort := 10250
|
||||
@@ -697,13 +721,13 @@ func (c *ClusterReconciler) ensureAgent(ctx context.Context, cluster *v1alpha1.C
|
||||
cluster.Status.WebhookPort = webhookPort
|
||||
}
|
||||
|
||||
agentEnsurer = agent.NewSharedAgent(config, serviceIP, c.SharedAgentImage, c.SharedAgentImagePullPolicy, token, kubeletPort, webhookPort)
|
||||
agentEnsurer = agent.NewSharedAgent(config, serviceIP, c.SharedAgentImage, c.SharedAgentImagePullPolicy, token, kubeletPort, webhookPort, c.AgentImagePullSecrets)
|
||||
}
|
||||
|
||||
return agentEnsurer.EnsureResources(ctx)
|
||||
}
|
||||
|
||||
func (c *ClusterReconciler) validate(cluster *v1alpha1.Cluster, policy v1alpha1.VirtualClusterPolicy) error {
|
||||
func (c *ClusterReconciler) validate(cluster *v1beta1.Cluster, policy v1beta1.VirtualClusterPolicy) error {
|
||||
if cluster.Name == ClusterInvalidName {
|
||||
return fmt.Errorf("%w: invalid cluster name %q", ErrClusterValidation, cluster.Name)
|
||||
}
|
||||
@@ -712,12 +736,17 @@ func (c *ClusterReconciler) validate(cluster *v1alpha1.Cluster, policy v1alpha1.
|
||||
return fmt.Errorf("%w: mode %q is not allowed by the policy %q", ErrClusterValidation, cluster.Spec.Mode, policy.Name)
|
||||
}
|
||||
|
||||
if cluster.Spec.CustomCAs.Enabled {
|
||||
if err := c.validateCustomCACerts(cluster); err != nil {
|
||||
if cluster.Spec.CustomCAs != nil && cluster.Spec.CustomCAs.Enabled {
|
||||
if err := c.validateCustomCACerts(cluster.Spec.CustomCAs.Sources); err != nil {
|
||||
return fmt.Errorf("%w: %w", ErrClusterValidation, err)
|
||||
}
|
||||
}
|
||||
|
||||
// validate sync policy
|
||||
if !equality.Semantic.DeepEqual(cluster.Spec.Sync, policy.Spec.Sync) {
|
||||
return fmt.Errorf("sync configuration %v is not allowed by the policy %q", cluster.Spec.Sync, policy.Name)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -730,7 +759,7 @@ func (c *ClusterReconciler) lookupServiceCIDR(ctx context.Context) (string, erro
|
||||
// Try to look for the serviceCIDR creating a failing service.
|
||||
// The error should contain the expected serviceCIDR
|
||||
|
||||
log.Info("looking up serviceCIDR from a failing service creation")
|
||||
log.V(1).Info("Looking up Service CIDR from a failing service creation")
|
||||
|
||||
failingSvc := v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "fail", Namespace: "default"},
|
||||
@@ -742,7 +771,7 @@ func (c *ClusterReconciler) lookupServiceCIDR(ctx context.Context) (string, erro
|
||||
|
||||
if len(splittedErrMsg) > 1 {
|
||||
serviceCIDR := strings.TrimSpace(splittedErrMsg[1])
|
||||
log.Info("found serviceCIDR from failing service creation: " + serviceCIDR)
|
||||
log.V(1).Info("Found Service CIDR from failing service creation: " + serviceCIDR)
|
||||
|
||||
// validate serviceCIDR
|
||||
_, serviceCIDRAddr, err := net.ParseCIDR(serviceCIDR)
|
||||
@@ -756,7 +785,7 @@ func (c *ClusterReconciler) lookupServiceCIDR(ctx context.Context) (string, erro
|
||||
|
||||
// Try to look for the the kube-apiserver Pod, and look for the '--service-cluster-ip-range' flag.
|
||||
|
||||
log.Info("looking up serviceCIDR from kube-apiserver pod")
|
||||
log.V(1).Info("Looking up Service CIDR from kube-apiserver pod")
|
||||
|
||||
matchingLabels := client.MatchingLabels(map[string]string{
|
||||
"component": "kube-apiserver",
|
||||
@@ -779,12 +808,12 @@ func (c *ClusterReconciler) lookupServiceCIDR(ctx context.Context) (string, erro
|
||||
for _, arg := range apiServerArgs {
|
||||
if strings.HasPrefix(arg, "--service-cluster-ip-range=") {
|
||||
serviceCIDR := strings.TrimPrefix(arg, "--service-cluster-ip-range=")
|
||||
log.Info("found serviceCIDR from kube-apiserver pod: " + serviceCIDR)
|
||||
log.V(1).Info("Found Service CIDR from kube-apiserver pod: " + serviceCIDR)
|
||||
|
||||
// validate serviceCIDR
|
||||
_, serviceCIDRAddr, err := net.ParseCIDR(serviceCIDR)
|
||||
if err != nil {
|
||||
log.Error(err, "serviceCIDR is not valid")
|
||||
log.Error(err, "Service CIDR is not valid")
|
||||
break
|
||||
}
|
||||
|
||||
@@ -799,8 +828,7 @@ func (c *ClusterReconciler) lookupServiceCIDR(ctx context.Context) (string, erro
|
||||
}
|
||||
|
||||
// validateCustomCACerts will make sure that all the cert secrets exists
|
||||
func (c *ClusterReconciler) validateCustomCACerts(cluster *v1alpha1.Cluster) error {
|
||||
credentialSources := cluster.Spec.CustomCAs.Sources
|
||||
func (c *ClusterReconciler) validateCustomCACerts(credentialSources v1beta1.CredentialSources) error {
|
||||
if credentialSources.ClientCA.SecretName == "" ||
|
||||
credentialSources.ServerCA.SecretName == "" ||
|
||||
credentialSources.ETCDPeerCA.SecretName == "" ||
|
||||
|
||||
@@ -12,21 +12,23 @@ import (
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
coordinationv1 "k8s.io/api/coordination/v1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
|
||||
"github.com/rancher/k3k/pkg/controller"
|
||||
"github.com/rancher/k3k/pkg/controller/cluster/agent"
|
||||
)
|
||||
|
||||
func (c *ClusterReconciler) finalizeCluster(ctx context.Context, cluster *v1alpha1.Cluster) (reconcile.Result, error) {
|
||||
func (c *ClusterReconciler) finalizeCluster(ctx context.Context, cluster *v1beta1.Cluster) (reconcile.Result, error) {
|
||||
log := ctrl.LoggerFrom(ctx)
|
||||
log.Info("finalizing Cluster")
|
||||
log.V(1).Info("Deleting Cluster")
|
||||
|
||||
// Set the Terminating phase and condition
|
||||
cluster.Status.Phase = v1alpha1.ClusterTerminating
|
||||
cluster.Status.Phase = v1beta1.ClusterTerminating
|
||||
meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
|
||||
Type: ConditionReady,
|
||||
Status: metav1.ConditionFalse,
|
||||
@@ -39,8 +41,8 @@ func (c *ClusterReconciler) finalizeCluster(ctx context.Context, cluster *v1alph
|
||||
}
|
||||
|
||||
// Deallocate ports for kubelet and webhook if used
|
||||
if cluster.Spec.Mode == v1alpha1.SharedClusterMode && cluster.Spec.MirrorHostNodes {
|
||||
log.Info("dellocating ports for kubelet and webhook")
|
||||
if cluster.Spec.Mode == v1beta1.SharedClusterMode && cluster.Spec.MirrorHostNodes {
|
||||
log.V(1).Info("dellocating ports for kubelet and webhook")
|
||||
|
||||
if err := c.PortAllocator.DeallocateKubeletPort(ctx, cluster.Name, cluster.Namespace, cluster.Status.KubeletPort); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
@@ -51,8 +53,25 @@ func (c *ClusterReconciler) finalizeCluster(ctx context.Context, cluster *v1alph
|
||||
}
|
||||
}
|
||||
|
||||
// delete API server lease
|
||||
lease := &coordinationv1.Lease{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Lease",
|
||||
APIVersion: "coordination.k8s.io/v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: cluster.Name,
|
||||
Namespace: cluster.Namespace,
|
||||
},
|
||||
}
|
||||
if err := c.Client.Delete(ctx, lease); err != nil && !apierrors.IsNotFound(err) {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
// Remove finalizer from the cluster and update it only when all resources are cleaned up
|
||||
if controllerutil.RemoveFinalizer(cluster, clusterFinalizerName) {
|
||||
log.Info("Deleting Cluster removing finalizer")
|
||||
|
||||
if err := c.Client.Update(ctx, cluster); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
@@ -61,7 +80,10 @@ func (c *ClusterReconciler) finalizeCluster(ctx context.Context, cluster *v1alph
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
func (c *ClusterReconciler) unbindClusterRoles(ctx context.Context, cluster *v1alpha1.Cluster) error {
|
||||
func (c *ClusterReconciler) unbindClusterRoles(ctx context.Context, cluster *v1beta1.Cluster) error {
|
||||
log := ctrl.LoggerFrom(ctx)
|
||||
log.V(1).Info("Unbinding ClusterRoles")
|
||||
|
||||
clusterRoles := []string{"k3k-kubelet-node", "k3k-priorityclass"}
|
||||
|
||||
var err error
|
||||
|
||||
@@ -17,7 +17,7 @@ import (
|
||||
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
|
||||
"github.com/rancher/k3k/pkg/controller/cluster"
|
||||
"github.com/rancher/k3k/pkg/controller/cluster/agent"
|
||||
|
||||
@@ -71,7 +71,12 @@ var _ = BeforeSuite(func() {
|
||||
|
||||
ctx, cancel = context.WithCancel(context.Background())
|
||||
|
||||
err = cluster.Add(ctx, mgr, "rancher/k3k-kubelet:latest", "", "rancher/k3s", "", 50, portAllocator, &record.FakeRecorder{})
|
||||
clusterConfig := &cluster.Config{
|
||||
SharedAgentImage: "rancher/k3k-kubelet:latest",
|
||||
K3SServerImage: "rancher/k3s",
|
||||
VirtualAgentImage: "rancher/k3s",
|
||||
}
|
||||
err = cluster.Add(ctx, mgr, clusterConfig, 50, portAllocator, &record.FakeRecorder{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
go func() {
|
||||
@@ -94,7 +99,7 @@ func buildScheme() *runtime.Scheme {
|
||||
|
||||
err := clientgoscheme.AddToScheme(scheme)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
err = v1alpha1.AddToScheme(scheme)
|
||||
err = v1beta1.AddToScheme(scheme)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
return scheme
|
||||
|
||||
@@ -12,7 +12,7 @@ import (
|
||||
networkingv1 "k8s.io/api/networking/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
|
||||
k3kcontroller "github.com/rancher/k3k/pkg/controller"
|
||||
"github.com/rancher/k3k/pkg/controller/cluster/server"
|
||||
|
||||
@@ -38,7 +38,7 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
|
||||
|
||||
When("creating a Cluster", func() {
|
||||
It("will be created with some defaults", func() {
|
||||
cluster := &v1alpha1.Cluster{
|
||||
cluster := &v1beta1.Cluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "cluster-",
|
||||
Namespace: namespace,
|
||||
@@ -48,15 +48,28 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
|
||||
err := k8sClient.Create(ctx, cluster)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
Expect(cluster.Spec.Mode).To(Equal(v1alpha1.SharedClusterMode))
|
||||
Expect(cluster.Spec.Mode).To(Equal(v1beta1.SharedClusterMode))
|
||||
Expect(cluster.Spec.Agents).To(Equal(ptr.To[int32](0)))
|
||||
Expect(cluster.Spec.Servers).To(Equal(ptr.To[int32](1)))
|
||||
Expect(cluster.Spec.Version).To(BeEmpty())
|
||||
|
||||
Expect(cluster.Spec.Persistence.Type).To(Equal(v1alpha1.DynamicPersistenceMode))
|
||||
Expect(cluster.Spec.Persistence.StorageRequestSize).To(Equal("1G"))
|
||||
Expect(cluster.Spec.CustomCAs).To(BeNil())
|
||||
|
||||
Expect(cluster.Status.Phase).To(Equal(v1alpha1.ClusterUnknown))
|
||||
// sync
|
||||
// enabled by default
|
||||
Expect(cluster.Spec.Sync).To(Not(BeNil()))
|
||||
Expect(cluster.Spec.Sync.ConfigMaps.Enabled).To(BeTrue())
|
||||
Expect(cluster.Spec.Sync.PersistentVolumeClaims.Enabled).To(BeTrue())
|
||||
Expect(cluster.Spec.Sync.Secrets.Enabled).To(BeTrue())
|
||||
Expect(cluster.Spec.Sync.Services.Enabled).To(BeTrue())
|
||||
// disabled by default
|
||||
Expect(cluster.Spec.Sync.Ingresses.Enabled).To(BeFalse())
|
||||
Expect(cluster.Spec.Sync.PriorityClasses.Enabled).To(BeFalse())
|
||||
|
||||
Expect(cluster.Spec.Persistence.Type).To(Equal(v1beta1.DynamicPersistenceMode))
|
||||
Expect(cluster.Spec.Persistence.StorageRequestSize).To(Equal("2G"))
|
||||
|
||||
Expect(cluster.Status.Phase).To(Equal(v1beta1.ClusterUnknown))
|
||||
|
||||
serverVersion, err := k8s.ServerVersion()
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
@@ -92,14 +105,14 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
|
||||
|
||||
When("exposing the cluster with nodePort", func() {
|
||||
It("will have a NodePort service", func() {
|
||||
cluster := &v1alpha1.Cluster{
|
||||
cluster := &v1beta1.Cluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "cluster-",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1alpha1.ClusterSpec{
|
||||
Expose: &v1alpha1.ExposeConfig{
|
||||
NodePort: &v1alpha1.NodePortConfig{},
|
||||
Spec: v1beta1.ClusterSpec{
|
||||
Expose: &v1beta1.ExposeConfig{
|
||||
NodePort: &v1beta1.NodePortConfig{},
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -124,14 +137,14 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
|
||||
})
|
||||
|
||||
It("will have the specified ports exposed when specified", func() {
|
||||
cluster := &v1alpha1.Cluster{
|
||||
cluster := &v1beta1.Cluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "cluster-",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1alpha1.ClusterSpec{
|
||||
Expose: &v1alpha1.ExposeConfig{
|
||||
NodePort: &v1alpha1.NodePortConfig{
|
||||
Spec: v1beta1.ClusterSpec{
|
||||
Expose: &v1beta1.ExposeConfig{
|
||||
NodePort: &v1beta1.NodePortConfig{
|
||||
ServerPort: ptr.To[int32](30010),
|
||||
ETCDPort: ptr.To[int32](30011),
|
||||
},
|
||||
@@ -173,14 +186,14 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
|
||||
})
|
||||
|
||||
It("will not expose the port when out of range", func() {
|
||||
cluster := &v1alpha1.Cluster{
|
||||
cluster := &v1beta1.Cluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "cluster-",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1alpha1.ClusterSpec{
|
||||
Expose: &v1alpha1.ExposeConfig{
|
||||
NodePort: &v1alpha1.NodePortConfig{
|
||||
Spec: v1beta1.ClusterSpec{
|
||||
Expose: &v1beta1.ExposeConfig{
|
||||
NodePort: &v1beta1.NodePortConfig{
|
||||
ETCDPort: ptr.To[int32](2222),
|
||||
},
|
||||
},
|
||||
@@ -218,14 +231,14 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
|
||||
|
||||
When("exposing the cluster with loadbalancer", func() {
|
||||
It("will have a LoadBalancer service with the default ports exposed", func() {
|
||||
cluster := &v1alpha1.Cluster{
|
||||
cluster := &v1beta1.Cluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "cluster-",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1alpha1.ClusterSpec{
|
||||
Expose: &v1alpha1.ExposeConfig{
|
||||
LoadBalancer: &v1alpha1.LoadBalancerConfig{},
|
||||
Spec: v1beta1.ClusterSpec{
|
||||
Expose: &v1beta1.ExposeConfig{
|
||||
LoadBalancer: &v1beta1.LoadBalancerConfig{},
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -263,6 +276,26 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
|
||||
Expect(etcdPort.TargetPort.IntValue()).To(BeEquivalentTo(2379))
|
||||
})
|
||||
})
|
||||
|
||||
When("exposing the cluster with nodePort and loadbalancer", func() {
|
||||
It("will fail", func() {
|
||||
cluster := &v1beta1.Cluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "cluster-",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1beta1.ClusterSpec{
|
||||
Expose: &v1beta1.ExposeConfig{
|
||||
LoadBalancer: &v1beta1.LoadBalancerConfig{},
|
||||
NodePort: &v1beta1.NodePortConfig{},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := k8sClient.Create(ctx, cluster)
|
||||
Expect(err).To(HaveOccurred())
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
38
pkg/controller/cluster/filter.go
Normal file
@@ -0,0 +1,38 @@
package cluster

import (
    "k8s.io/apimachinery/pkg/types"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/predicate"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    "github.com/rancher/k3k/k3k-kubelet/translate"
    "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)

func newClusterPredicate() predicate.Predicate {
    return predicate.NewPredicateFuncs(func(object client.Object) bool {
        owner := metav1.GetControllerOf(object)

        return owner != nil &&
            owner.Kind == "Cluster" &&
            owner.APIVersion == v1beta1.SchemeGroupVersion.String()
    })
}

func clusterNamespacedName(object client.Object) types.NamespacedName {
    var clusterName string

    owner := metav1.GetControllerOf(object)
    if owner != nil && owner.Kind == "Cluster" && owner.APIVersion == v1beta1.SchemeGroupVersion.String() {
        clusterName = owner.Name
    } else {
        clusterName = object.GetLabels()[translate.ClusterNameLabel]
    }

    return types.NamespacedName{
        Name: clusterName,
        Namespace: object.GetNamespace(),
    }
}
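
These helpers are consumed by the new Pod and Service controllers further down in this compare (newClusterPredicate is passed to WithEventFilter, clusterNamespacedName resolves the owning Cluster of a host object). A minimal sketch of that pattern, with a hypothetical helper name:

package cluster

import (
    "context"

    v1 "k8s.io/api/core/v1"
    ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
)

// resolveVirtualClientFor is a hypothetical helper mirroring how the new
// reconcilers combine these functions: find the owning Cluster of a host Pod
// (owner reference first, ClusterNameLabel as fallback), then open a client
// against that virtual cluster.
func resolveVirtualClientFor(ctx context.Context, hostClient ctrlruntimeclient.Client, pod *v1.Pod) (ctrlruntimeclient.Client, error) {
    cluster := clusterNamespacedName(pod)
    return newVirtualClient(ctx, hostClient, cluster.Name, cluster.Namespace)
}
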
@@ -2,36 +2,18 @@ package cluster
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/handler"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
certutil "github.com/rancher/dynamiclistener/cert"
|
||||
clientv3 "go.etcd.io/etcd/client/v3"
|
||||
apps "k8s.io/api/apps/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
k3kcontroller "github.com/rancher/k3k/pkg/controller"
|
||||
"github.com/rancher/k3k/pkg/controller/certs"
|
||||
"github.com/rancher/k3k/pkg/controller/cluster/server"
|
||||
"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -43,235 +25,54 @@ type PodReconciler struct {
|
||||
Scheme *runtime.Scheme
|
||||
}
|
||||
|
||||
// Add adds a new controller to the manager
|
||||
// AddPodController adds a new controller for Pods to the manager.
|
||||
// It will reconcile the Pods of the Host Cluster with the one of the Virtual Cluster.
|
||||
func AddPodController(ctx context.Context, mgr manager.Manager, maxConcurrentReconciles int) error {
|
||||
// initialize a new Reconciler
|
||||
reconciler := PodReconciler{
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
}
|
||||
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
Watches(&v1.Pod{}, handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &apps.StatefulSet{}, handler.OnlyControllerOwner())).
|
||||
For(&v1.Pod{}).
|
||||
Named(podController).
|
||||
WithEventFilter(newClusterPredicate()).
|
||||
WithOptions(controller.Options{MaxConcurrentReconciles: maxConcurrentReconciles}).
|
||||
Complete(&reconciler)
|
||||
}
|
||||
|
||||
func (p *PodReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
log := ctrl.LoggerFrom(ctx).WithValues("statefulset", req.NamespacedName)
|
||||
ctx = ctrl.LoggerInto(ctx, log) // enrich the current logger
|
||||
func (r *PodReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
log := ctrl.LoggerFrom(ctx)
|
||||
log.V(1).Info("Reconciling Pod")
|
||||
|
||||
s := strings.Split(req.Name, "-")
|
||||
if len(s) < 1 {
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
if s[0] != "k3k" {
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
clusterName := s[1]
|
||||
|
||||
var cluster v1alpha1.Cluster
|
||||
if err := p.Client.Get(ctx, types.NamespacedName{Name: clusterName, Namespace: req.Namespace}, &cluster); err != nil {
|
||||
if !apierrors.IsNotFound(err) {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
matchingLabels := ctrlruntimeclient.MatchingLabels(map[string]string{"role": "server"})
|
||||
listOpts := &ctrlruntimeclient.ListOptions{Namespace: req.Namespace}
|
||||
matchingLabels.ApplyToList(listOpts)
|
||||
|
||||
var podList v1.PodList
|
||||
if err := p.Client.List(ctx, &podList, listOpts); err != nil {
|
||||
var pod v1.Pod
|
||||
if err := r.Client.Get(ctx, req.NamespacedName, &pod); err != nil {
|
||||
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
if len(podList.Items) == 1 {
|
||||
return reconcile.Result{}, nil
|
||||
// get cluster from the object
|
||||
cluster := clusterNamespacedName(&pod)
|
||||
|
||||
virtualClient, err := newVirtualClient(ctx, r.Client, cluster.Name, cluster.Namespace)
|
||||
if err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
for _, pod := range podList.Items {
|
||||
if err := p.handleServerPod(ctx, cluster, &pod); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
if !pod.DeletionTimestamp.IsZero() {
|
||||
virtName := pod.GetAnnotations()[translate.ResourceNameAnnotation]
|
||||
virtNamespace := pod.GetAnnotations()[translate.ResourceNamespaceAnnotation]
|
||||
|
||||
virtPod := v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: virtName,
|
||||
Namespace: virtNamespace,
|
||||
},
|
||||
}
|
||||
|
||||
log.V(1).Info("Deleting Virtual Pod", "name", virtName, "namespace", virtNamespace)
|
||||
|
||||
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(virtualClient.Delete(ctx, &virtPod))
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
func (p *PodReconciler) handleServerPod(ctx context.Context, cluster v1alpha1.Cluster, pod *v1.Pod) error {
|
||||
log := ctrl.LoggerFrom(ctx)
|
||||
log.Info("handling server pod")
|
||||
|
||||
role, found := pod.Labels["role"]
|
||||
if !found {
|
||||
return fmt.Errorf("server pod has no role label")
|
||||
}
|
||||
|
||||
if role != "server" {
|
||||
log.V(1).Info("pod has a different role: " + role)
|
||||
return nil
|
||||
}
|
||||
|
||||
// if etcd pod is marked for deletion then we need to remove it from the etcd member list before deletion
|
||||
if !pod.DeletionTimestamp.IsZero() {
|
||||
// check if cluster is deleted then remove the finalizer from the pod
|
||||
if cluster.Name == "" {
|
||||
if controllerutil.ContainsFinalizer(pod, etcdPodFinalizerName) {
|
||||
controllerutil.RemoveFinalizer(pod, etcdPodFinalizerName)
|
||||
|
||||
if err := p.Client.Update(ctx, pod); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
tlsConfig, err := p.getETCDTLS(ctx, &cluster)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// remove server from etcd
|
||||
client, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: []string{
|
||||
fmt.Sprintf("https://%s.%s:2379", server.ServiceName(cluster.Name), pod.Namespace),
|
||||
},
|
||||
TLS: tlsConfig,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := removePeer(ctx, client, pod.Name, pod.Status.PodIP); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// remove our finalizer from the list and update it.
|
||||
if controllerutil.RemoveFinalizer(pod, etcdPodFinalizerName) {
|
||||
if err := p.Client.Update(ctx, pod); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if controllerutil.AddFinalizer(pod, etcdPodFinalizerName) {
|
||||
return p.Client.Update(ctx, pod)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *PodReconciler) getETCDTLS(ctx context.Context, cluster *v1alpha1.Cluster) (*tls.Config, error) {
|
||||
log := ctrl.LoggerFrom(ctx)
|
||||
log.Info("generating etcd TLS client certificate", "cluster", cluster)
|
||||
|
||||
token, err := p.clusterToken(ctx, cluster)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
endpoint := server.ServiceName(cluster.Name) + "." + cluster.Namespace
|
||||
|
||||
var b *bootstrap.ControlRuntimeBootstrap
|
||||
|
||||
if err := retry.OnError(k3kcontroller.Backoff, func(err error) bool {
|
||||
return true
|
||||
}, func() error {
|
||||
var err error
|
||||
b, err = bootstrap.DecodedBootstrap(token, endpoint)
|
||||
return err
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
etcdCert, etcdKey, err := certs.CreateClientCertKey("etcd-client", nil, nil, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, 0, b.ETCDServerCA.Content, b.ETCDServerCAKey.Content)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
clientCert, err := tls.X509KeyPair(etcdCert, etcdKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// create rootCA CertPool
|
||||
cert, err := certutil.ParseCertsPEM([]byte(b.ETCDServerCA.Content))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pool := x509.NewCertPool()
|
||||
pool.AddCert(cert[0])
|
||||
|
||||
return &tls.Config{
|
||||
RootCAs: pool,
|
||||
Certificates: []tls.Certificate{clientCert},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// removePeer removes a peer from the cluster. The peer name and IP address must both match.
|
||||
func removePeer(ctx context.Context, client *clientv3.Client, name, address string) error {
|
||||
log := ctrl.LoggerFrom(ctx)
|
||||
log.Info("removing peer from cluster", "name", name, "address", address)
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, memberRemovalTimeout)
|
||||
defer cancel()
|
||||
|
||||
members, err := client.MemberList(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, member := range members.Members {
|
||||
if !strings.Contains(member.Name, name) {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, peerURL := range member.PeerURLs {
|
||||
u, err := url.Parse(peerURL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if u.Hostname() == address {
|
||||
log.Info("removing member from etcd", "name", member.Name, "id", member.ID, "address", address)
|
||||
|
||||
_, err := client.MemberRemove(ctx, member.ID)
|
||||
if errors.Is(err, rpctypes.ErrGRPCMemberNotFound) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *PodReconciler) clusterToken(ctx context.Context, cluster *v1alpha1.Cluster) (string, error) {
|
||||
var tokenSecret v1.Secret
|
||||
|
||||
nn := types.NamespacedName{
|
||||
Name: TokenSecretName(cluster.Name),
|
||||
Namespace: cluster.Namespace,
|
||||
}
|
||||
|
||||
if cluster.Spec.TokenSecretRef != nil {
|
||||
nn.Name = TokenSecretName(cluster.Name)
|
||||
}
|
||||
|
||||
if err := p.Client.Get(ctx, nn, &tokenSecret); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if _, ok := tokenSecret.Data["token"]; !ok {
|
||||
return "", fmt.Errorf("no token field in secret %s/%s", nn.Namespace, nn.Name)
|
||||
}
|
||||
|
||||
return string(tokenSecret.Data["token"]), nil
|
||||
}
|
||||
|
||||
@@ -16,7 +16,7 @@ import (
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
|
||||
"github.com/rancher/k3k/pkg/controller"
|
||||
)
|
||||
|
||||
@@ -39,7 +39,7 @@ type content struct {
|
||||
// Generate generates the bootstrap for the cluster:
|
||||
// 1- use the server token to get the bootstrap data from k3s
|
||||
// 2- save the bootstrap data as a secret
|
||||
func GenerateBootstrapData(ctx context.Context, cluster *v1alpha1.Cluster, ip, token string) ([]byte, error) {
|
||||
func GenerateBootstrapData(ctx context.Context, cluster *v1beta1.Cluster, ip, token string) ([]byte, error) {
|
||||
bootstrap, err := requestBootstrap(token, ip)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to request bootstrap secret: %w", err)
|
||||
@@ -162,7 +162,7 @@ func DecodedBootstrap(token, ip string) (*ControlRuntimeBootstrap, error) {
|
||||
return bootstrap, nil
|
||||
}
|
||||
|
||||
func GetFromSecret(ctx context.Context, client client.Client, cluster *v1alpha1.Cluster) (*ControlRuntimeBootstrap, error) {
|
||||
func GetFromSecret(ctx context.Context, client client.Client, cluster *v1beta1.Cluster) (*ControlRuntimeBootstrap, error) {
|
||||
key := types.NamespacedName{
|
||||
Name: controller.SafeConcatNameWithPrefix(cluster.Name, "bootstrap"),
|
||||
Namespace: cluster.Namespace,
|
||||
|
||||
@@ -8,7 +8,7 @@ import (
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
|
||||
"github.com/rancher/k3k/pkg/controller"
|
||||
"github.com/rancher/k3k/pkg/controller/cluster/agent"
|
||||
)
|
||||
@@ -45,15 +45,15 @@ func (s *Server) Config(init bool, serviceIP string) (*v1.Secret, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
func serverConfigData(serviceIP string, cluster *v1alpha1.Cluster, token string) string {
|
||||
func serverConfigData(serviceIP string, cluster *v1beta1.Cluster, token string) string {
|
||||
return "cluster-init: true\nserver: https://" + serviceIP + "\n" + serverOptions(cluster, token)
|
||||
}
|
||||
|
||||
func initConfigData(cluster *v1alpha1.Cluster, token string) string {
|
||||
func initConfigData(cluster *v1beta1.Cluster, token string) string {
|
||||
return "cluster-init: true\n" + serverOptions(cluster, token)
|
||||
}
|
||||
|
||||
func serverOptions(cluster *v1alpha1.Cluster, token string) string {
|
||||
func serverOptions(cluster *v1beta1.Cluster, token string) string {
|
||||
var opts string
|
||||
|
||||
// TODO: generate token if not found
|
||||
|
||||
@@ -8,7 +8,7 @@ import (
|
||||
networkingv1 "k8s.io/api/networking/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
|
||||
"github.com/rancher/k3k/pkg/controller"
|
||||
)
|
||||
|
||||
@@ -22,7 +22,7 @@ func IngressName(clusterName string) string {
|
||||
return controller.SafeConcatNameWithPrefix(clusterName, "ingress")
|
||||
}
|
||||
|
||||
func Ingress(ctx context.Context, cluster *v1alpha1.Cluster) networkingv1.Ingress {
|
||||
func Ingress(ctx context.Context, cluster *v1beta1.Cluster) networkingv1.Ingress {
|
||||
ingress := networkingv1.Ingress{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Ingress",
|
||||
@@ -52,7 +52,7 @@ func Ingress(ctx context.Context, cluster *v1alpha1.Cluster) networkingv1.Ingres
|
||||
return ingress
|
||||
}
|
||||
|
||||
func ingressRules(cluster *v1alpha1.Cluster) []networkingv1.IngressRule {
|
||||
func ingressRules(cluster *v1beta1.Cluster) []networkingv1.IngressRule {
|
||||
var ingressRules []networkingv1.IngressRule
|
||||
|
||||
if cluster.Spec.Expose == nil || cluster.Spec.Expose.Ingress == nil {
|
||||
|
||||
@@ -13,12 +13,13 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/utils/ptr"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
|
||||
apps "k8s.io/api/apps/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
|
||||
"github.com/rancher/k3k/pkg/controller"
|
||||
"github.com/rancher/k3k/pkg/controller/cluster/agent"
|
||||
)
|
||||
@@ -32,22 +33,24 @@ const (

// Server
type Server struct {
    cluster *v1alpha1.Cluster
    client client.Client
    mode string
    token string
    k3SImage string
    k3SImagePullPolicy string
    cluster *v1beta1.Cluster
    client client.Client
    mode string
    token string
    image string
    imagePullPolicy string
    imagePullSecrets []string
}

func New(cluster *v1alpha1.Cluster, client client.Client, token string, k3SImage string, k3SImagePullPolicy string) *Server {
func New(cluster *v1beta1.Cluster, client client.Client, token, image, imagePullPolicy string, imagePullSecrets []string) *Server {
    return &Server{
        cluster: cluster,
        client: client,
        token: token,
        mode: string(cluster.Spec.Mode),
        k3SImage: k3SImage,
        k3SImagePullPolicy: k3SImagePullPolicy,
        cluster: cluster,
        client: client,
        token: token,
        mode: string(cluster.Spec.Mode),
        image: image,
        imagePullPolicy: imagePullPolicy,
        imagePullSecrets: imagePullSecrets,
    }
}

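
The matching call-site change appears earlier in this compare, where the cluster controller passes c.K3SServerImage, c.K3SServerImagePullPolicy and c.ServerImagePullSecrets. A sketch of the widened constructor with placeholder values only:

package cluster

import (
    "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
    "github.com/rancher/k3k/pkg/controller/cluster/server"
    ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
)

// newServerFor is a hypothetical wrapper around the widened constructor; the
// image, pull policy and pull-secret values are placeholders standing in for
// the Config fields the controller supplies.
func newServerFor(cluster *v1beta1.Cluster, c ctrlruntimeclient.Client, token string) *server.Server {
    return server.New(cluster, c, token,
        "rancher/k3s",             // K3SServerImage (placeholder)
        "IfNotPresent",            // K3SServerImagePullPolicy (placeholder)
        []string{"registry-cred"}, // ServerImagePullSecrets (placeholder)
    )
}
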
@@ -119,7 +122,7 @@ func (s *Server) podSpec(image, name string, persistent bool, startupCmd string)
|
||||
{
|
||||
Name: name,
|
||||
Image: image,
|
||||
ImagePullPolicy: v1.PullPolicy(s.k3SImagePullPolicy),
|
||||
ImagePullPolicy: v1.PullPolicy(s.imagePullPolicy),
|
||||
Env: []v1.EnvVar{
|
||||
{
|
||||
Name: "POD_NAME",
|
||||
@@ -242,6 +245,11 @@ func (s *Server) podSpec(image, name string, persistent bool, startupCmd string)
|
||||
|
||||
podSpec.Containers[0].Env = append(podSpec.Containers[0].Env, s.cluster.Spec.ServerEnvs...)
|
||||
|
||||
// add image pull secrets
|
||||
for _, imagePullSecret := range s.imagePullSecrets {
|
||||
podSpec.ImagePullSecrets = append(podSpec.ImagePullSecrets, v1.LocalObjectReference{Name: imagePullSecret})
|
||||
}
|
||||
|
||||
return podSpec
|
||||
}
|
||||
|
||||
@@ -253,14 +261,18 @@ func (s *Server) StatefulServer(ctx context.Context) (*apps.StatefulSet, error)
|
||||
persistent bool
|
||||
)
|
||||
|
||||
image := controller.K3SImage(s.cluster, s.k3SImage)
|
||||
image := controller.K3SImage(s.cluster, s.image)
|
||||
name := controller.SafeConcatNameWithPrefix(s.cluster.Name, serverName)
|
||||
|
||||
replicas = *s.cluster.Spec.Servers
|
||||
|
||||
if s.cluster.Spec.Persistence.Type == v1alpha1.DynamicPersistenceMode {
|
||||
if s.cluster.Spec.Persistence.Type == v1beta1.DynamicPersistenceMode {
|
||||
persistent = true
|
||||
pvClaim = s.setupDynamicPersistence()
|
||||
|
||||
if err := controllerutil.SetControllerReference(s.cluster, &pvClaim, s.client.Scheme()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
@@ -323,7 +335,7 @@ func (s *Server) StatefulServer(ctx context.Context) (*apps.StatefulSet, error)
|
||||
volumeMounts = append(volumeMounts, volumeMount)
|
||||
}
|
||||
|
||||
if s.cluster.Spec.CustomCAs.Enabled {
|
||||
if s.cluster.Spec.CustomCAs != nil && s.cluster.Spec.CustomCAs.Enabled {
|
||||
vols, mounts, err := s.loadCACertBundle(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -372,7 +384,7 @@ func (s *Server) StatefulServer(ctx context.Context) (*apps.StatefulSet, error)
|
||||
},
|
||||
},
|
||||
}
|
||||
if s.cluster.Spec.Persistence.Type == v1alpha1.DynamicPersistenceMode {
|
||||
if s.cluster.Spec.Persistence.Type == v1beta1.DynamicPersistenceMode {
|
||||
ss.Spec.VolumeClaimTemplates = []v1.PersistentVolumeClaim{pvClaim}
|
||||
}
|
||||
|
||||
@@ -427,6 +439,10 @@ func (s *Server) setupStartCommand() (string, error) {
|
||||
}
|
||||
|
||||
func (s *Server) loadCACertBundle(ctx context.Context) ([]v1.Volume, []v1.VolumeMount, error) {
|
||||
if s.cluster.Spec.CustomCAs == nil {
|
||||
return nil, nil, fmt.Errorf("customCAs not found")
|
||||
}
|
||||
|
||||
customCerts := s.cluster.Spec.CustomCAs.Sources
|
||||
caCertMap := map[string]string{
|
||||
"server-ca": customCerts.ServerCA.SecretName,
|
||||
|
||||
@@ -6,11 +6,11 @@ import (
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
|
||||
"github.com/rancher/k3k/pkg/controller"
|
||||
)
|
||||
|
||||
func Service(cluster *v1alpha1.Cluster) *v1.Service {
|
||||
func Service(cluster *v1beta1.Cluster) *v1.Service {
|
||||
service := &v1.Service{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Service",
|
||||
@@ -69,7 +69,7 @@ func Service(cluster *v1alpha1.Cluster) *v1.Service {
|
||||
}
|
||||
|
||||
// addLoadBalancerPorts adds the load balancer ports to the service
|
||||
func addLoadBalancerPorts(service *v1.Service, loadbalancerConfig v1alpha1.LoadBalancerConfig, k3sServerPort, etcdPort v1.ServicePort) {
|
||||
func addLoadBalancerPorts(service *v1.Service, loadbalancerConfig v1beta1.LoadBalancerConfig, k3sServerPort, etcdPort v1.ServicePort) {
|
||||
// If the server port is not specified, use the default port
|
||||
if loadbalancerConfig.ServerPort == nil {
|
||||
service.Spec.Ports = append(service.Spec.Ports, k3sServerPort)
|
||||
@@ -90,7 +90,7 @@ func addLoadBalancerPorts(service *v1.Service, loadbalancerConfig v1alpha1.LoadB
|
||||
}
|
||||
|
||||
// addNodePortPorts adds the node port ports to the service
|
||||
func addNodePortPorts(service *v1.Service, nodePortConfig v1alpha1.NodePortConfig, k3sServerPort, etcdPort v1.ServicePort) {
|
||||
func addNodePortPorts(service *v1.Service, nodePortConfig v1beta1.NodePortConfig, k3sServerPort, etcdPort v1.ServicePort) {
|
||||
// If the server port is not specified Kubernetes will set the node port to a random port between 30000-32767
|
||||
if nodePortConfig.ServerPort == nil {
|
||||
service.Spec.Ports = append(service.Spec.Ports, k3sServerPort)
|
||||
|
||||
93
pkg/controller/cluster/service.go
Normal file
@@ -0,0 +1,93 @@
|
||||
package cluster
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/equality"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
)
|
||||
|
||||
const (
|
||||
serviceController = "k3k-service-controller"
|
||||
)
|
||||
|
||||
type ServiceReconciler struct {
|
||||
HostClient ctrlruntimeclient.Client
|
||||
}
|
||||
|
||||
// Add adds a new controller to the manager
|
||||
func AddServiceController(ctx context.Context, mgr manager.Manager, maxConcurrentReconciles int) error {
|
||||
reconciler := ServiceReconciler{
|
||||
HostClient: mgr.GetClient(),
|
||||
}
|
||||
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
Named(serviceController).
|
||||
For(&v1.Service{}).
|
||||
WithEventFilter(newClusterPredicate()).
|
||||
Complete(&reconciler)
|
||||
}
|
||||
|
||||
func (r *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
log := ctrl.LoggerFrom(ctx)
|
||||
log.V(1).Info("Reconciling Service")
|
||||
|
||||
var hostService v1.Service
|
||||
if err := r.HostClient.Get(ctx, req.NamespacedName, &hostService); err != nil {
|
||||
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
// Some services are owned by the cluster but don't have the annotations set (i.e. the kubelet svc)
|
||||
// They don't exists in the virtual cluster, so we can skip them
|
||||
|
||||
virtualServiceName, virtualServiceNameFound := hostService.Annotations[translate.ResourceNameAnnotation]
|
||||
virtualServiceNamespace, virtualServiceNamespaceFound := hostService.Annotations[translate.ResourceNamespaceAnnotation]
|
||||
|
||||
if !virtualServiceNameFound || !virtualServiceNamespaceFound {
|
||||
log.V(1).Info(fmt.Sprintf("Service %s/%s does not have virtual service annotations, skipping", hostService.Namespace, hostService.Name))
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
// get cluster from the object
|
||||
cluster := clusterNamespacedName(&hostService)
|
||||
|
||||
virtualClient, err := newVirtualClient(ctx, r.HostClient, cluster.Name, cluster.Namespace)
|
||||
if err != nil {
|
||||
return reconcile.Result{}, fmt.Errorf("failed to get cluster info: %v", err)
|
||||
}
|
||||
|
||||
if !hostService.DeletionTimestamp.IsZero() {
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
virtualServiceKey := types.NamespacedName{
|
||||
Name: virtualServiceName,
|
||||
Namespace: virtualServiceNamespace,
|
||||
}
|
||||
|
||||
var virtualService v1.Service
|
||||
if err := virtualClient.Get(ctx, virtualServiceKey, &virtualService); err != nil {
|
||||
return reconcile.Result{}, fmt.Errorf("failed to get virtual service: %v", err)
|
||||
}
|
||||
|
||||
if !equality.Semantic.DeepEqual(virtualService.Status.LoadBalancer, hostService.Status.LoadBalancer) {
|
||||
log.V(1).Info("Updating Virtual Service Status", "name", virtualServiceName, "namespace", virtualServiceNamespace)
|
||||
|
||||
virtualService.Status.LoadBalancer = hostService.Status.LoadBalancer
|
||||
|
||||
if err := virtualClient.Status().Update(ctx, &virtualService); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
pkg/controller/cluster/statefulset.go (new file, 330 lines)
@@ -0,0 +1,330 @@
package cluster

import (
  "context"
  "crypto/tls"
  "crypto/x509"
  "errors"
  "fmt"
  "net/url"
  "strings"

  "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
  "k8s.io/apimachinery/pkg/runtime"
  "k8s.io/apimachinery/pkg/types"
  "k8s.io/client-go/util/retry"
  "sigs.k8s.io/controller-runtime/pkg/controller"
  "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
  "sigs.k8s.io/controller-runtime/pkg/manager"
  "sigs.k8s.io/controller-runtime/pkg/reconcile"

  certutil "github.com/rancher/dynamiclistener/cert"
  clientv3 "go.etcd.io/etcd/client/v3"
  apps "k8s.io/api/apps/v1"
  v1 "k8s.io/api/core/v1"
  apierrors "k8s.io/apimachinery/pkg/api/errors"
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  ctrl "sigs.k8s.io/controller-runtime"
  ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"

  "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
  k3kcontroller "github.com/rancher/k3k/pkg/controller"
  "github.com/rancher/k3k/pkg/controller/certs"
  "github.com/rancher/k3k/pkg/controller/cluster/server"
  "github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
)

const (
  statefulsetController = "k3k-statefulset-controller"
  etcdPodFinalizerName  = "etcdpod.k3k.io/finalizer"
)

type StatefulSetReconciler struct {
  Client ctrlruntimeclient.Client
  Scheme *runtime.Scheme
}

// Add adds a new controller to the manager
func AddStatefulSetController(ctx context.Context, mgr manager.Manager, maxConcurrentReconciles int) error {
  // initialize a new Reconciler
  reconciler := StatefulSetReconciler{
    Client: mgr.GetClient(),
    Scheme: mgr.GetScheme(),
  }

  return ctrl.NewControllerManagedBy(mgr).
    For(&apps.StatefulSet{}).
    Owns(&v1.Pod{}).
    Named(statefulsetController).
    WithOptions(controller.Options{MaxConcurrentReconciles: maxConcurrentReconciles}).
    Complete(&reconciler)
}

func (p *StatefulSetReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
  log := ctrl.LoggerFrom(ctx)
  log.Info("Reconciling StatefulSet")

  var sts apps.StatefulSet
  if err := p.Client.Get(ctx, req.NamespacedName, &sts); err != nil {
    // we can ignore the IsNotFound error
    // if the stateful set was deleted we have already cleaned up the pods
    return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
  }

  // If the StatefulSet is being deleted, we need to remove the finalizers from its pods
  // and remove the finalizer from the StatefulSet itself.
  if !sts.DeletionTimestamp.IsZero() {
    return p.handleDeletion(ctx, &sts)
  }

  // get cluster name from the object
  clusterKey := clusterNamespacedName(&sts)

  var cluster v1beta1.Cluster
  if err := p.Client.Get(ctx, clusterKey, &cluster); err != nil {
    if !apierrors.IsNotFound(err) {
      return reconcile.Result{}, err
    }
  }

  podList, err := p.listPods(ctx, &sts)
  if err != nil {
    return reconcile.Result{}, err
  }

  if len(podList.Items) == 1 {
    serverPod := podList.Items[0]
    if !serverPod.DeletionTimestamp.IsZero() {
      if controllerutil.RemoveFinalizer(&serverPod, etcdPodFinalizerName) {
        if err := p.Client.Update(ctx, &serverPod); err != nil {
          return reconcile.Result{}, err
        }
      }

      return reconcile.Result{}, nil
    }
  }

  for _, pod := range podList.Items {
    if err := p.handleServerPod(ctx, cluster, &pod); err != nil {
      return reconcile.Result{}, err
    }
  }

  return reconcile.Result{}, nil
}

func (p *StatefulSetReconciler) handleServerPod(ctx context.Context, cluster v1beta1.Cluster, pod *v1.Pod) error {
  log := ctrl.LoggerFrom(ctx)
  log.V(1).Info("Handling Server Pod")

  if pod.DeletionTimestamp.IsZero() {
    if controllerutil.AddFinalizer(pod, etcdPodFinalizerName) {
      log.V(1).Info("Server Pod is being deleted. Removing finalizer", "pod", pod.Name, "namespace", pod.Namespace)

      return p.Client.Update(ctx, pod)
    }

    return nil
  }

  // if etcd pod is marked for deletion then we need to remove it from the etcd member list before deletion

  // check if cluster is deleted then remove the finalizer from the pod
  if cluster.Name == "" {
    if controllerutil.RemoveFinalizer(pod, etcdPodFinalizerName) {
      log.V(1).Info("Cluster was deleted. Deleting Server Pod removing finalizer", "pod", pod.Name, "namespace", pod.Namespace)

      if err := p.Client.Update(ctx, pod); err != nil {
        return err
      }
    }

    return nil
  }

  tlsConfig, err := p.getETCDTLS(ctx, &cluster)
  if err != nil {
    return err
  }

  // remove server from etcd
  client, err := clientv3.New(clientv3.Config{
    Endpoints: []string{
      fmt.Sprintf("https://%s.%s:2379", server.ServiceName(cluster.Name), pod.Namespace),
    },
    TLS: tlsConfig,
  })
  if err != nil {
    return err
  }

  if err := removePeer(ctx, client, pod.Name, pod.Status.PodIP); err != nil {
    return err
  }

  // remove our finalizer from the list and update it.
  if controllerutil.RemoveFinalizer(pod, etcdPodFinalizerName) {
    log.V(1).Info("Deleting Server Pod removing finalizer", "pod", pod.Name, "namespace", pod.Namespace)

    if err := p.Client.Update(ctx, pod); err != nil {
      return err
    }
  }

  return nil
}

func (p *StatefulSetReconciler) getETCDTLS(ctx context.Context, cluster *v1beta1.Cluster) (*tls.Config, error) {
  log := ctrl.LoggerFrom(ctx)
  log.V(1).Info("Generating ETCD TLS client certificate", "cluster", cluster)

  token, err := p.clusterToken(ctx, cluster)
  if err != nil {
    return nil, err
  }

  endpoint := server.ServiceName(cluster.Name) + "." + cluster.Namespace

  var b *bootstrap.ControlRuntimeBootstrap

  if err := retry.OnError(k3kcontroller.Backoff, func(err error) bool {
    return true
  }, func() error {
    var err error
    b, err = bootstrap.DecodedBootstrap(token, endpoint)
    return err
  }); err != nil {
    return nil, err
  }

  etcdCert, etcdKey, err := certs.CreateClientCertKey("etcd-client", nil, nil, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, 0, b.ETCDServerCA.Content, b.ETCDServerCAKey.Content)
  if err != nil {
    return nil, err
  }

  clientCert, err := tls.X509KeyPair(etcdCert, etcdKey)
  if err != nil {
    return nil, err
  }
  // create rootCA CertPool
  cert, err := certutil.ParseCertsPEM([]byte(b.ETCDServerCA.Content))
  if err != nil {
    return nil, err
  }

  pool := x509.NewCertPool()
  pool.AddCert(cert[0])

  return &tls.Config{
    RootCAs:      pool,
    Certificates: []tls.Certificate{clientCert},
  }, nil
}

// removePeer removes a peer from the cluster. The peer name and IP address must both match.
func removePeer(ctx context.Context, client *clientv3.Client, name, address string) error {
  log := ctrl.LoggerFrom(ctx)
  log.V(1).Info("Removing peer from cluster", "name", name, "address", address)

  ctx, cancel := context.WithTimeout(ctx, memberRemovalTimeout)
  defer cancel()

  members, err := client.MemberList(ctx)
  if err != nil {
    return err
  }

  for _, member := range members.Members {
    if !strings.Contains(member.Name, name) {
      continue
    }

    for _, peerURL := range member.PeerURLs {
      u, err := url.Parse(peerURL)
      if err != nil {
        return err
      }

      if u.Hostname() == address {
        log.V(1).Info("Removing member from ETCD", "name", member.Name, "id", member.ID, "address", address)

        _, err := client.MemberRemove(ctx, member.ID)
        if errors.Is(err, rpctypes.ErrGRPCMemberNotFound) {
          return nil
        }

        return err
      }
    }
  }

  return nil
}

func (p *StatefulSetReconciler) clusterToken(ctx context.Context, cluster *v1beta1.Cluster) (string, error) {
  var tokenSecret v1.Secret

  nn := types.NamespacedName{
    Name:      TokenSecretName(cluster.Name),
    Namespace: cluster.Namespace,
  }

  if cluster.Spec.TokenSecretRef != nil {
    nn.Name = TokenSecretName(cluster.Name)
  }

  if err := p.Client.Get(ctx, nn, &tokenSecret); err != nil {
    return "", err
  }

  if _, ok := tokenSecret.Data["token"]; !ok {
    return "", fmt.Errorf("no token field in secret %s/%s", nn.Namespace, nn.Name)
  }

  return string(tokenSecret.Data["token"]), nil
}

func (p *StatefulSetReconciler) handleDeletion(ctx context.Context, sts *apps.StatefulSet) (ctrl.Result, error) {
  log := ctrl.LoggerFrom(ctx)

  podList, err := p.listPods(ctx, sts)
  if err != nil {
    return reconcile.Result{}, err
  }

  for _, pod := range podList.Items {
    if controllerutil.RemoveFinalizer(&pod, etcdPodFinalizerName) {
      log.V(1).Info("Updating Server Pod removing finalizer", "name", pod.Name, "namespace", pod.Namespace)

      if err := p.Client.Update(ctx, &pod); err != nil {
        return reconcile.Result{}, err
      }
    }
  }

  if controllerutil.RemoveFinalizer(sts, etcdPodFinalizerName) {
    return reconcile.Result{}, p.Client.Update(ctx, sts)
  }

  return reconcile.Result{}, nil
}

func (p *StatefulSetReconciler) listPods(ctx context.Context, sts *apps.StatefulSet) (*v1.PodList, error) {
  selector, err := metav1.LabelSelectorAsSelector(sts.Spec.Selector)
  if err != nil {
    return nil, fmt.Errorf("failed to create selector from statefulset: %w", err)
  }

  listOpts := &ctrlruntimeclient.ListOptions{
    Namespace:     sts.Namespace,
    LabelSelector: selector,
  }

  var podList v1.PodList
  if err := p.Client.List(ctx, &podList, listOpts); err != nil {
    return nil, ctrlruntimeclient.IgnoreNotFound(err)
  }

  return &podList, nil
}
@@ -1,14 +1,16 @@
package cluster

import (
  "context"
  "errors"

  "k8s.io/apimachinery/pkg/api/meta"

  v1 "k8s.io/api/core/v1"
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  ctrl "sigs.k8s.io/controller-runtime"

  "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
  "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
  "github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
)

@@ -24,9 +26,12 @@ const (
  ReasonTerminating = "Terminating"
)

func (c *ClusterReconciler) updateStatus(cluster *v1alpha1.Cluster, reconcileErr error) {
func (c *ClusterReconciler) updateStatus(ctx context.Context, cluster *v1beta1.Cluster, reconcileErr error) {
  log := ctrl.LoggerFrom(ctx)
  log.V(1).Info("Updating Cluster Conditions")

  if !cluster.DeletionTimestamp.IsZero() {
    cluster.Status.Phase = v1alpha1.ClusterTerminating
    cluster.Status.Phase = v1beta1.ClusterTerminating
    meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
      Type:   ConditionReady,
      Status: metav1.ConditionFalse,
@@ -39,7 +44,7 @@ func (c *ClusterReconciler) updateStatus(cluster *v1alpha1.Cluster, reconcileErr

  // Handle validation errors specifically to set the Pending phase.
  if errors.Is(reconcileErr, ErrClusterValidation) {
    cluster.Status.Phase = v1alpha1.ClusterPending
    cluster.Status.Phase = v1beta1.ClusterPending
    meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
      Type:   ConditionReady,
      Status: metav1.ConditionFalse,
@@ -53,7 +58,7 @@ func (c *ClusterReconciler) updateStatus(cluster *v1alpha1.Cluster, reconcileErr
  }

  if errors.Is(reconcileErr, bootstrap.ErrServerNotReady) {
    cluster.Status.Phase = v1alpha1.ClusterProvisioning
    cluster.Status.Phase = v1beta1.ClusterProvisioning
    meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
      Type:   ConditionReady,
      Status: metav1.ConditionFalse,
@@ -66,7 +71,7 @@ func (c *ClusterReconciler) updateStatus(cluster *v1alpha1.Cluster, reconcileErr

  // If there's an error, but it's not a validation error, the cluster is in a failed state.
  if reconcileErr != nil {
    cluster.Status.Phase = v1alpha1.ClusterFailed
    cluster.Status.Phase = v1beta1.ClusterFailed
    meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
      Type:   ConditionReady,
      Status: metav1.ConditionFalse,
@@ -80,7 +85,7 @@ func (c *ClusterReconciler) updateStatus(cluster *v1alpha1.Cluster, reconcileErr
  }

  // If we reach here, everything is successful.
  cluster.Status.Phase = v1alpha1.ClusterReady
  cluster.Status.Phase = v1beta1.ClusterReady
  newCondition := metav1.Condition{
    Type:   ConditionReady,
    Status: metav1.ConditionTrue,

@@ -15,11 +15,11 @@ import (
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  ctrl "sigs.k8s.io/controller-runtime"

  "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
  "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
  "github.com/rancher/k3k/pkg/controller"
)

func (c *ClusterReconciler) token(ctx context.Context, cluster *v1alpha1.Cluster) (string, error) {
func (c *ClusterReconciler) token(ctx context.Context, cluster *v1beta1.Cluster) (string, error) {
  if cluster.Spec.TokenSecretRef == nil {
    return c.ensureTokenSecret(ctx, cluster)
  }
@@ -42,7 +42,7 @@ func (c *ClusterReconciler) token(ctx context.Context, cluster *v1alpha1.Cluster
  return string(tokenSecret.Data["token"]), nil
}

func (c *ClusterReconciler) ensureTokenSecret(ctx context.Context, cluster *v1alpha1.Cluster) (string, error) {
func (c *ClusterReconciler) ensureTokenSecret(ctx context.Context, cluster *v1beta1.Cluster) (string, error) {
  log := ctrl.LoggerFrom(ctx)

  // check if the secret is already created
@@ -62,7 +62,7 @@ func (c *ClusterReconciler) ensureTokenSecret(ctx context.Context, cluster *v1al
  return string(tokenSecret.Data["token"]), nil
}

  log.Info("Token secret is not specified, creating a random token")
  log.V(1).Info("Token secret is not specified, creating a random token")

  token, err := random(16)
  if err != nil {
@@ -77,7 +77,7 @@ func (c *ClusterReconciler) ensureTokenSecret(ctx context.Context, cluster *v1al
  })

  if result != controllerutil.OperationResultNone {
    log.Info("ensuring tokenSecret", "key", key, "result", result)
    log.V(1).Info("Ensuring tokenSecret", "key", key, "result", result)
  }

  return token, err

@@ -9,7 +9,7 @@ import (

  "k8s.io/apimachinery/pkg/util/wait"

  "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
  "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)

const (
@@ -25,19 +25,21 @@ var Backoff = wait.Backoff{
  Jitter: 0.1,
}

// K3SImage returns the rancher/k3s image tagged with the specified Version.
// Image returns the rancher/k3s image tagged with the specified Version.
// If Version is empty it will use with the same k8s version of the host cluster,
// stored in the Status object. It will return the untagged version as last fallback.
func K3SImage(cluster *v1alpha1.Cluster, k3SImage string) string {
// stored in the Status object. It will return the latest version as last fallback.
func K3SImage(cluster *v1beta1.Cluster, k3SImage string) string {
  image := k3SImage

  imageVersion := "latest"

  if cluster.Spec.Version != "" {
    return k3SImage + ":" + cluster.Spec.Version
    imageVersion = cluster.Spec.Version
  } else if cluster.Status.HostVersion != "" {
    imageVersion = cluster.Status.HostVersion
  }

  if cluster.Status.HostVersion != "" {
    return k3SImage + ":" + cluster.Status.HostVersion
  }

  return k3SImage
  return image + ":" + imageVersion
}

// SafeConcatNameWithPrefix runs the SafeConcatName with extra prefix.
pkg/controller/controller_test.go (new file, 77 lines)
@@ -0,0 +1,77 @@
package controller

import (
  "testing"

  "github.com/stretchr/testify/assert"

  v1 "k8s.io/apimachinery/pkg/apis/meta/v1"

  "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)

func Test_K3S_Image(t *testing.T) {
  type args struct {
    cluster  *v1beta1.Cluster
    k3sImage string
  }

  tests := []struct {
    name         string
    args         args
    expectedData string
  }{
    {
      name: "cluster with assigned version spec",
      args: args{
        k3sImage: "rancher/k3s",
        cluster: &v1beta1.Cluster{
          ObjectMeta: v1.ObjectMeta{
            Name:      "mycluster",
            Namespace: "ns-1",
          },
          Spec: v1beta1.ClusterSpec{
            Version: "v1.2.3",
          },
        },
      },
      expectedData: "rancher/k3s:v1.2.3",
    },
    {
      name: "cluster with empty version spec and assigned hostVersion status",
      args: args{
        k3sImage: "rancher/k3s",
        cluster: &v1beta1.Cluster{
          ObjectMeta: v1.ObjectMeta{
            Name:      "mycluster",
            Namespace: "ns-1",
          },
          Status: v1beta1.ClusterStatus{
            HostVersion: "v4.5.6",
          },
        },
      },
      expectedData: "rancher/k3s:v4.5.6",
    },
    {
      name: "cluster with empty version spec and empty hostVersion status",
      args: args{
        k3sImage: "rancher/k3s",
        cluster: &v1beta1.Cluster{
          ObjectMeta: v1.ObjectMeta{
            Name:      "mycluster",
            Namespace: "ns-1",
          },
        },
      },
      expectedData: "rancher/k3s:latest",
    },
  }

  for _, tt := range tests {
    t.Run(tt.name, func(t *testing.T) {
      fullImage := K3SImage(tt.args.cluster, tt.args.k3sImage)
      assert.Equal(t, tt.expectedData, fullImage)
    })
  }
}
@@ -17,7 +17,7 @@ import (
  networkingv1 "k8s.io/api/networking/v1"
  clientcmdapi "k8s.io/client-go/tools/clientcmd/api"

  "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
  "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
  "github.com/rancher/k3k/pkg/controller"
  "github.com/rancher/k3k/pkg/controller/certs"
  "github.com/rancher/k3k/pkg/controller/cluster/server"
@@ -39,7 +39,7 @@ func New() *KubeConfig {
  }
}

func (k *KubeConfig) Generate(ctx context.Context, client client.Client, cluster *v1alpha1.Cluster, hostServerIP string, port int) (*clientcmdapi.Config, error) {
func (k *KubeConfig) Generate(ctx context.Context, client client.Client, cluster *v1beta1.Cluster, hostServerIP string, port int) (*clientcmdapi.Config, error) {
  bootstrapData, err := bootstrap.GetFromSecret(ctx, client, cluster)
  if err != nil {
    return nil, err
@@ -93,7 +93,7 @@ func NewConfig(url string, serverCA, clientCert, clientKey []byte) *clientcmdapi
  return config
}

func getURLFromService(ctx context.Context, client client.Client, cluster *v1alpha1.Cluster, hostServerIP string, serverPort int) (string, error) {
func getURLFromService(ctx context.Context, client client.Client, cluster *v1beta1.Cluster, hostServerIP string, serverPort int) (string, error) {
  // get the server service to extract the right IP
  key := types.NamespacedName{
    Name: server.ServiceName(cluster.Name),
@@ -11,13 +11,13 @@ import (
  networkingv1 "k8s.io/api/networking/v1"
  ctrl "sigs.k8s.io/controller-runtime"

  "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
  "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)

// reconcileNamespacePodSecurityLabels will update the labels of the namespace to reconcile the PSA level specified in the VirtualClusterPolicy
func (c *VirtualClusterPolicyReconciler) reconcileNamespacePodSecurityLabels(ctx context.Context, namespace *v1.Namespace, policy *v1alpha1.VirtualClusterPolicy) {
func (c *VirtualClusterPolicyReconciler) reconcileNamespacePodSecurityLabels(ctx context.Context, namespace *v1.Namespace, policy *v1beta1.VirtualClusterPolicy) {
  log := ctrl.LoggerFrom(ctx)
  log.Info("reconciling PSA labels")
  log.V(1).Info("Reconciling PSA labels")

  // cleanup of old labels
  delete(namespace.Labels, "pod-security.kubernetes.io/enforce")
@@ -33,7 +33,7 @@ func (c *VirtualClusterPolicyReconciler) reconcileNamespacePodSecurityLabels(ctx
  namespace.Labels["pod-security.kubernetes.io/enforce-version"] = "latest"

  // skip the 'warn' only for the privileged PSA level
  if psaLevel != v1alpha1.PrivilegedPodSecurityAdmissionLevel {
  if psaLevel != v1beta1.PrivilegedPodSecurityAdmissionLevel {
    namespace.Labels["pod-security.kubernetes.io/warn"] = string(psaLevel)
    namespace.Labels["pod-security.kubernetes.io/warn-version"] = "latest"
  }
@@ -44,7 +44,7 @@ func (c *VirtualClusterPolicyReconciler) reconcileNamespacePodSecurityLabels(ctx
// deleting the resources in them with the "app.kubernetes.io/managed-by=k3k-policy-controller" label
func (c *VirtualClusterPolicyReconciler) cleanupNamespaces(ctx context.Context) error {
  log := ctrl.LoggerFrom(ctx)
  log.Info("deleting resources")
  log.V(1).Info("Cleanup Namespace resources")

  var namespaces v1.NamespaceList
  if err := c.Client.List(ctx, &namespaces); err != nil {

@@ -11,13 +11,13 @@ import (
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  ctrl "sigs.k8s.io/controller-runtime"

  "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
  "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
  k3kcontroller "github.com/rancher/k3k/pkg/controller"
)

func (c *VirtualClusterPolicyReconciler) reconcileNetworkPolicy(ctx context.Context, namespace string, policy *v1alpha1.VirtualClusterPolicy) error {
func (c *VirtualClusterPolicyReconciler) reconcileNetworkPolicy(ctx context.Context, namespace string, policy *v1beta1.VirtualClusterPolicy) error {
  log := ctrl.LoggerFrom(ctx)
  log.Info("reconciling NetworkPolicy")
  log.V(1).Info("Reconciling NetworkPolicy")

  var cidrList []string

@@ -46,20 +46,25 @@ func (c *VirtualClusterPolicyReconciler) reconcileNetworkPolicy(ctx context.Cont

  // if disabled then delete the existing network policy
  if policy.Spec.DisableNetworkPolicy {
    err := c.Client.Delete(ctx, networkPolicy)
    return client.IgnoreNotFound(err)
    log.V(1).Info("Deleting NetworkPolicy")

    return client.IgnoreNotFound(c.Client.Delete(ctx, networkPolicy))
  }

  log.V(1).Info("Creating NetworkPolicy")

  // otherwise try to create/update
  err := c.Client.Create(ctx, networkPolicy)
  if apierrors.IsAlreadyExists(err) {
    log.V(1).Info("NetworkPolicy already exists, updating.")

    return c.Client.Update(ctx, networkPolicy)
  }

  return err
}

func networkPolicy(namespaceName string, policy *v1alpha1.VirtualClusterPolicy, cidrList []string) *networkingv1.NetworkPolicy {
func networkPolicy(namespaceName string, policy *v1beta1.VirtualClusterPolicy, cidrList []string) *networkingv1.NetworkPolicy {
  return &networkingv1.NetworkPolicy{
    TypeMeta: metav1.TypeMeta{
      Kind: "NetworkPolicy",

@@ -21,7 +21,7 @@ import (
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  ctrl "sigs.k8s.io/controller-runtime"

  "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
  "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
  k3kcontroller "github.com/rancher/k3k/pkg/controller"
)

@@ -46,10 +46,10 @@ func Add(mgr manager.Manager, clusterCIDR string, maxConcurrentReconciles int) e
  }

  return ctrl.NewControllerManagedBy(mgr).
    For(&v1alpha1.VirtualClusterPolicy{}).
    For(&v1beta1.VirtualClusterPolicy{}).
    Watches(&v1.Namespace{}, namespaceEventHandler()).
    Watches(&v1.Node{}, nodeEventHandler(&reconciler)).
    Watches(&v1alpha1.Cluster{}, clusterEventHandler(&reconciler)).
    Watches(&v1beta1.Cluster{}, clusterEventHandler(&reconciler)).
    Owns(&networkingv1.NetworkPolicy{}).
    Owns(&v1.ResourceQuota{}).
    Owns(&v1.LimitRange{}).
@@ -129,7 +129,7 @@ func namespaceEventHandler() handler.Funcs {
func nodeEventHandler(r *VirtualClusterPolicyReconciler) handler.Funcs {
  // enqueue all the available VirtualClusterPolicies
  enqueueAllVCPs := func(ctx context.Context, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
    vcpList := &v1alpha1.VirtualClusterPolicyList{}
    vcpList := &v1beta1.VirtualClusterPolicyList{}
    if err := r.Client.List(ctx, vcpList); err != nil {
      return
    }
@@ -193,7 +193,7 @@ func clusterEventHandler(r *VirtualClusterPolicyReconciler) handler.Funcs {
  return handler.Funcs{
    // When a Cluster is created, if its Namespace has the "policy.k3k.io/policy-name" label
    CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
      cluster, ok := e.Object.(*v1alpha1.Cluster)
      cluster, ok := e.Object.(*v1beta1.Cluster)
      if !ok {
        return
      }
@@ -210,8 +210,8 @@ func clusterEventHandler(r *VirtualClusterPolicyReconciler) handler.Funcs {
    // When a Cluster is updated, if its Namespace has the "policy.k3k.io/policy-name" label
    // and if some of its spec influenced by the policy changed
    UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
      oldCluster, okOld := e.ObjectOld.(*v1alpha1.Cluster)
      newCluster, okNew := e.ObjectNew.(*v1alpha1.Cluster)
      oldCluster, okOld := e.ObjectOld.(*v1beta1.Cluster)
      newCluster, okNew := e.ObjectNew.(*v1beta1.Cluster)

      if !okOld || !okNew {
        return
@@ -248,9 +248,9 @@ func clusterEventHandler(r *VirtualClusterPolicyReconciler) handler.Funcs {

func (c *VirtualClusterPolicyReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
  log := ctrl.LoggerFrom(ctx)
  log.Info("reconciling VirtualClusterPolicy")
  log.Info("Reconciling VirtualClusterPolicy")

  var policy v1alpha1.VirtualClusterPolicy
  var policy v1beta1.VirtualClusterPolicy
  if err := c.Client.Get(ctx, req.NamespacedName, &policy); err != nil {
    return reconcile.Result{}, client.IgnoreNotFound(err)
  }
@@ -261,6 +261,8 @@ func (c *VirtualClusterPolicyReconciler) Reconcile(ctx context.Context, req reco

  // update Status if needed
  if !reflect.DeepEqual(orig.Status, policy.Status) {
    log.Info("Updating VirtualClusterPolicy Status")

    if err := c.Client.Status().Update(ctx, &policy); err != nil {
      return reconcile.Result{}, err
    }
@@ -273,6 +275,8 @@ func (c *VirtualClusterPolicyReconciler) Reconcile(ctx context.Context, req reco

  // update VirtualClusterPolicy if needed
  if !reflect.DeepEqual(orig, policy) {
    log.Info("Updating VirtualClusterPolicy")

    if err := c.Client.Update(ctx, &policy); err != nil {
      return reconcile.Result{}, err
    }
@@ -281,7 +285,7 @@ func (c *VirtualClusterPolicyReconciler) Reconcile(ctx context.Context, req reco
  return reconcile.Result{}, nil
}

func (c *VirtualClusterPolicyReconciler) reconcileVirtualClusterPolicy(ctx context.Context, policy *v1alpha1.VirtualClusterPolicy) error {
func (c *VirtualClusterPolicyReconciler) reconcileVirtualClusterPolicy(ctx context.Context, policy *v1beta1.VirtualClusterPolicy) error {
  if err := c.reconcileMatchingNamespaces(ctx, policy); err != nil {
    return err
  }
@@ -293,9 +297,9 @@ func (c *VirtualClusterPolicyReconciler) reconcileVirtualClusterPolicy(ctx conte
  return nil
}

func (c *VirtualClusterPolicyReconciler) reconcileMatchingNamespaces(ctx context.Context, policy *v1alpha1.VirtualClusterPolicy) error {
func (c *VirtualClusterPolicyReconciler) reconcileMatchingNamespaces(ctx context.Context, policy *v1beta1.VirtualClusterPolicy) error {
  log := ctrl.LoggerFrom(ctx)
  log.Info("reconciling matching Namespaces")
  log.V(1).Info("Reconciling matching Namespaces")

  listOpts := client.MatchingLabels{
    PolicyNameLabelKey: policy.Name,
@@ -307,8 +311,10 @@ func (c *VirtualClusterPolicyReconciler) reconcileMatchingNamespaces(ctx context
  }

  for _, ns := range namespaces.Items {
    ctx = ctrl.LoggerInto(ctx, log.WithValues("namespace", ns.Name))
    log.Info("reconciling Namespace")
    log = log.WithValues("namespace", ns.Name)
    ctx = ctrl.LoggerInto(ctx, log)

    log.V(1).Info("Reconciling Namespace")

    orig := ns.DeepCopy()

@@ -331,6 +337,8 @@ func (c *VirtualClusterPolicyReconciler) reconcileMatchingNamespaces(ctx context
    c.reconcileNamespacePodSecurityLabels(ctx, &ns, policy)

    if !reflect.DeepEqual(orig, &ns) {
      log.Info("Updating Namespace")

      if err := c.Client.Update(ctx, &ns); err != nil {
        return err
      }
@@ -340,9 +348,9 @@ func (c *VirtualClusterPolicyReconciler) reconcileMatchingNamespaces(ctx context
  return nil
}

func (c *VirtualClusterPolicyReconciler) reconcileQuota(ctx context.Context, namespace string, policy *v1alpha1.VirtualClusterPolicy) error {
func (c *VirtualClusterPolicyReconciler) reconcileQuota(ctx context.Context, namespace string, policy *v1beta1.VirtualClusterPolicy) error {
  log := ctrl.LoggerFrom(ctx)
  log.Info("reconciling ResourceQuota")
  log.V(1).Info("Reconciling ResourceQuota")

  if policy.Spec.Quota == nil {
    // check if resourceQuota object exists and deletes it.
@@ -357,6 +365,8 @@ func (c *VirtualClusterPolicyReconciler) reconcileQuota(ctx context.Context, nam
      return client.IgnoreNotFound(err)
    }

    log.V(1).Info("Deleting ResourceQuota")

    return c.Client.Delete(ctx, &toDeleteResourceQuota)
  }

@@ -381,17 +391,21 @@ func (c *VirtualClusterPolicyReconciler) reconcileQuota(ctx context.Context, nam
    return err
  }

  log.V(1).Info("Creating ResourceQuota")

  err := c.Client.Create(ctx, resourceQuota)
  if apierrors.IsAlreadyExists(err) {
    log.V(1).Info("ResourceQuota already exists, updating.")

    return c.Client.Update(ctx, resourceQuota)
  }

  return err
}

func (c *VirtualClusterPolicyReconciler) reconcileLimit(ctx context.Context, namespace string, policy *v1alpha1.VirtualClusterPolicy) error {
func (c *VirtualClusterPolicyReconciler) reconcileLimit(ctx context.Context, namespace string, policy *v1beta1.VirtualClusterPolicy) error {
  log := ctrl.LoggerFrom(ctx)
  log.Info("reconciling LimitRange")
  log.V(1).Info("Reconciling LimitRange")

  // delete limitrange if spec.limits isnt specified.
  if policy.Spec.Limit == nil {
@@ -406,6 +420,8 @@ func (c *VirtualClusterPolicyReconciler) reconcileLimit(ctx context.Context, nam
      return client.IgnoreNotFound(err)
    }

    log.V(1).Info("Deleting LimitRange")

    return c.Client.Delete(ctx, &toDeleteLimitRange)
  }

@@ -429,19 +445,23 @@ func (c *VirtualClusterPolicyReconciler) reconcileLimit(ctx context.Context, nam
    return err
  }

  log.V(1).Info("Creating LimitRange")

  err := c.Client.Create(ctx, limitRange)
  if apierrors.IsAlreadyExists(err) {
    log.V(1).Info("LimitRange already exists, updating.")

    return c.Client.Update(ctx, limitRange)
  }

  return err
}

func (c *VirtualClusterPolicyReconciler) reconcileClusters(ctx context.Context, namespace *v1.Namespace, policy *v1alpha1.VirtualClusterPolicy) error {
func (c *VirtualClusterPolicyReconciler) reconcileClusters(ctx context.Context, namespace *v1.Namespace, policy *v1beta1.VirtualClusterPolicy) error {
  log := ctrl.LoggerFrom(ctx)
  log.Info("reconciling Clusters")
  log.V(1).Info("Reconciling Clusters")

  var clusters v1alpha1.ClusterList
  var clusters v1beta1.ClusterList
  if err := c.Client.List(ctx, &clusters, client.InNamespace(namespace.Name)); err != nil {
    return err
  }
@@ -455,6 +475,8 @@ func (c *VirtualClusterPolicyReconciler) reconcileClusters(ctx context.Context,
    cluster.Spec.NodeSelector = policy.Spec.DefaultNodeSelector

    if !reflect.DeepEqual(orig, cluster) {
      log.V(1).Info("Updating Cluster", "cluster", cluster.Name, "namespace", namespace.Name)

      // continue updating also the other clusters even if an error occurred
      clusterUpdateErrs = append(clusterUpdateErrs, c.Client.Update(ctx, &cluster))
    }
@@ -16,7 +16,7 @@ import (
  networkingv1 "k8s.io/api/networking/v1"
  ctrl "sigs.k8s.io/controller-runtime"

  "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
  "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
  "github.com/rancher/k3k/pkg/controller/policy"

  . "github.com/onsi/ginkgo/v2"
@@ -81,7 +81,7 @@ func buildScheme() *runtime.Scheme {
  Expect(err).NotTo(HaveOccurred())
  err = networkingv1.AddToScheme(scheme)
  Expect(err).NotTo(HaveOccurred())
  err = v1alpha1.AddToScheme(scheme)
  err = v1beta1.AddToScheme(scheme)
  Expect(err).NotTo(HaveOccurred())

  return scheme

@@ -8,13 +8,14 @@ import (
  "k8s.io/apimachinery/pkg/api/resource"
  "k8s.io/apimachinery/pkg/types"
  "k8s.io/utils/ptr"
  "sigs.k8s.io/controller-runtime/pkg/client"

  v1 "k8s.io/api/core/v1"
  networkingv1 "k8s.io/api/networking/v1"
  apierrors "k8s.io/apimachinery/pkg/api/errors"
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

  "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
  "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
  k3kcontroller "github.com/rancher/k3k/pkg/controller"
  "github.com/rancher/k3k/pkg/controller/policy"

@@ -25,25 +26,25 @@ import (
var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("VirtualClusterPolicy"), func() {
  Context("creating a VirtualClusterPolicy", func() {
    It("should have the 'shared' allowedMode", func() {
      policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{})
      Expect(policy.Spec.AllowedMode).To(Equal(v1alpha1.SharedClusterMode))
      policy := newPolicy(v1beta1.VirtualClusterPolicySpec{})
      Expect(policy.Spec.AllowedMode).To(Equal(v1beta1.SharedClusterMode))
    })

    It("should have the 'virtual' mode if specified", func() {
      policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
        AllowedMode: v1alpha1.VirtualClusterMode,
      policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
        AllowedMode: v1beta1.VirtualClusterMode,
      })

      Expect(policy.Spec.AllowedMode).To(Equal(v1alpha1.VirtualClusterMode))
      Expect(policy.Spec.AllowedMode).To(Equal(v1beta1.VirtualClusterMode))
    })

    It("should fail for a non-existing mode", func() {
      policy := &v1alpha1.VirtualClusterPolicy{
      policy := &v1beta1.VirtualClusterPolicy{
        ObjectMeta: metav1.ObjectMeta{
          GenerateName: "policy-",
        },
        Spec: v1alpha1.VirtualClusterPolicySpec{
          AllowedMode: v1alpha1.ClusterMode("non-existing"),
        Spec: v1beta1.VirtualClusterPolicySpec{
          AllowedMode: v1beta1.ClusterMode("non-existing"),
        },
      }

@@ -66,7 +67,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
    })

    It("should create a NetworkPolicy", func() {
      policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{})
      policy := newPolicy(v1beta1.VirtualClusterPolicySpec{})
      bindPolicyToNamespace(namespace, policy)

      // look for network policies etc
@@ -121,7 +122,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
    })

    It("should recreate the NetworkPolicy if deleted", func() {
      policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{})
      policy := newPolicy(v1beta1.VirtualClusterPolicySpec{})
      bindPolicyToNamespace(namespace, policy)

      // look for network policy
@@ -163,12 +164,12 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("

    It("should add and update the proper pod-security labels to the namespace", func() {
      var (
        privileged = v1alpha1.PrivilegedPodSecurityAdmissionLevel
        baseline   = v1alpha1.BaselinePodSecurityAdmissionLevel
        restricted = v1alpha1.RestrictedPodSecurityAdmissionLevel
        privileged = v1beta1.PrivilegedPodSecurityAdmissionLevel
        baseline   = v1beta1.BaselinePodSecurityAdmissionLevel
        restricted = v1beta1.RestrictedPodSecurityAdmissionLevel
      )

      policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
      policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
        PodSecurityAdmissionLevel: &privileged,
      })

@@ -195,8 +196,12 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("

      // Check baseline

      // get policy again
      err := k8sClient.Get(ctx, client.ObjectKeyFromObject(policy), policy)
      Expect(err).To(Not(HaveOccurred()))

      policy.Spec.PodSecurityAdmissionLevel = &baseline
      err := k8sClient.Update(ctx, policy)
      err = k8sClient.Update(ctx, policy)
      Expect(err).To(Not(HaveOccurred()))

      // wait a bit for the namespace to be updated
@@ -259,9 +264,9 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
    })

    It("should restore the labels if Namespace is updated", func() {
      privileged := v1alpha1.PrivilegedPodSecurityAdmissionLevel
      privileged := v1beta1.PrivilegedPodSecurityAdmissionLevel

      policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
      policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
        PodSecurityAdmissionLevel: &privileged,
      })

@@ -303,19 +308,19 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
    })

    It("should update Cluster's PriorityClass", func() {
      policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
      policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
        DefaultPriorityClass: "foobar",
      })

      bindPolicyToNamespace(namespace, policy)

      cluster := &v1alpha1.Cluster{
      cluster := &v1beta1.Cluster{
        ObjectMeta: metav1.ObjectMeta{
          GenerateName: "cluster-",
          Namespace:    namespace.Name,
        },
        Spec: v1alpha1.ClusterSpec{
          Mode: v1alpha1.SharedClusterMode,
        Spec: v1beta1.ClusterSpec{
          Mode:    v1beta1.SharedClusterMode,
          Servers: ptr.To[int32](1),
          Agents:  ptr.To[int32](0),
        },
@@ -337,7 +342,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
    })

    It("should update Cluster's NodeSelector", func() {
      policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
      policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
        DefaultNodeSelector: map[string]string{"label-1": "value-1"},
      })
      bindPolicyToNamespace(namespace, policy)
@@ -345,13 +350,13 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
      err := k8sClient.Update(ctx, policy)
      Expect(err).To(Not(HaveOccurred()))

      cluster := &v1alpha1.Cluster{
      cluster := &v1beta1.Cluster{
        ObjectMeta: metav1.ObjectMeta{
          GenerateName: "cluster-",
          Namespace:    namespace.Name,
        },
        Spec: v1alpha1.ClusterSpec{
          Mode: v1alpha1.SharedClusterMode,
        Spec: v1beta1.ClusterSpec{
          Mode:    v1beta1.SharedClusterMode,
          Servers: ptr.To[int32](1),
          Agents:  ptr.To[int32](0),
        },
@@ -373,18 +378,18 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
    })

    It("should update the nodeSelector if changed", func() {
      policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
      policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
        DefaultNodeSelector: map[string]string{"label-1": "value-1"},
      })
      bindPolicyToNamespace(namespace, policy)

      cluster := &v1alpha1.Cluster{
      cluster := &v1beta1.Cluster{
        ObjectMeta: metav1.ObjectMeta{
          GenerateName: "cluster-",
          Namespace:    namespace.Name,
        },
        Spec: v1alpha1.ClusterSpec{
          Mode: v1alpha1.SharedClusterMode,
        Spec: v1beta1.ClusterSpec{
          Mode:         v1beta1.SharedClusterMode,
          Servers:      ptr.To[int32](1),
          Agents:       ptr.To[int32](0),
          NodeSelector: map[string]string{"label-1": "value-1"},
@@ -421,7 +426,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("

      // wait a bit and check it's restored
      Eventually(func() bool {
        var updatedCluster v1alpha1.Cluster
        var updatedCluster v1beta1.Cluster

        key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}
        err = k8sClient.Get(ctx, key, &updatedCluster)
@@ -434,7 +439,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
    })

    It("should create a ResourceQuota if Quota is enabled", func() {
      policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
      policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
        Quota: &v1.ResourceQuotaSpec{
          Hard: v1.ResourceList{
            v1.ResourceCPU: resource.MustParse("800m"),
@@ -462,7 +467,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
    })

    It("should delete the ResourceQuota if Quota is deleted", func() {
      policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
      policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
        Quota: &v1.ResourceQuotaSpec{
          Hard: v1.ResourceList{
            v1.ResourceCPU: resource.MustParse("800m"),
@@ -486,8 +491,11 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
        WithPolling(time.Second).
        Should(BeNil())

      // get policy again
      err := k8sClient.Get(ctx, client.ObjectKeyFromObject(policy), policy)
      Expect(err).To(Not(HaveOccurred()))
      policy.Spec.Quota = nil
      err := k8sClient.Update(ctx, policy)
      err = k8sClient.Update(ctx, policy)
      Expect(err).To(Not(HaveOccurred()))

      // wait for a bit for the resourceQuota to be deleted
@@ -505,7 +513,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
    })

    It("should delete the ResourceQuota if unbound", func() {
      clusterPolicy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
      clusterPolicy := newPolicy(v1beta1.VirtualClusterPolicySpec{
        Quota: &v1.ResourceQuotaSpec{
          Hard: v1.ResourceList{
            v1.ResourceCPU: resource.MustParse("800m"),
@@ -550,10 +558,10 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
    })
  })

func newPolicy(spec v1alpha1.VirtualClusterPolicySpec) *v1alpha1.VirtualClusterPolicy {
func newPolicy(spec v1beta1.VirtualClusterPolicySpec) *v1beta1.VirtualClusterPolicy {
  GinkgoHelper()

  policy := &v1alpha1.VirtualClusterPolicy{
  policy := &v1beta1.VirtualClusterPolicy{
    ObjectMeta: metav1.ObjectMeta{
      GenerateName: "policy-",
    },
@@ -566,7 +574,7 @@ func newPolicy(spec v1alpha1.VirtualClusterPolicySpec) *v1alpha1.VirtualClusterP
  return policy
}

func bindPolicyToNamespace(namespace *v1.Namespace, pol *v1alpha1.VirtualClusterPolicy) {
func bindPolicyToNamespace(namespace *v1.Namespace, pol *v1beta1.VirtualClusterPolicy) {
  GinkgoHelper()

  if len(namespace.Labels) == 0 {
@@ -3,50 +3,36 @@ package log
import (
  "os"

  "github.com/virtual-kubelet/virtual-kubelet/log"
  "go.uber.org/zap"
  "go.uber.org/zap/zapcore"

  ctrlruntimezap "sigs.k8s.io/controller-runtime/pkg/log/zap"
)

type Logger struct {
  *zap.SugaredLogger
}

func New(debug bool) *Logger {
  return &Logger{newZappLogger(debug).Sugar()}
}

func (l *Logger) WithError(err error) log.Logger {
  return l
}

func (l *Logger) WithField(string, any) log.Logger {
  return l
}

func (l *Logger) WithFields(field log.Fields) log.Logger {
  return l
}

func (l *Logger) Named(name string) *Logger {
  l.SugaredLogger = l.SugaredLogger.Named(name)
  return l
}

func newZappLogger(debug bool) *zap.Logger {
  encCfg := zap.NewProductionEncoderConfig()
  encCfg.TimeKey = "timestamp"
  encCfg.EncodeTime = zapcore.ISO8601TimeEncoder

func New(debug bool, format string) *zap.Logger {
  lvl := zap.NewAtomicLevelAt(zap.InfoLevel)
  if debug {
    lvl = zap.NewAtomicLevelAt(zap.DebugLevel)
  }

  encoder := zapcore.NewJSONEncoder(encCfg)
  core := zapcore.NewCore(&ctrlruntimezap.KubeAwareEncoder{Encoder: encoder}, zapcore.AddSync(os.Stderr), lvl)
  encoder := newEncoder(format)
  core := zapcore.NewCore(encoder, zapcore.AddSync(os.Stderr), lvl)

  return zap.New(core)
}

func newEncoder(format string) zapcore.Encoder {
  encCfg := zap.NewProductionEncoderConfig()
  encCfg.TimeKey = "timestamp"
  encCfg.EncodeTime = zapcore.ISO8601TimeEncoder

  var encoder zapcore.Encoder
  if format == "text" {
    encCfg.EncodeLevel = zapcore.CapitalColorLevelEncoder
    encoder = zapcore.NewConsoleEncoder(encCfg)
  } else {
    encoder = zapcore.NewJSONEncoder(encCfg)
  }

  return &ctrlruntimezap.KubeAwareEncoder{Encoder: encoder}
}
tests/cluster_certs_test.go (new file, 117 lines)
@@ -0,0 +1,117 @@
package k3k_test

import (
  "context"
  "os"
  "strings"

  v1 "k8s.io/apimachinery/pkg/apis/meta/v1"

  "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"

  . "github.com/onsi/ginkgo/v2"
  . "github.com/onsi/gomega"
)

var _ = When("a cluster with custom certificates is installed with individual cert secrets", Label("e2e"), Label(certificatesTestsLabel), func() {
  var virtualCluster *VirtualCluster

  BeforeEach(func() {
    ctx := context.Background()

    namespace := NewNamespace()

    DeferCleanup(func() {
      DeleteNamespaces(namespace.Name)
    })

    // create custom cert secret
    customCertDir := "testdata/customcerts/"

    certList := []string{
      "server-ca",
      "client-ca",
      "request-header-ca",
      "service",
      "etcd-peer-ca",
      "etcd-server-ca",
    }

    for _, certName := range certList {
      var cert, key []byte
      var err error
      filePathPrefix := ""
      certfile := certName
      if strings.HasPrefix(certName, "etcd") {
        filePathPrefix = "etcd/"
        certfile = strings.TrimPrefix(certName, "etcd-")
      }
      if !strings.Contains(certName, "service") {
        cert, err = os.ReadFile(customCertDir + filePathPrefix + certfile + ".crt")
        Expect(err).To(Not(HaveOccurred()))
      }
      key, err = os.ReadFile(customCertDir + filePathPrefix + certfile + ".key")
      Expect(err).To(Not(HaveOccurred()))

      certSecret := caCertSecret(certName, namespace.Name, cert, key)
      err = k8sClient.Create(ctx, certSecret)
      Expect(err).To(Not(HaveOccurred()))
    }

    cluster := NewCluster(namespace.Name)

    cluster.Spec.CustomCAs = &v1beta1.CustomCAs{
      Enabled: true,
      Sources: v1beta1.CredentialSources{
        ServerCA: v1beta1.CredentialSource{
          SecretName: "server-ca",
        },
        ClientCA: v1beta1.CredentialSource{
          SecretName: "client-ca",
        },
        ETCDServerCA: v1beta1.CredentialSource{
          SecretName: "etcd-server-ca",
        },
        ETCDPeerCA: v1beta1.CredentialSource{
          SecretName: "etcd-peer-ca",
        },
        RequestHeaderCA: v1beta1.CredentialSource{
          SecretName: "request-header-ca",
        },
        ServiceAccountToken: v1beta1.CredentialSource{
          SecretName: "service",
        },
      },
    }

    CreateCluster(cluster)

    client, restConfig := NewVirtualK8sClientAndConfig(cluster)

    virtualCluster = &VirtualCluster{
      Cluster:    cluster,
      RestConfig: restConfig,
      Client:     client,
    }
  })

  It("will load the custom certs in the server pod", func() {
    ctx := context.Background()

    labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server"
    serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
    Expect(err).To(Not(HaveOccurred()))

    Expect(len(serverPods.Items)).To(Equal(1))
    serverPod := serverPods.Items[0]

    // check server-ca.crt
    serverCACrtPath := "/var/lib/rancher/k3s/server/tls/server-ca.crt"
    serverCACrt, err := readFileWithinPod(ctx, k8s, restcfg, serverPod.Name, serverPod.Namespace, serverCACrtPath)
    Expect(err).To(Not(HaveOccurred()))

    serverCACrtTestFile, err := os.ReadFile("testdata/customcerts/server-ca.crt")
    Expect(err).To(Not(HaveOccurred()))
    Expect(serverCACrt).To(Equal(serverCACrtTestFile))
  })
})
@@ -5,7 +5,7 @@ import (
  . "github.com/onsi/gomega"
)

var _ = When("two virtual clusters are installed", Label("e2e"), func() {
var _ = When("two virtual clusters are installed", Label("e2e"), Label(networkingTestsLabel), func() {
  var (
    cluster1 *VirtualCluster
    cluster2 *VirtualCluster
@@ -28,7 +28,6 @@ var _ = When("two virtual clusters are installed", Label("e2e"), func() {

  var (
    stdout  string
    stderr  string
    curlCmd string
    err     error
  )
@@ -70,25 +69,25 @@ var _ = When("two virtual clusters are installed", Label("e2e"), func() {
  // Pods in Cluster 1 should not be able to reach the Pod in Cluster 2

  curlCmd = "curl --no-progress-meter " + pod1Cluster2IP
  _, stderr, err = cluster1.ExecCmd(pod1Cluster1, curlCmd)
  stdout, _, err = cluster1.ExecCmd(pod1Cluster1, curlCmd)
  Expect(err).Should(HaveOccurred())
  Expect(stderr).To(ContainSubstring("Failed to connect"))
  Expect(stdout).To(Not(ContainSubstring("Welcome to nginx!")))

  curlCmd = "curl --no-progress-meter " + pod1Cluster2IP
  _, stderr, err = cluster1.ExecCmd(pod2Cluster1, curlCmd)
  stdout, _, err = cluster1.ExecCmd(pod2Cluster1, curlCmd)
  Expect(err).To(HaveOccurred())
  Expect(stderr).To(ContainSubstring("Failed to connect"))
  Expect(stdout).To(Not(ContainSubstring("Welcome to nginx!")))

  // Pod in Cluster 2 should not be able to reach Pods in Cluster 1

  curlCmd = "curl --no-progress-meter " + pod1Cluster1IP
  _, stderr, err = cluster2.ExecCmd(pod1Cluster2, curlCmd)
  stdout, _, err = cluster2.ExecCmd(pod1Cluster2, curlCmd)
  Expect(err).To(HaveOccurred())
  Expect(stderr).To(ContainSubstring("Failed to connect"))
  Expect(stdout).To(Not(ContainSubstring("Welcome to nginx!")))

  curlCmd = "curl --no-progress-meter " + pod2Cluster1IP
  _, stderr, err = cluster2.ExecCmd(pod1Cluster2, curlCmd)
  stdout, _, err = cluster2.ExecCmd(pod1Cluster2, curlCmd)
  Expect(err).To(HaveOccurred())
  Expect(stderr).To(ContainSubstring("Failed to connect"))
  Expect(stdout).To(Not(ContainSubstring("Welcome to nginx!")))
  })
})
214
tests/cluster_persistence_test.go
Normal file
214
tests/cluster_persistence_test.go
Normal file
@@ -0,0 +1,214 @@
package k3k_test

import (
	"context"
	"crypto/x509"
	"errors"
	"time"

	"k8s.io/utils/ptr"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/rancher/k3k/k3k-kubelet/translate"
	"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

var _ = When("an ephemeral cluster is installed", Label("e2e"), Label(persistenceTestsLabel), func() {
	var virtualCluster *VirtualCluster

	BeforeEach(func() {
		virtualCluster = NewVirtualCluster()

		DeferCleanup(func() {
			DeleteNamespaces(virtualCluster.Cluster.Namespace)
		})
	})

	It("can create a nginx pod", func() {
		_, _ = virtualCluster.NewNginxPod("")
	})

	It("deletes the pod in the virtual cluster when deleted from the host", func() {
		ctx := context.Background()
		pod, _ := virtualCluster.NewNginxPod("")

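		// The host translator maps the pod's name in the virtual cluster to the
		// name/namespace of the backing pod on the host cluster.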
		hostTranslator := translate.NewHostTranslator(virtualCluster.Cluster)
		namespacedName := hostTranslator.NamespacedName(pod)

		err := k8s.CoreV1().Pods(namespacedName.Namespace).Delete(ctx, namespacedName.Name, v1.DeleteOptions{})
		Expect(err).To(Not(HaveOccurred()))

		Eventually(func() bool {
			_, err := virtualCluster.Client.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, v1.GetOptions{})
			return apierrors.IsNotFound(err)
		}).
			WithPolling(time.Second * 5).
			WithTimeout(time.Minute).
			Should(BeTrue())
	})

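	// With ephemeral persistence the server's state does not survive a pod restart,
	// so the bootstrap secret (and with it the cluster CA) is regenerated: the old
	// kubeconfig should stop working and a freshly fetched one should succeed.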
It("regenerates the bootstrap secret after a restart", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
_, err := virtualCluster.Client.ServerVersion()
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server"
|
||||
serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
Expect(len(serverPods.Items)).To(Equal(1))
|
||||
serverPod := serverPods.Items[0]
|
||||
|
||||
GinkgoWriter.Printf("deleting pod %s/%s\n", serverPod.Namespace, serverPod.Name)
|
||||
|
||||
err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).Delete(ctx, serverPod.Name, v1.DeleteOptions{})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
By("Deleting server pod")
|
||||
|
||||
// check that the server pods restarted
|
||||
Eventually(func() any {
|
||||
serverPods, err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
Expect(len(serverPods.Items)).To(Equal(1))
|
||||
return serverPods.Items[0].DeletionTimestamp
|
||||
}).
|
||||
WithTimeout(time.Minute).
|
||||
WithPolling(time.Second * 5).
|
||||
Should(BeNil())
|
||||
|
||||
By("Server pod up and running again")
|
||||
|
||||
By("Using old k8s client configuration should fail")
|
||||
|
||||
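		// The cached client still trusts the old CA, so requests should now fail
		// certificate verification with an x509.UnknownAuthorityError.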
		Eventually(func() bool {
			_, err = virtualCluster.Client.DiscoveryClient.ServerVersion()
			var unknownAuthorityErr x509.UnknownAuthorityError
			return errors.As(err, &unknownAuthorityErr)
		}).
			WithTimeout(time.Minute * 2).
			WithPolling(time.Second * 5).
			Should(BeTrue())

		By("Recover new config should succeed")

		Eventually(func() error {
			virtualCluster.Client, virtualCluster.RestConfig = NewVirtualK8sClientAndConfig(virtualCluster.Cluster)
			_, err = virtualCluster.Client.DiscoveryClient.ServerVersion()
			return err
		}).
			WithTimeout(time.Minute * 2).
			WithPolling(time.Second * 5).
			Should(BeNil())
	})
})

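// Dynamic persistence, unlike the ephemeral mode above, keeps cluster state across
// server pod restarts (see the bootstrap secret test below).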
var _ = When("a dynamic cluster is installed", Label("e2e"), Label(persistenceTestsLabel), func() {
|
||||
var virtualCluster *VirtualCluster
|
||||
|
||||
BeforeEach(func() {
|
||||
virtualCluster = NewVirtualClusterWithType(v1beta1.DynamicPersistenceMode)
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
DeleteNamespaces(virtualCluster.Cluster.Namespace)
|
||||
})
|
||||
|
||||
It("can create a nginx pod", func() {
|
||||
_, _ = virtualCluster.NewNginxPod("")
|
||||
})
|
||||
|
||||
It("can delete the cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
By("Deleting cluster")
|
||||
|
||||
err := k8sClient.Delete(ctx, virtualCluster.Cluster)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
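		// Deleting the Cluster resource should eventually remove every pod in its
		// host namespace.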
		Eventually(func() []corev1.Pod {
			By("listing the pods in the namespace")

			podList, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{})
			Expect(err).To(Not(HaveOccurred()))

			GinkgoLogr.Info("podlist", "len", len(podList.Items))

			return podList.Items
		}).
			WithTimeout(2 * time.Minute).
			WithPolling(time.Second).
			Should(BeEmpty())
	})

	It("can delete a HA cluster", func() {
		ctx := context.Background()

		namespace := NewNamespace()

		DeferCleanup(func() {
			DeleteNamespaces(virtualCluster.Cluster.Namespace)
		})

		cluster := NewCluster(namespace.Name)
		cluster.Spec.Persistence.Type = v1beta1.DynamicPersistenceMode
		cluster.Spec.Servers = ptr.To[int32](2)

		CreateCluster(cluster)

		client, restConfig := NewVirtualK8sClientAndConfig(cluster)

		virtualCluster := &VirtualCluster{
			Cluster:    cluster,
			RestConfig: restConfig,
			Client:     client,
		}

		By("Deleting cluster")

		err := k8sClient.Delete(ctx, virtualCluster.Cluster)
		Expect(err).To(Not(HaveOccurred()))

		Eventually(func() []corev1.Pod {
			By("listing the pods in the namespace")

			podList, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{})
			Expect(err).To(Not(HaveOccurred()))

			GinkgoLogr.Info("podlist", "len", len(podList.Items))

			return podList.Items
		}).
			WithTimeout(time.Minute * 3).
			WithPolling(time.Second).
			Should(BeEmpty())
	})

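	// Unlike the ephemeral case, the bootstrap secret survives a server pod restart
	// here, so the client created before the restart should keep working.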
It("uses the same bootstrap secret after a restart", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
_, err := virtualCluster.Client.ServerVersion()
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
restartServerPod(ctx, virtualCluster)
|
||||
|
||||
By("Server pod up and running again")
|
||||
|
||||
By("Using old k8s client configuration should succeed")
|
||||
|
||||
Eventually(func() error {
|
||||
_, err = virtualCluster.Client.DiscoveryClient.ServerVersion()
|
||||
return err
|
||||
}).
|
||||
WithTimeout(2 * time.Minute).
|
||||
WithPolling(time.Second * 5).
|
||||
Should(BeNil())
|
||||
})
|
||||
})
|
||||
Some files were not shown because too many files have changed in this diff