mirror of
https://github.com/rancher/k3k.git
synced 2026-02-19 20:40:04 +00:00
Compare commits
119 Commits
v0.3.0
...
chart-0.3.
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
4fe36b3d0c | ||
|
|
01589bb359 | ||
|
|
30217df268 | ||
|
|
04198652d5 | ||
|
|
72eb819216 | ||
|
|
4d4003f6f9 | ||
|
|
aca01127f8 | ||
|
|
1550c6b45a | ||
|
|
caf785f23b | ||
|
|
b3f7a8ab7f | ||
|
|
bd2494a0a9 | ||
|
|
237a3cb280 | ||
|
|
d23cf86fce | ||
|
|
65cb8ad123 | ||
|
|
6db88b5a00 | ||
|
|
8d89c7d133 | ||
|
|
883d401ae3 | ||
|
|
f85702dc23 | ||
|
|
084701fcd9 | ||
|
|
5eb1d2a5bb | ||
|
|
98d17cdb50 | ||
|
|
2047a600ed | ||
|
|
a98c49b59a | ||
|
|
1048e3f82d | ||
|
|
c480bc339e | ||
|
|
a0af20f20f | ||
|
|
748a439d7a | ||
|
|
0a55bec305 | ||
|
|
2ab71df139 | ||
|
|
753b31b52a | ||
|
|
fcc875ab85 | ||
|
|
57263bd10e | ||
|
|
bf82318ad9 | ||
|
|
1ca86d09d1 | ||
|
|
584bae8974 | ||
|
|
5a24c4edf7 | ||
|
|
44aa1a22ab | ||
|
|
2b115a0b80 | ||
|
|
8eb5c49ce4 | ||
|
|
54ae8d2126 | ||
|
|
3a101dccfd | ||
|
|
b81073619a | ||
|
|
f5d2e981ab | ||
|
|
541f506d9d | ||
|
|
f389a4e2be | ||
|
|
818328c9d4 | ||
|
|
0c4752039d | ||
|
|
eca219cb48 | ||
|
|
d1f88c32b3 | ||
|
|
b8f0e77a71 | ||
|
|
08ba3944e0 | ||
|
|
09e8a180de | ||
|
|
87032c8195 | ||
|
|
78e0c307b8 | ||
|
|
5758b880a5 | ||
|
|
2655d792cc | ||
|
|
93e1c85468 | ||
|
|
8fbe4b93e8 | ||
|
|
2515d19187 | ||
|
|
2b1448ffb8 | ||
|
|
fdb5bb9c19 | ||
|
|
45fdbf9363 | ||
|
|
3590b48d91 | ||
|
|
cca3d0c309 | ||
|
|
f228c4536c | ||
|
|
37fe4493e7 | ||
|
|
6a22f6f704 | ||
|
|
96a4341dfb | ||
|
|
510ab4bb8a | ||
|
|
9d96ee1e9c | ||
|
|
7c424821ca | ||
|
|
a2f5fd7592 | ||
|
|
c8df86b83b | ||
|
|
d41d2b8c31 | ||
|
|
7cb2399b89 | ||
|
|
90568f24b1 | ||
|
|
0843a9e313 | ||
|
|
b58578788c | ||
|
|
c4cc1e69cd | ||
|
|
bd947c0fcb | ||
|
|
b0b61f8d8e | ||
|
|
3281d54c6c | ||
|
|
853b0a7e05 | ||
|
|
28b15d2e92 | ||
|
|
cad59c0494 | ||
|
|
d0810af17c | ||
|
|
2b7202e676 | ||
|
|
4975b0b799 | ||
|
|
90d17cd6dd | ||
|
|
3e5e9c7965 | ||
|
|
1d027909ee | ||
|
|
6105402bf2 | ||
|
|
6031eeb09b | ||
|
|
2f582a473a | ||
|
|
26d3d29ba1 | ||
|
|
e97a3f5966 | ||
|
|
07b9cdcc86 | ||
|
|
a3cbe42782 | ||
|
|
3cf8c0a744 | ||
|
|
ddc367516b | ||
|
|
7b83b9fd36 | ||
|
|
bf8fdd9071 | ||
|
|
4ca5203df1 | ||
|
|
5e8bc0d3cd | ||
|
|
430e18bf30 | ||
|
|
ec0e5a4a87 | ||
|
|
29438121ba | ||
|
|
c2cde0c9ba | ||
|
|
1be43e0564 | ||
|
|
8913772240 | ||
|
|
dbbe03ca96 | ||
|
|
e52a682cca | ||
|
|
26a0bb6583 | ||
|
|
dee20455ee | ||
|
|
f5c9a4b3a1 | ||
|
|
65fe7a678f | ||
|
|
8811ba74de | ||
|
|
127b5fc848 | ||
|
|
d95e3fd33a |
2
.cr.yaml
2
.cr.yaml
@@ -1 +1,3 @@
|
||||
release-name-template: chart-{{ .Version }}
|
||||
make-release-latest: false
|
||||
skip-existing: true
|
||||
|
||||
41
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
41
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
---
|
||||
name: Bug report
|
||||
about: Create a report to help us improve
|
||||
title: ''
|
||||
labels: ''
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
<!-- Thanks for helping us to improve K3K! We welcome all bug reports. Please fill out each area of the template so we can better help you. Comments like this will be hidden when you post but you can delete them if you wish. -->
|
||||
|
||||
**Environmental Info:**
|
||||
Host Cluster Version:
|
||||
<!-- For example K3S v1.32.1+k3s1 or RKE2 v1.31.5+rke2r1 -->
|
||||
|
||||
Node(s) CPU architecture, OS, and Version:
|
||||
<!-- Provide the output from "uname -a" on the node(s) -->
|
||||
|
||||
Host Cluster Configuration:
|
||||
<!-- Provide some basic information on the cluster configuration. For example, "1 servers, 2 agents CNI: Flannel". -->
|
||||
|
||||
K3K Cluster Configuration:
|
||||
<!-- Provide some basic information on the cluster configuration. For example, "3 servers, 2 agents". -->
|
||||
|
||||
**Describe the bug:**
|
||||
<!-- A clear and concise description of what the bug is. -->
|
||||
|
||||
**Steps To Reproduce:**
|
||||
- Created a cluster with `k3k create`:
|
||||
|
||||
**Expected behavior:**
|
||||
<!-- A clear and concise description of what you expected to happen. -->
|
||||
|
||||
**Actual behavior:**
|
||||
<!-- A clear and concise description of what actually happened. -->
|
||||
|
||||
**Additional context / logs:**
|
||||
<!-- Add any other context and/or logs about the problem here. -->
|
||||
<!-- kubectl logs -n k3k-system -l app.kubernetes.io/instance=k3k -->
|
||||
<!-- $ kubectl logs -n <cluster-namespace> k3k-<cluster-name>-server-0 -->
|
||||
<!-- $ kubectl logs -n <cluster-namespace> -l cluster=<cluster-name>,mode=shared # in shared mode -->
|
||||
5
.github/workflows/build.yml
vendored
5
.github/workflows/build.yml
vendored
@@ -21,6 +21,9 @@ jobs:
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
||||
- name: Run GoReleaser
|
||||
uses: goreleaser/goreleaser-action@v6
|
||||
@@ -30,5 +33,5 @@ jobs:
|
||||
args: --clean --snapshot
|
||||
env:
|
||||
REPO: ${{ github.repository }}
|
||||
REGISTRY:
|
||||
REGISTRY: ""
|
||||
|
||||
12
.github/workflows/chart.yml
vendored
12
.github/workflows/chart.yml
vendored
@@ -2,9 +2,6 @@ name: Chart
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
tags:
|
||||
- "chart-*"
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
@@ -18,15 +15,6 @@ jobs:
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Check tag
|
||||
if: github.event_name == 'push'
|
||||
run: |
|
||||
pushed_tag=$(echo ${{ github.ref_name }} | sed "s/chart-//")
|
||||
chart_tag=$(yq .version charts/k3k/Chart.yaml)
|
||||
|
||||
echo pushed_tag=${pushed_tag} chart_tag=${chart_tag}
|
||||
[ "${pushed_tag}" == "${chart_tag}" ]
|
||||
|
||||
- name: Configure Git
|
||||
run: |
|
||||
git config user.name "$GITHUB_ACTOR"
|
||||
|
||||
3
.github/workflows/release.yml
vendored
3
.github/workflows/release.yml
vendored
@@ -35,6 +35,9 @@ jobs:
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
||||
- name: "Read secrets"
|
||||
uses: rancher-eio/read-vault-secrets@main
|
||||
if: github.repository_owner == 'rancher'
|
||||
|
||||
302
.github/workflows/test-conformance.yaml
vendored
Normal file
302
.github/workflows/test-conformance.yaml
vendored
Normal file
@@ -0,0 +1,302 @@
|
||||
name: Conformance Tests
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: "0 1 * * *"
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
test:
|
||||
description: "Run specific test"
|
||||
type: choice
|
||||
options:
|
||||
- conformance
|
||||
- sig-api-machinery
|
||||
- sig-apps
|
||||
- sig-architecture
|
||||
- sig-auth
|
||||
- sig-cli
|
||||
- sig-instrumentation
|
||||
- sig-network
|
||||
- sig-node
|
||||
- sig-scheduling
|
||||
- sig-storage
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
conformance:
|
||||
runs-on: ubuntu-latest
|
||||
if: inputs.test == '' || inputs.test == 'conformance'
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
type:
|
||||
- parallel
|
||||
- serial
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
fetch-tags: true
|
||||
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
|
||||
- name: Install helm
|
||||
uses: azure/setup-helm@v4.3.0
|
||||
|
||||
- name: Install hydrophone
|
||||
run: go install sigs.k8s.io/hydrophone@latest
|
||||
|
||||
- name: Install k3d and kubectl
|
||||
run: |
|
||||
wget -q -O - https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | bash
|
||||
k3d version
|
||||
|
||||
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
|
||||
|
||||
- name: Setup Kubernetes (k3d)
|
||||
env:
|
||||
REPO_NAME: k3k-registry
|
||||
REPO_PORT: 12345
|
||||
run: |
|
||||
echo "127.0.0.1 ${REPO_NAME}" | sudo tee -a /etc/hosts
|
||||
|
||||
k3d registry create ${REPO_NAME} --port ${REPO_PORT}
|
||||
|
||||
k3d cluster create k3k --servers 3 \
|
||||
-p "30000-30010:30000-30010@server:0" \
|
||||
--registry-use k3d-${REPO_NAME}:${REPO_PORT}
|
||||
|
||||
kubectl cluster-info
|
||||
kubectl get nodes
|
||||
|
||||
- name: Setup K3k
|
||||
env:
|
||||
REPO: k3k-registry:12345
|
||||
run: |
|
||||
echo "127.0.0.1 k3k-registry" | sudo tee -a /etc/hosts
|
||||
|
||||
make build
|
||||
make package
|
||||
make push
|
||||
|
||||
# add k3kcli to $PATH
|
||||
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
|
||||
|
||||
VERSION=$(make version)
|
||||
k3d image import ${REPO}/k3k:${VERSION} -c k3k --verbose
|
||||
k3d image import ${REPO}/k3k-kubelet:${VERSION} -c k3k --verbose
|
||||
|
||||
make install
|
||||
|
||||
echo "Wait for K3k controller to be available"
|
||||
kubectl wait -n k3k-system pod --for condition=Ready -l "app.kubernetes.io/name=k3k" --timeout=5m
|
||||
|
||||
- name: Check k3kcli
|
||||
run: k3kcli -v
|
||||
|
||||
- name: Create virtual cluster
|
||||
run: |
|
||||
kubectl create namespace k3k-mycluster
|
||||
|
||||
cat <<EOF | kubectl apply -f -
|
||||
apiVersion: k3k.io/v1alpha1
|
||||
kind: Cluster
|
||||
metadata:
|
||||
name: mycluster
|
||||
namespace: k3k-mycluster
|
||||
spec:
|
||||
servers: 2
|
||||
mirrorHostNodes: true
|
||||
tlsSANs:
|
||||
- "127.0.0.1"
|
||||
expose:
|
||||
nodePort:
|
||||
serverPort: 30001
|
||||
EOF
|
||||
|
||||
echo "Wait for bootstrap secret to be available"
|
||||
kubectl wait -n k3k-mycluster --for=create secret k3k-mycluster-bootstrap --timeout=5m
|
||||
|
||||
k3kcli kubeconfig generate --name mycluster
|
||||
|
||||
export KUBECONFIG=${{ github.workspace }}/k3k-mycluster-mycluster-kubeconfig.yaml
|
||||
|
||||
kubectl cluster-info
|
||||
kubectl get nodes
|
||||
kubectl get pods -A
|
||||
|
||||
- name: Run conformance tests (parallel)
|
||||
if: matrix.type == 'parallel'
|
||||
run: |
|
||||
# Run conformance tests in parallel mode (skipping serial)
|
||||
hydrophone --conformance --parallel 4 --skip='\[Serial\]' \
|
||||
--kubeconfig ${{ github.workspace }}/k3k-mycluster-mycluster-kubeconfig.yaml \
|
||||
--output-dir /tmp
|
||||
|
||||
- name: Run conformance tests (serial)
|
||||
if: matrix.type == 'serial'
|
||||
run: |
|
||||
# Run serial conformance tests
|
||||
hydrophone --focus='\[Serial\].*\[Conformance\]' \
|
||||
--kubeconfig ${{ github.workspace }}/k3k-mycluster-mycluster-kubeconfig.yaml \
|
||||
--output-dir /tmp
|
||||
|
||||
- name: Archive conformance logs
|
||||
uses: actions/upload-artifact@v4
|
||||
if: always()
|
||||
with:
|
||||
name: conformance-${{ matrix.type }}-logs
|
||||
path: /tmp/e2e.log
|
||||
|
||||
sigs:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
tests:
|
||||
- name: sig-api-machinery
|
||||
focus: '\[sig-api-machinery\].*\[Conformance\]'
|
||||
- name: sig-apps
|
||||
focus: '\[sig-apps\].*\[Conformance\]'
|
||||
- name: sig-architecture
|
||||
focus: '\[sig-architecture\].*\[Conformance\]'
|
||||
- name: sig-auth
|
||||
focus: '\[sig-auth\].*\[Conformance\]'
|
||||
- name: sig-cli
|
||||
focus: '\[sig-cli\].*\[Conformance\]'
|
||||
- name: sig-instrumentation
|
||||
focus: '\[sig-instrumentation\].*\[Conformance\]'
|
||||
- name: sig-network
|
||||
focus: '\[sig-network\].*\[Conformance\]'
|
||||
- name: sig-node
|
||||
focus: '\[sig-node\].*\[Conformance\]'
|
||||
- name: sig-scheduling
|
||||
focus: '\[sig-scheduling\].*\[Conformance\]'
|
||||
- name: sig-storage
|
||||
focus: '\[sig-storage\].*\[Conformance\]'
|
||||
|
||||
steps:
|
||||
- name: Validate input and fail fast
|
||||
if: inputs.test != '' && inputs.test != matrix.tests.name
|
||||
run: |
|
||||
echo "Failing this job as it's not the intended target."
|
||||
exit 1
|
||||
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
fetch-tags: true
|
||||
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
|
||||
- name: Install helm
|
||||
uses: azure/setup-helm@v4.3.0
|
||||
|
||||
- name: Install hydrophone
|
||||
run: go install sigs.k8s.io/hydrophone@latest
|
||||
|
||||
- name: Install k3d and kubectl
|
||||
run: |
|
||||
wget -q -O - https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | bash
|
||||
k3d version
|
||||
|
||||
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
|
||||
|
||||
- name: Setup Kubernetes (k3d)
|
||||
env:
|
||||
REPO_NAME: k3k-registry
|
||||
REPO_PORT: 12345
|
||||
run: |
|
||||
echo "127.0.0.1 ${REPO_NAME}" | sudo tee -a /etc/hosts
|
||||
|
||||
k3d registry create ${REPO_NAME} --port ${REPO_PORT}
|
||||
|
||||
k3d cluster create k3k --servers 3 \
|
||||
-p "30000-30010:30000-30010@server:0" \
|
||||
--registry-use k3d-${REPO_NAME}:${REPO_PORT}
|
||||
|
||||
kubectl cluster-info
|
||||
kubectl get nodes
|
||||
|
||||
- name: Setup K3k
|
||||
env:
|
||||
REPO: k3k-registry:12345
|
||||
run: |
|
||||
echo "127.0.0.1 k3k-registry" | sudo tee -a /etc/hosts
|
||||
|
||||
make build
|
||||
make package
|
||||
make push
|
||||
|
||||
# add k3kcli to $PATH
|
||||
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
|
||||
|
||||
VERSION=$(make version)
|
||||
k3d image import ${REPO}/k3k:${VERSION} -c k3k --verbose
|
||||
k3d image import ${REPO}/k3k-kubelet:${VERSION} -c k3k --verbose
|
||||
|
||||
make install
|
||||
|
||||
echo "Wait for K3k controller to be available"
|
||||
kubectl wait -n k3k-system pod --for condition=Ready -l "app.kubernetes.io/name=k3k" --timeout=5m
|
||||
|
||||
- name: Check k3kcli
|
||||
run: k3kcli -v
|
||||
|
||||
- name: Create virtual cluster
|
||||
run: |
|
||||
kubectl create namespace k3k-mycluster
|
||||
|
||||
cat <<EOF | kubectl apply -f -
|
||||
apiVersion: k3k.io/v1alpha1
|
||||
kind: Cluster
|
||||
metadata:
|
||||
name: mycluster
|
||||
namespace: k3k-mycluster
|
||||
spec:
|
||||
servers: 2
|
||||
mirrorHostNodes: true
|
||||
tlsSANs:
|
||||
- "127.0.0.1"
|
||||
expose:
|
||||
nodePort:
|
||||
serverPort: 30001
|
||||
EOF
|
||||
|
||||
echo "Wait for bootstrap secret to be available"
|
||||
kubectl wait -n k3k-mycluster --for=create secret k3k-mycluster-bootstrap --timeout=5m
|
||||
|
||||
k3kcli kubeconfig generate --name mycluster
|
||||
|
||||
export KUBECONFIG=${{ github.workspace }}/k3k-mycluster-mycluster-kubeconfig.yaml
|
||||
|
||||
kubectl cluster-info
|
||||
kubectl get nodes
|
||||
kubectl get pods -A
|
||||
|
||||
- name: Run sigs tests
|
||||
run: |
|
||||
FOCUS="${{ matrix.tests.focus }}"
|
||||
echo "Running with --focus=${FOCUS}"
|
||||
|
||||
hydrophone --focus "${FOCUS}" \
|
||||
--kubeconfig ${{ github.workspace }}/k3k-mycluster-mycluster-kubeconfig.yaml \
|
||||
--output-dir /tmp
|
||||
|
||||
- name: Archive conformance logs
|
||||
uses: actions/upload-artifact@v4
|
||||
if: always()
|
||||
with:
|
||||
name: ${{ matrix.tests.name }}-logs
|
||||
path: /tmp/e2e.log
|
||||
156
.github/workflows/test.yaml
vendored
156
.github/workflows/test.yaml
vendored
@@ -11,7 +11,7 @@ permissions:
|
||||
jobs:
|
||||
lint:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
@@ -21,12 +21,12 @@ jobs:
|
||||
go-version-file: go.mod
|
||||
|
||||
- name: golangci-lint
|
||||
uses: golangci/golangci-lint-action@v6
|
||||
uses: golangci/golangci-lint-action@v8
|
||||
with:
|
||||
args: --timeout=5m
|
||||
version: v1.60
|
||||
version: v2.3.0
|
||||
|
||||
tests:
|
||||
validate:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
@@ -36,28 +36,35 @@ jobs:
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
|
||||
- name: Check go modules
|
||||
run: |
|
||||
go mod tidy
|
||||
|
||||
git --no-pager diff go.mod go.sum
|
||||
test -z "$(git status --porcelain)"
|
||||
- name: Validate
|
||||
run: make validate
|
||||
|
||||
- name: Install tools
|
||||
run: |
|
||||
go install github.com/onsi/ginkgo/v2/ginkgo
|
||||
go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest
|
||||
tests:
|
||||
runs-on: ubuntu-latest
|
||||
needs: validate
|
||||
|
||||
ENVTEST_BIN=$(setup-envtest use -p path)
|
||||
sudo mkdir -p /usr/local/kubebuilder/bin
|
||||
sudo cp $ENVTEST_BIN/* /usr/local/kubebuilder/bin
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
|
||||
- name: Run unit tests
|
||||
run: make test-unit
|
||||
|
||||
- name: Upload coverage reports to Codecov
|
||||
uses: codecov/codecov-action@v5
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
files: ./cover.out
|
||||
flags: unit
|
||||
|
||||
- name: Run tests
|
||||
run: ginkgo -v -r --skip-file=tests
|
||||
|
||||
tests-e2e:
|
||||
runs-on: ubuntu-latest
|
||||
needs: validate
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
@@ -69,20 +76,21 @@ jobs:
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
|
||||
- name: Check go modules
|
||||
run: |
|
||||
go mod tidy
|
||||
|
||||
git --no-pager diff go.mod go.sum
|
||||
test -z "$(git status --porcelain)"
|
||||
|
||||
- name: Install Ginkgo
|
||||
run: go install github.com/onsi/ginkgo/v2/ginkgo
|
||||
|
||||
- name: Build
|
||||
- name: Set coverage environment
|
||||
run: |
|
||||
./scripts/build
|
||||
mkdir ${{ github.workspace }}/covdata
|
||||
|
||||
echo "COVERAGE=true" >> $GITHUB_ENV
|
||||
echo "GOCOVERDIR=${{ github.workspace }}/covdata" >> $GITHUB_ENV
|
||||
|
||||
- name: Build and package
|
||||
run: |
|
||||
make build
|
||||
make package
|
||||
|
||||
# add k3kcli to $PATH
|
||||
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
|
||||
@@ -90,19 +98,99 @@ jobs:
|
||||
- name: Check k3kcli
|
||||
run: k3kcli -v
|
||||
|
||||
- name: Run tests
|
||||
run: ginkgo -v ./tests
|
||||
- name: Run e2e tests
|
||||
run: make test-e2e
|
||||
|
||||
- name: Convert coverage data
|
||||
run: go tool covdata textfmt -i=${GOCOVERDIR} -o ${GOCOVERDIR}/cover.out
|
||||
|
||||
- name: Upload coverage reports to Codecov (controller)
|
||||
uses: codecov/codecov-action@v5
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
files: ${GOCOVERDIR}/cover.out
|
||||
flags: controller
|
||||
|
||||
- name: Upload coverage reports to Codecov (e2e)
|
||||
uses: codecov/codecov-action@v5
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
files: ./cover.out
|
||||
flags: e2e
|
||||
|
||||
- name: Archive k3s logs
|
||||
uses: actions/upload-artifact@v4
|
||||
if: always()
|
||||
with:
|
||||
name: k3s-logs
|
||||
name: e2e-k3s-logs
|
||||
path: /tmp/k3s.log
|
||||
|
||||
|
||||
- name: Archive k3k logs
|
||||
uses: actions/upload-artifact@v4
|
||||
if: always()
|
||||
with:
|
||||
name: k3k-logs
|
||||
name: e2e-k3k-logs
|
||||
path: /tmp/k3k.log
|
||||
|
||||
tests-cli:
|
||||
runs-on: ubuntu-latest
|
||||
needs: validate
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
fetch-tags: true
|
||||
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
|
||||
- name: Install Ginkgo
|
||||
run: go install github.com/onsi/ginkgo/v2/ginkgo
|
||||
|
||||
- name: Set coverage environment
|
||||
run: |
|
||||
mkdir ${{ github.workspace }}/covdata
|
||||
|
||||
echo "COVERAGE=true" >> $GITHUB_ENV
|
||||
echo "GOCOVERDIR=${{ github.workspace }}/covdata" >> $GITHUB_ENV
|
||||
|
||||
- name: Build and package
|
||||
run: |
|
||||
make build
|
||||
make package
|
||||
|
||||
# add k3kcli to $PATH
|
||||
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
|
||||
|
||||
- name: Check k3kcli
|
||||
run: k3kcli -v
|
||||
|
||||
- name: Run cli tests
|
||||
run: make test-cli
|
||||
|
||||
- name: Convert coverage data
|
||||
run: go tool covdata textfmt -i=${{ github.workspace }}/covdata -o ${{ github.workspace }}/covdata/cover.out
|
||||
|
||||
- name: Upload coverage reports to Codecov
|
||||
uses: codecov/codecov-action@v5
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
files: ${{ github.workspace }}/covdata/cover.out
|
||||
flags: cli
|
||||
|
||||
- name: Archive k3s logs
|
||||
uses: actions/upload-artifact@v4
|
||||
if: always()
|
||||
with:
|
||||
name: cli-k3s-logs
|
||||
path: /tmp/k3s.log
|
||||
|
||||
- name: Archive k3k logs
|
||||
uses: actions/upload-artifact@v4
|
||||
if: always()
|
||||
with:
|
||||
name: cli-k3k-logs
|
||||
path: /tmp/k3k.log
|
||||
|
||||
4
.gitignore
vendored
4
.gitignore
vendored
@@ -7,3 +7,7 @@
|
||||
.vscode/
|
||||
__debug*
|
||||
*-kubeconfig.yaml
|
||||
.envtest
|
||||
cover.out
|
||||
covcounters.**
|
||||
covmeta.**
|
||||
|
||||
@@ -1,12 +1,27 @@
|
||||
version: "2"
|
||||
|
||||
linters:
|
||||
enable:
|
||||
# default linters
|
||||
- errcheck
|
||||
- gosimple
|
||||
- govet
|
||||
- ineffassign
|
||||
- staticcheck
|
||||
- unused
|
||||
- misspell
|
||||
- wsl_v5
|
||||
|
||||
# extra
|
||||
- misspell
|
||||
formatters:
|
||||
enable:
|
||||
- gci
|
||||
- gofmt
|
||||
- gofumpt
|
||||
settings:
|
||||
gci:
|
||||
# The default order is `standard > default > custom > blank > dot > alias > localmodule`.
|
||||
custom-order: true
|
||||
sections:
|
||||
- standard
|
||||
- default
|
||||
- alias
|
||||
- localmodule
|
||||
- dot
|
||||
- blank
|
||||
gofmt:
|
||||
rewrite-rules:
|
||||
- pattern: 'interface{}'
|
||||
replacement: 'any'
|
||||
|
||||
@@ -67,29 +67,78 @@ archives:
|
||||
# REGISTRY=ghcr.io -> ghcr.io/rancher/k3k:latest:vX.Y.Z
|
||||
#
|
||||
dockers:
|
||||
- id: k3k
|
||||
use: docker
|
||||
# k3k amd64
|
||||
- use: buildx
|
||||
goarch: amd64
|
||||
ids:
|
||||
- k3k
|
||||
- k3kcli
|
||||
dockerfile: "package/Dockerfile"
|
||||
dockerfile: "package/Dockerfile.k3k"
|
||||
skip_push: false
|
||||
image_templates:
|
||||
- "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}:{{ .Tag }}"
|
||||
- "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}:{{ .Tag }}-amd64"
|
||||
build_flag_templates:
|
||||
- "--build-arg=BIN_K3K=k3k"
|
||||
- "--build-arg=BIN_K3KCLI=k3kcli"
|
||||
|
||||
- id: k3k-kubelet
|
||||
use: docker
|
||||
- "--pull"
|
||||
- "--platform=linux/amd64"
|
||||
|
||||
# k3k arm64
|
||||
- use: buildx
|
||||
goarch: arm64
|
||||
ids:
|
||||
- k3k-kubelet
|
||||
dockerfile: "package/Dockerfile.kubelet"
|
||||
- k3k
|
||||
- k3kcli
|
||||
dockerfile: "package/Dockerfile.k3k"
|
||||
skip_push: false
|
||||
image_templates:
|
||||
- "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}-kubelet:{{ .Tag }}"
|
||||
- "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}:{{ .Tag }}-arm64"
|
||||
build_flag_templates:
|
||||
- "--build-arg=BIN_K3K=k3k"
|
||||
- "--build-arg=BIN_K3KCLI=k3kcli"
|
||||
- "--pull"
|
||||
- "--platform=linux/arm64"
|
||||
|
||||
# k3k-kubelet amd64
|
||||
- use: buildx
|
||||
goarch: amd64
|
||||
ids:
|
||||
- k3k-kubelet
|
||||
dockerfile: "package/Dockerfile.k3k-kubelet"
|
||||
skip_push: false
|
||||
image_templates:
|
||||
- "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}-kubelet:{{ .Tag }}-amd64"
|
||||
build_flag_templates:
|
||||
- "--build-arg=BIN_K3K_KUBELET=k3k-kubelet"
|
||||
- "--pull"
|
||||
- "--platform=linux/amd64"
|
||||
|
||||
# k3k-kubelet arm64
|
||||
- use: buildx
|
||||
goarch: arm64
|
||||
ids:
|
||||
- k3k-kubelet
|
||||
dockerfile: "package/Dockerfile.k3k-kubelet"
|
||||
skip_push: false
|
||||
image_templates:
|
||||
- "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}-kubelet:{{ .Tag }}-arm64"
|
||||
build_flag_templates:
|
||||
- "--build-arg=BIN_K3K_KUBELET=k3k-kubelet"
|
||||
- "--pull"
|
||||
- "--platform=linux/arm64"
|
||||
|
||||
docker_manifests:
|
||||
# k3k
|
||||
- name_template: "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}:{{ .Tag }}"
|
||||
image_templates:
|
||||
- "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}:{{ .Tag }}-amd64"
|
||||
- "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}:{{ .Tag }}-arm64"
|
||||
|
||||
# k3k-kubelet arm64
|
||||
- name_template: "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}-kubelet:{{ .Tag }}"
|
||||
image_templates:
|
||||
- "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}-kubelet:{{ .Tag }}-amd64"
|
||||
- "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}-kubelet:{{ .Tag }}-arm64"
|
||||
|
||||
changelog:
|
||||
sort: asc
|
||||
|
||||
@@ -1,34 +0,0 @@
|
||||
ARG GOLANG=rancher/hardened-build-base:v1.23.4b1
|
||||
FROM ${GOLANG}
|
||||
|
||||
ARG DAPPER_HOST_ARCH
|
||||
ENV ARCH $DAPPER_HOST_ARCH
|
||||
|
||||
RUN apk -U add \bash git gcc musl-dev docker vim less file curl wget ca-certificates
|
||||
RUN if [ "${ARCH}" == "amd64" ]; then \
|
||||
curl -sL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.59.0; \
|
||||
fi
|
||||
|
||||
RUN curl -sL https://github.com/helm/chart-releaser/releases/download/v1.5.0/chart-releaser_1.5.0_linux_${ARCH}.tar.gz | tar -xz cr \
|
||||
&& mv cr /bin/
|
||||
|
||||
# Tool for CRD generation.
|
||||
ENV CONTROLLER_GEN_VERSION v0.14.0
|
||||
RUN go install sigs.k8s.io/controller-tools/cmd/controller-gen@${CONTROLLER_GEN_VERSION}
|
||||
|
||||
# Tool to setup the envtest framework to run the controllers integration tests
|
||||
RUN go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest && \
|
||||
ENVTEST_BIN=$(setup-envtest use -p path) && \
|
||||
mkdir -p /usr/local/kubebuilder/bin && \
|
||||
cp $ENVTEST_BIN/* /usr/local/kubebuilder/bin
|
||||
|
||||
ENV GO111MODULE on
|
||||
ENV DAPPER_ENV REPO TAG DRONE_TAG CROSS GITHUB_TOKEN SKIP_TESTS GIT_TAG
|
||||
ENV DAPPER_SOURCE /go/src/github.com/rancher/k3k/
|
||||
ENV DAPPER_OUTPUT ./bin ./dist ./deploy ./charts
|
||||
ENV DAPPER_DOCKER_SOCKET true
|
||||
ENV HOME ${DAPPER_SOURCE}
|
||||
WORKDIR ${DAPPER_SOURCE}
|
||||
|
||||
ENTRYPOINT ["./ops/entry"]
|
||||
CMD ["ci"]
|
||||
124
Makefile
124
Makefile
@@ -1,14 +1,116 @@
|
||||
TARGETS := $(shell ls ops)
|
||||
.dapper:
|
||||
@echo Downloading dapper
|
||||
@curl -sL https://releases.rancher.com/dapper/latest/dapper-$$(uname -s)-$$(uname -m) > .dapper.tmp
|
||||
@@chmod +x .dapper.tmp
|
||||
@./.dapper.tmp -v
|
||||
@mv .dapper.tmp .dapper
|
||||
|
||||
$(TARGETS): .dapper
|
||||
./.dapper $@
|
||||
REPO ?= rancher
|
||||
COVERAGE ?= false
|
||||
VERSION ?= $(shell git describe --tags --always --dirty --match="v[0-9]*")
|
||||
|
||||
.DEFAULT_GOAL := default
|
||||
## Dependencies
|
||||
|
||||
.PHONY: $(TARGETS)
|
||||
GOLANGCI_LINT_VERSION := v2.3.0
|
||||
GINKGO_VERSION ?= v2.21.0
|
||||
GINKGO_FLAGS ?= -v -r --coverprofile=cover.out --coverpkg=./...
|
||||
ENVTEST_VERSION ?= v0.0.0-20250505003155-b6c5897febe5
|
||||
ENVTEST_K8S_VERSION := 1.31.0
|
||||
CRD_REF_DOCS_VER ?= v0.1.0
|
||||
|
||||
GOLANGCI_LINT ?= go run github.com/golangci/golangci-lint/v2/cmd/golangci-lint@$(GOLANGCI_LINT_VERSION)
|
||||
GINKGO ?= go run github.com/onsi/ginkgo/v2/ginkgo@$(GINKGO_VERSION)
|
||||
CRD_REF_DOCS := go run github.com/elastic/crd-ref-docs@$(CRD_REF_DOCS_VER)
|
||||
|
||||
ENVTEST ?= go run sigs.k8s.io/controller-runtime/tools/setup-envtest@$(ENVTEST_VERSION)
|
||||
ENVTEST_DIR ?= $(shell pwd)/.envtest
|
||||
export KUBEBUILDER_ASSETS ?= $(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(ENVTEST_DIR) -p path)
|
||||
|
||||
|
||||
.PHONY: all
|
||||
all: version generate build package ## Run 'make' or 'make all' to run 'version', 'generate', 'build' and 'package'
|
||||
|
||||
.PHONY: version
|
||||
version: ## Print the current version
|
||||
@echo $(VERSION)
|
||||
|
||||
.PHONY: build
|
||||
build: ## Build the the K3k binaries (k3k, k3k-kubelet and k3kcli)
|
||||
@VERSION=$(VERSION) COVERAGE=$(COVERAGE) ./scripts/build
|
||||
|
||||
.PHONY: package
|
||||
package: package-k3k package-k3k-kubelet ## Package the k3k and k3k-kubelet Docker images
|
||||
|
||||
.PHONY: package-%
|
||||
package-%:
|
||||
docker build -f package/Dockerfile.$* \
|
||||
-t $(REPO)/$*:$(VERSION) \
|
||||
-t $(REPO)/$*:latest \
|
||||
-t $(REPO)/$*:dev .
|
||||
|
||||
.PHONY: push
|
||||
push: push-k3k push-k3k-kubelet ## Push the K3k images to the registry
|
||||
|
||||
.PHONY: push-%
|
||||
push-%:
|
||||
docker push $(REPO)/$*:$(VERSION)
|
||||
docker push $(REPO)/$*:latest
|
||||
docker push $(REPO)/$*:dev
|
||||
|
||||
.PHONY: test
|
||||
test: ## Run all the tests
|
||||
$(GINKGO) $(GINKGO_FLAGS) --label-filter=$(label-filter)
|
||||
|
||||
.PHONY: test-unit
|
||||
test-unit: ## Run the unit tests (skips the e2e)
|
||||
$(GINKGO) $(GINKGO_FLAGS) --skip-file=tests/*
|
||||
|
||||
.PHONY: test-controller
|
||||
test-controller: ## Run the controller tests (pkg/controller)
|
||||
$(GINKGO) $(GINKGO_FLAGS) pkg/controller
|
||||
|
||||
.PHONY: test-kubelet-controller
|
||||
test-kubelet-controller: ## Run the controller tests (pkg/controller)
|
||||
$(GINKGO) $(GINKGO_FLAGS) k3k-kubelet/controller
|
||||
|
||||
.PHONY: test-e2e
|
||||
test-e2e: ## Run the e2e tests
|
||||
$(GINKGO) $(GINKGO_FLAGS) --label-filter=e2e tests
|
||||
|
||||
.PHONY: test-cli
|
||||
test-cli: ## Run the cli tests
|
||||
$(GINKGO) $(GINKGO_FLAGS) --label-filter=cli --flake-attempts=3 tests
|
||||
|
||||
.PHONY: generate
|
||||
generate: ## Generate the CRDs specs
|
||||
go generate ./...
|
||||
|
||||
.PHONY: docs
|
||||
docs: ## Build the CRDs and CLI docs
|
||||
$(CRD_REF_DOCS) --config=./docs/crds/config.yaml \
|
||||
--renderer=markdown \
|
||||
--source-path=./pkg/apis/k3k.io/v1alpha1 \
|
||||
--output-path=./docs/crds/crd-docs.md
|
||||
@go run ./docs/cli/genclidoc.go
|
||||
|
||||
.PHONY: lint
|
||||
lint: ## Find any linting issues in the project
|
||||
$(GOLANGCI_LINT) run --timeout=5m
|
||||
|
||||
.PHONY: fmt
|
||||
fmt: ## Find any linting issues in the project
|
||||
$(GOLANGCI_LINT) fmt ./...
|
||||
|
||||
.PHONY: validate
|
||||
validate: generate docs fmt ## Validate the project checking for any dependency or doc mismatch
|
||||
$(GINKGO) unfocus
|
||||
go mod tidy
|
||||
git status --porcelain
|
||||
git --no-pager diff --exit-code
|
||||
|
||||
.PHONY: install
|
||||
install: ## Install K3k with Helm on the targeted Kubernetes cluster
|
||||
helm upgrade --install --namespace k3k-system --create-namespace \
|
||||
--set controller.image.repository=$(REPO)/k3k \
|
||||
--set controller.image.tag=$(VERSION) \
|
||||
--set agent.shared.image.repository=$(REPO)/k3k-kubelet \
|
||||
--set agent.shared.image.tag=$(VERSION) \
|
||||
k3k ./charts/k3k/
|
||||
|
||||
.PHONY: help
|
||||
help: ## Show this help.
|
||||
@egrep -h '\s##\s' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m %-30s\033[0m %s\n", $$1, $$2}'
|
||||
|
||||
21
README.md
21
README.md
@@ -39,7 +39,10 @@ This section provides instructions on how to install K3k and the `k3kcli`.
|
||||
### Prerequisites
|
||||
|
||||
* [Helm](https://helm.sh) must be installed to use the charts. Please refer to Helm's [documentation](https://helm.sh/docs) to get started.
|
||||
* An existing [RKE2](https://docs.rke2.io/install/quickstart) Kubernetes cluster (recommended).
|
||||
* A configured storage provider with a default storage class.
|
||||
|
||||
**Note:** If you do not have a storage provider, you can configure the cluster to use ephemeral or static storage. Please consult the [k3kcli advance usage](./docs/advanced-usage.md#using-the-cli) for instructions on using these options.
|
||||
|
||||
### Install the K3k controller
|
||||
|
||||
@@ -53,10 +56,10 @@ This section provides instructions on how to install K3k and the `k3kcli`.
|
||||
2. Install the K3k controller:
|
||||
|
||||
```bash
|
||||
helm install --namespace k3k-system --create-namespace k3k k3k/k3k --devel
|
||||
helm install --namespace k3k-system --create-namespace k3k k3k/k3k
|
||||
```
|
||||
|
||||
**NOTE:** K3k is currently under development, so the chart is marked as a development chart. This means you need to add the `--devel` flag to install it. For production use, keep an eye on releases for stable versions. We recommend using the latest released version when possible.
|
||||
**NOTE:** K3k is currently under development. We recommend using the latest released version when possible.
|
||||
|
||||
|
||||
### Install the `k3kcli`
|
||||
@@ -68,7 +71,7 @@ To install it, simply download the latest available version for your architectur
|
||||
For example, you can download the Linux amd64 version with:
|
||||
|
||||
```
|
||||
wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v0.2.2-rc4/k3kcli-linux-amd64 && \
|
||||
wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v0.3.5/k3kcli-linux-amd64 && \
|
||||
chmod +x k3kcli && \
|
||||
sudo mv k3kcli /usr/local/bin
|
||||
```
|
||||
@@ -76,7 +79,7 @@ wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v0.2.2-rc4/k3kc
|
||||
You should now be able to run:
|
||||
```bash
|
||||
-> % k3kcli --version
|
||||
k3kcli Version: v0.2.2-rc4
|
||||
k3kcli version v0.3.5
|
||||
```
|
||||
|
||||
|
||||
@@ -94,6 +97,16 @@ To create a new K3k cluster, use the following command:
|
||||
```bash
|
||||
k3kcli cluster create mycluster
|
||||
```
|
||||
> [!NOTE]
|
||||
> **Creating a K3k Cluster on a Rancher-Managed Host Cluster**
|
||||
>
|
||||
> If your *host* Kubernetes cluster is managed by Rancher (e.g., your kubeconfig's `server` address includes a Rancher URL), use the `--kubeconfig-server` flag when creating your K3k cluster:
|
||||
>
|
||||
>```bash
|
||||
>k3kcli cluster create --kubeconfig-server <host_node_IP_or_load_balancer_IP> mycluster
|
||||
>```
|
||||
>
|
||||
> This ensures the generated kubeconfig connects to the correct endpoint.
|
||||
|
||||
When the K3s server is ready, `k3kcli` will generate the necessary kubeconfig file and print instructions on how to use it.
|
||||
|
||||
|
||||
@@ -2,5 +2,5 @@ apiVersion: v2
|
||||
name: k3k
|
||||
description: A Helm chart for K3K
|
||||
type: application
|
||||
version: 0.1.6-r1
|
||||
appVersion: v0.2.2-rc5
|
||||
version: 0.3.5
|
||||
appVersion: v0.3.5
|
||||
|
||||
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
controller-gen.kubebuilder.io/version: v0.16.0
|
||||
name: clusters.k3k.io
|
||||
spec:
|
||||
group: k3k.io
|
||||
@@ -14,9 +14,23 @@ spec:
|
||||
singular: cluster
|
||||
scope: Namespaced
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
- additionalPrinterColumns:
|
||||
- jsonPath: .spec.mode
|
||||
name: Mode
|
||||
type: string
|
||||
- jsonPath: .status.phase
|
||||
name: Status
|
||||
type: string
|
||||
- jsonPath: .status.policyName
|
||||
name: Policy
|
||||
type: string
|
||||
name: v1alpha1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
description: |-
|
||||
Cluster defines a virtual Kubernetes cluster managed by k3k.
|
||||
It specifies the desired state of a virtual cluster, including version, node configuration, and networking.
|
||||
k3k uses this to provision and manage these virtual clusters.
|
||||
properties:
|
||||
apiVersion:
|
||||
description: |-
|
||||
@@ -37,118 +51,330 @@ spec:
|
||||
type: object
|
||||
spec:
|
||||
default: {}
|
||||
description: Spec defines the desired state of the Cluster.
|
||||
properties:
|
||||
addons:
|
||||
description: Addons is a list of secrets containing raw YAML which
|
||||
will be deployed in the virtual K3k cluster on startup.
|
||||
description: Addons specifies secrets containing raw YAML to deploy
|
||||
on cluster startup.
|
||||
items:
|
||||
description: Addon specifies a Secret containing YAML to be deployed
|
||||
on cluster startup.
|
||||
properties:
|
||||
secretNamespace:
|
||||
description: SecretNamespace is the namespace of the Secret.
|
||||
type: string
|
||||
secretRef:
|
||||
description: SecretRef is the name of the Secret.
|
||||
type: string
|
||||
type: object
|
||||
type: array
|
||||
agentArgs:
|
||||
description: AgentArgs are the ordered key value pairs (e.x. "testArg",
|
||||
"testValue") for the K3s pods running in agent mode.
|
||||
description: |-
|
||||
AgentArgs specifies ordered key-value pairs for K3s agent pods.
|
||||
Example: ["--node-name=my-agent-node"]
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
agentEnvs:
|
||||
description: AgentEnvs specifies list of environment variables to
|
||||
set in the agent pod.
|
||||
items:
|
||||
description: EnvVar represents an environment variable present in
|
||||
a Container.
|
||||
properties:
|
||||
name:
|
||||
description: Name of the environment variable. Must be a C_IDENTIFIER.
|
||||
type: string
|
||||
value:
|
||||
description: |-
|
||||
Variable references $(VAR_NAME) are expanded
|
||||
using the previously defined environment variables in the container and
|
||||
any service environment variables. If a variable cannot be resolved,
|
||||
the reference in the input string will be unchanged. Double $$ are reduced
|
||||
to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
|
||||
"$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
|
||||
Escaped references will never be expanded, regardless of whether the variable
|
||||
exists or not.
|
||||
Defaults to "".
|
||||
type: string
|
||||
valueFrom:
|
||||
description: Source for the environment variable's value. Cannot
|
||||
be used if value is not empty.
|
||||
properties:
|
||||
configMapKeyRef:
|
||||
description: Selects a key of a ConfigMap.
|
||||
properties:
|
||||
key:
|
||||
description: The key to select.
|
||||
type: string
|
||||
name:
|
||||
default: ""
|
||||
description: |-
|
||||
Name of the referent.
|
||||
This field is effectively required, but due to backwards compatibility is
|
||||
allowed to be empty. Instances of this type with an empty value here are
|
||||
almost certainly wrong.
|
||||
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
|
||||
type: string
|
||||
optional:
|
||||
description: Specify whether the ConfigMap or its key
|
||||
must be defined
|
||||
type: boolean
|
||||
required:
|
||||
- key
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
fieldRef:
|
||||
description: |-
|
||||
Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
|
||||
spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
|
||||
properties:
|
||||
apiVersion:
|
||||
description: Version of the schema the FieldPath is
|
||||
written in terms of, defaults to "v1".
|
||||
type: string
|
||||
fieldPath:
|
||||
description: Path of the field to select in the specified
|
||||
API version.
|
||||
type: string
|
||||
required:
|
||||
- fieldPath
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
resourceFieldRef:
|
||||
description: |-
|
||||
Selects a resource of the container: only resources limits and requests
|
||||
(limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
|
||||
properties:
|
||||
containerName:
|
||||
description: 'Container name: required for volumes,
|
||||
optional for env vars'
|
||||
type: string
|
||||
divisor:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
description: Specifies the output format of the exposed
|
||||
resources, defaults to "1"
|
||||
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
|
||||
x-kubernetes-int-or-string: true
|
||||
resource:
|
||||
description: 'Required: resource to select'
|
||||
type: string
|
||||
required:
|
||||
- resource
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
secretKeyRef:
|
||||
description: Selects a key of a secret in the pod's namespace
|
||||
properties:
|
||||
key:
|
||||
description: The key of the secret to select from. Must
|
||||
be a valid secret key.
|
||||
type: string
|
||||
name:
|
||||
default: ""
|
||||
description: |-
|
||||
Name of the referent.
|
||||
This field is effectively required, but due to backwards compatibility is
|
||||
allowed to be empty. Instances of this type with an empty value here are
|
||||
almost certainly wrong.
|
||||
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
|
||||
type: string
|
||||
optional:
|
||||
description: Specify whether the Secret or its key must
|
||||
be defined
|
||||
type: boolean
|
||||
required:
|
||||
- key
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
type: object
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
agents:
|
||||
default: 0
|
||||
description: Agents is the number of K3s pods to run in agent (worker)
|
||||
mode.
|
||||
description: |-
|
||||
Agents specifies the number of K3s pods to run in agent (worker) mode.
|
||||
Must be 0 or greater. Defaults to 0.
|
||||
This field is ignored in "shared" mode.
|
||||
format: int32
|
||||
type: integer
|
||||
x-kubernetes-validations:
|
||||
- message: invalid value for agents
|
||||
rule: self >= 0
|
||||
clusterCIDR:
|
||||
description: ClusterCIDR is the CIDR range for the pods of the cluster.
|
||||
Defaults to 10.42.0.0/16.
|
||||
description: |-
|
||||
ClusterCIDR is the CIDR range for pod IPs.
|
||||
Defaults to 10.42.0.0/16 in shared mode and 10.52.0.0/16 in virtual mode.
|
||||
This field is immutable.
|
||||
type: string
|
||||
x-kubernetes-validations:
|
||||
- message: clusterCIDR is immutable
|
||||
rule: self == oldSelf
|
||||
clusterDNS:
|
||||
description: |-
|
||||
ClusterDNS is the IP address for the coredns service. Needs to be in the range provided by ServiceCIDR or CoreDNS may not deploy.
|
||||
Defaults to 10.43.0.10.
|
||||
ClusterDNS is the IP address for the CoreDNS service.
|
||||
Must be within the ServiceCIDR range. Defaults to 10.43.0.10.
|
||||
This field is immutable.
|
||||
type: string
|
||||
x-kubernetes-validations:
|
||||
- message: clusterDNS is immutable
|
||||
rule: self == oldSelf
|
||||
clusterLimit:
|
||||
description: Limit is the limits that apply for the server/worker
|
||||
nodes.
|
||||
customCAs:
|
||||
description: CustomCAs specifies the cert/key pairs for custom CA
|
||||
certificates.
|
||||
properties:
|
||||
serverLimit:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
|
||||
x-kubernetes-int-or-string: true
|
||||
description: ServerLimit is the limits (cpu/mem) that apply to
|
||||
the server nodes
|
||||
type: object
|
||||
workerLimit:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
|
||||
x-kubernetes-int-or-string: true
|
||||
description: WorkerLimit is the limits (cpu/mem) that apply to
|
||||
the agent nodes
|
||||
enabled:
|
||||
description: Enabled toggles this feature on or off.
|
||||
type: boolean
|
||||
sources:
|
||||
description: Sources defines the sources for all required custom
|
||||
CA certificates.
|
||||
properties:
|
||||
clientCA:
|
||||
description: ClientCA specifies the client-ca cert/key pair.
|
||||
properties:
|
||||
secretName:
|
||||
description: |-
|
||||
SecretName specifies the name of an existing secret to use.
|
||||
The controller expects specific keys inside based on the credential type:
|
||||
- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
|
||||
- For ServiceAccountTokenKey: 'tls.key'.
|
||||
type: string
|
||||
type: object
|
||||
etcdPeerCA:
|
||||
description: ETCDPeerCA specifies the etcd-peer-ca cert/key
|
||||
pair.
|
||||
properties:
|
||||
secretName:
|
||||
description: |-
|
||||
SecretName specifies the name of an existing secret to use.
|
||||
The controller expects specific keys inside based on the credential type:
|
||||
- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
|
||||
- For ServiceAccountTokenKey: 'tls.key'.
|
||||
type: string
|
||||
type: object
|
||||
etcdServerCA:
|
||||
description: ETCDServerCA specifies the etcd-server-ca cert/key
|
||||
pair.
|
||||
properties:
|
||||
secretName:
|
||||
description: |-
|
||||
SecretName specifies the name of an existing secret to use.
|
||||
The controller expects specific keys inside based on the credential type:
|
||||
- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
|
||||
- For ServiceAccountTokenKey: 'tls.key'.
|
||||
type: string
|
||||
type: object
|
||||
requestHeaderCA:
|
||||
description: RequestHeaderCA specifies the request-header-ca
|
||||
cert/key pair.
|
||||
properties:
|
||||
secretName:
|
||||
description: |-
|
||||
SecretName specifies the name of an existing secret to use.
|
||||
The controller expects specific keys inside based on the credential type:
|
||||
- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
|
||||
- For ServiceAccountTokenKey: 'tls.key'.
|
||||
type: string
|
||||
type: object
|
||||
serverCA:
|
||||
description: ServerCA specifies the server-ca cert/key pair.
|
||||
properties:
|
||||
secretName:
|
||||
description: |-
|
||||
SecretName specifies the name of an existing secret to use.
|
||||
The controller expects specific keys inside based on the credential type:
|
||||
- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
|
||||
- For ServiceAccountTokenKey: 'tls.key'.
|
||||
type: string
|
||||
type: object
|
||||
serviceAccountToken:
|
||||
description: ServiceAccountToken specifies the service-account-token
|
||||
key.
|
||||
properties:
|
||||
secretName:
|
||||
description: |-
|
||||
SecretName specifies the name of an existing secret to use.
|
||||
The controller expects specific keys inside based on the credential type:
|
||||
- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
|
||||
- For ServiceAccountTokenKey: 'tls.key'.
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
type: object
|
||||
expose:
|
||||
description: |-
|
||||
Expose contains options for exposing the apiserver inside/outside of the cluster. By default, this is only exposed as a
|
||||
clusterIP which is relatively secure, but difficult to access outside of the cluster.
|
||||
Expose specifies options for exposing the API server.
|
||||
By default, it's only exposed as a ClusterIP.
|
||||
properties:
|
||||
ingress:
|
||||
description: Ingress specifies options for exposing the API server
|
||||
through an Ingress.
|
||||
properties:
|
||||
annotations:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: Annotations is a key value map that will enrich
|
||||
the Ingress annotations
|
||||
description: Annotations specifies annotations to add to the
|
||||
Ingress.
|
||||
type: object
|
||||
ingressClassName:
|
||||
description: IngressClassName specifies the IngressClass to
|
||||
use for the Ingress.
|
||||
type: string
|
||||
type: object
|
||||
loadbalancer:
|
||||
properties:
|
||||
enabled:
|
||||
type: boolean
|
||||
required:
|
||||
- enabled
|
||||
type: object
|
||||
nodePort:
|
||||
description: LoadBalancer specifies options for exposing the API
|
||||
server through a LoadBalancer service.
|
||||
properties:
|
||||
etcdPort:
|
||||
description: |-
|
||||
ETCDPort is the port on each node on which the ETCD service is exposed when type is NodePort.
|
||||
If not specified, a port will be allocated (default: 30000-32767)
|
||||
ETCDPort is the port on which the ETCD service is exposed when type is LoadBalancer.
|
||||
If not specified, the default etcd 2379 port will be allocated.
|
||||
If 0 or negative, the port will not be exposed.
|
||||
format: int32
|
||||
type: integer
|
||||
serverPort:
|
||||
description: |-
|
||||
ServerPort is the port on each node on which the K3s server service is exposed when type is NodePort.
|
||||
If not specified, a port will be allocated (default: 30000-32767)
|
||||
ServerPort is the port on which the K3s server is exposed when type is LoadBalancer.
|
||||
If not specified, the default https 443 port will be allocated.
|
||||
If 0 or negative, the port will not be exposed.
|
||||
format: int32
|
||||
type: integer
|
||||
servicePort:
|
||||
type: object
|
||||
nodePort:
|
||||
description: NodePort specifies options for exposing the API server
|
||||
through NodePort.
|
||||
properties:
|
||||
etcdPort:
|
||||
description: |-
|
||||
ServicePort is the port on each node on which the K3s service is exposed when type is NodePort.
|
||||
If not specified, a port will be allocated (default: 30000-32767)
|
||||
ETCDPort is the port on each node on which the ETCD service is exposed when type is NodePort.
|
||||
If not specified, a random port between 30000-32767 will be allocated.
|
||||
If out of range, the port will not be exposed.
|
||||
format: int32
|
||||
type: integer
|
||||
serverPort:
|
||||
description: |-
|
||||
ServerPort is the port on each node on which the K3s server is exposed when type is NodePort.
|
||||
If not specified, a random port between 30000-32767 will be allocated.
|
||||
If out of range, the port will not be exposed.
|
||||
format: int32
|
||||
type: integer
|
||||
type: object
|
||||
type: object
|
||||
x-kubernetes-validations:
|
||||
- message: ingress, loadbalancer and nodePort are mutually exclusive;
|
||||
only one can be set
|
||||
rule: '[has(self.ingress), has(self.loadbalancer), has(self.nodePort)].filter(x,
|
||||
x).size() <= 1'
|
||||
mirrorHostNodes:
|
||||
description: |-
|
||||
MirrorHostNodes controls whether node objects from the host cluster
|
||||
are mirrored into the virtual cluster.
|
||||
type: boolean
|
||||
mode:
|
||||
allOf:
|
||||
- enum:
|
||||
@@ -158,8 +384,9 @@ spec:
|
||||
- shared
|
||||
- virtual
|
||||
default: shared
|
||||
description: Mode is the cluster provisioning mode which can be either
|
||||
"shared" or "virtual". Defaults to "shared"
|
||||
description: |-
|
||||
Mode specifies the cluster provisioning mode: "shared" or "virtual".
|
||||
Defaults to "shared". This field is immutable.
|
||||
type: string
|
||||
x-kubernetes-validations:
|
||||
- message: mode is immutable
|
||||
@@ -168,64 +395,302 @@ spec:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
NodeSelector is the node selector that will be applied to all server/agent pods.
|
||||
In "shared" mode the node selector will be applied also to the workloads.
|
||||
NodeSelector specifies node labels to constrain where server/agent pods are scheduled.
|
||||
In "shared" mode, this also applies to workloads.
|
||||
type: object
|
||||
persistence:
|
||||
default:
|
||||
type: dynamic
|
||||
description: |-
|
||||
Persistence contains options controlling how the etcd data of the virtual cluster is persisted. By default, no data
|
||||
persistence is guaranteed, so restart of a virtual cluster pod may result in data loss without this field.
|
||||
Persistence specifies options for persisting etcd data.
|
||||
Defaults to dynamic persistence, which uses a PersistentVolumeClaim to provide data persistence.
|
||||
A default StorageClass is required for dynamic persistence.
|
||||
properties:
|
||||
storageClassName:
|
||||
description: |-
|
||||
StorageClassName is the name of the StorageClass to use for the PVC.
|
||||
This field is only relevant in "dynamic" mode.
|
||||
type: string
|
||||
storageRequestSize:
|
||||
default: 1G
|
||||
description: |-
|
||||
StorageRequestSize is the requested size for the PVC.
|
||||
This field is only relevant in "dynamic" mode.
|
||||
type: string
|
||||
type:
|
||||
default: dynamic
|
||||
description: PersistenceMode is the storage mode of a Cluster.
|
||||
description: Type specifies the persistence mode.
|
||||
type: string
|
||||
required:
|
||||
- type
|
||||
type: object
|
||||
priorityClass:
|
||||
description: |-
|
||||
PriorityClass is the priorityClassName that will be applied to all server/agent pods.
|
||||
In "shared" mode the priorityClassName will be applied also to the workloads.
|
||||
PriorityClass specifies the priorityClassName for server/agent pods.
|
||||
In "shared" mode, this also applies to workloads.
|
||||
type: string
|
||||
serverArgs:
|
||||
description: ServerArgs are the ordered key value pairs (e.x. "testArg",
|
||||
"testValue") for the K3s pods running in server mode.
|
||||
description: |-
|
||||
ServerArgs specifies ordered key-value pairs for K3s server pods.
|
||||
Example: ["--tls-san=example.com"]
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
serverEnvs:
|
||||
description: ServerEnvs specifies list of environment variables to
|
||||
set in the server pod.
|
||||
items:
|
||||
description: EnvVar represents an environment variable present in
|
||||
a Container.
|
||||
properties:
|
||||
name:
|
||||
description: Name of the environment variable. Must be a C_IDENTIFIER.
|
||||
type: string
|
||||
value:
|
||||
description: |-
|
||||
Variable references $(VAR_NAME) are expanded
|
||||
using the previously defined environment variables in the container and
|
||||
any service environment variables. If a variable cannot be resolved,
|
||||
the reference in the input string will be unchanged. Double $$ are reduced
|
||||
to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
|
||||
"$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
|
||||
Escaped references will never be expanded, regardless of whether the variable
|
||||
exists or not.
|
||||
Defaults to "".
|
||||
type: string
|
||||
valueFrom:
|
||||
description: Source for the environment variable's value. Cannot
|
||||
be used if value is not empty.
|
||||
properties:
|
||||
configMapKeyRef:
|
||||
description: Selects a key of a ConfigMap.
|
||||
properties:
|
||||
key:
|
||||
description: The key to select.
|
||||
type: string
|
||||
name:
|
||||
default: ""
|
||||
description: |-
|
||||
Name of the referent.
|
||||
This field is effectively required, but due to backwards compatibility is
|
||||
allowed to be empty. Instances of this type with an empty value here are
|
||||
almost certainly wrong.
|
||||
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
|
||||
type: string
|
||||
optional:
|
||||
description: Specify whether the ConfigMap or its key
|
||||
must be defined
|
||||
type: boolean
|
||||
required:
|
||||
- key
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
fieldRef:
|
||||
description: |-
|
||||
Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
|
||||
spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
|
||||
properties:
|
||||
apiVersion:
|
||||
description: Version of the schema the FieldPath is
|
||||
written in terms of, defaults to "v1".
|
||||
type: string
|
||||
fieldPath:
|
||||
description: Path of the field to select in the specified
|
||||
API version.
|
||||
type: string
|
||||
required:
|
||||
- fieldPath
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
resourceFieldRef:
|
||||
description: |-
|
||||
Selects a resource of the container: only resources limits and requests
|
||||
(limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
|
||||
properties:
|
||||
containerName:
|
||||
description: 'Container name: required for volumes,
|
||||
optional for env vars'
|
||||
type: string
|
||||
divisor:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
description: Specifies the output format of the exposed
|
||||
resources, defaults to "1"
|
||||
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
|
||||
x-kubernetes-int-or-string: true
|
||||
resource:
|
||||
description: 'Required: resource to select'
|
||||
type: string
|
||||
required:
|
||||
- resource
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
secretKeyRef:
|
||||
description: Selects a key of a secret in the pod's namespace
|
||||
properties:
|
||||
key:
|
||||
description: The key of the secret to select from. Must
|
||||
be a valid secret key.
|
||||
type: string
|
||||
name:
|
||||
default: ""
|
||||
description: |-
|
||||
Name of the referent.
|
||||
This field is effectively required, but due to backwards compatibility is
|
||||
allowed to be empty. Instances of this type with an empty value here are
|
||||
almost certainly wrong.
|
||||
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
|
||||
type: string
|
||||
optional:
|
||||
description: Specify whether the Secret or its key must
|
||||
be defined
|
||||
type: boolean
|
||||
required:
|
||||
- key
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
type: object
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
serverLimit:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
|
||||
x-kubernetes-int-or-string: true
|
||||
description: ServerLimit specifies resource limits for server nodes.
|
||||
type: object
|
||||
servers:
|
||||
default: 1
|
||||
description: Servers is the number of K3s pods to run in server (controlplane)
|
||||
mode.
|
||||
description: |-
|
||||
Servers specifies the number of K3s pods to run in server (control plane) mode.
|
||||
Must be at least 1. Defaults to 1.
|
||||
format: int32
|
||||
type: integer
|
||||
x-kubernetes-validations:
|
||||
- message: cluster must have at least one server
|
||||
rule: self >= 1
|
||||
serviceCIDR:
|
||||
description: ServiceCIDR is the CIDR range for the services in the
|
||||
cluster. Defaults to 10.43.0.0/16.
|
||||
description: |-
|
||||
ServiceCIDR is the CIDR range for service IPs.
|
||||
Defaults to 10.43.0.0/16 in shared mode and 10.53.0.0/16 in virtual mode.
|
||||
This field is immutable.
|
||||
type: string
|
||||
x-kubernetes-validations:
|
||||
- message: serviceCIDR is immutable
|
||||
rule: self == oldSelf
|
||||
sync:
|
||||
default: {}
|
||||
description: Sync specifies the resources types that will be synced
|
||||
from virtual cluster to host cluster.
|
||||
properties:
|
||||
configmaps:
|
||||
default:
|
||||
enabled: true
|
||||
description: ConfigMaps resources sync configuration.
|
||||
properties:
|
||||
enabled:
|
||||
description: Enabled is an on/off switch for syncing resources.
|
||||
type: boolean
|
||||
selector:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
Selector specifies set of labels of the resources that will be synced, if empty
|
||||
then all resources of the given type will be synced.
|
||||
type: object
|
||||
type: object
|
||||
ingresses:
|
||||
default:
|
||||
enabled: false
|
||||
description: Ingresses resources sync configuration.
|
||||
properties:
|
||||
enabled:
|
||||
description: Enabled is an on/off switch for syncing resources.
|
||||
type: boolean
|
||||
selector:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
Selector specifies set of labels of the resources that will be synced, if empty
|
||||
then all resources of the given type will be synced.
|
||||
type: object
|
||||
type: object
|
||||
persistentVolumeClaims:
|
||||
default:
|
||||
enabled: true
|
||||
description: PersistentVolumeClaims resources sync configuration.
|
||||
properties:
|
||||
enabled:
|
||||
description: Enabled is an on/off switch for syncing resources.
|
||||
type: boolean
|
||||
selector:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
Selector specifies set of labels of the resources that will be synced, if empty
|
||||
then all resources of the given type will be synced.
|
||||
type: object
|
||||
type: object
|
||||
priorityClasses:
|
||||
default:
|
||||
enabled: false
|
||||
description: PriorityClasses resources sync configuration.
|
||||
properties:
|
||||
enabled:
|
||||
description: Enabled is an on/off switch for syncing resources.
|
||||
type: boolean
|
||||
selector:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
Selector specifies set of labels of the resources that will be synced, if empty
|
||||
then all resources of the given type will be synced.
|
||||
type: object
|
||||
type: object
|
||||
secrets:
|
||||
default:
|
||||
enabled: true
|
||||
description: Secrets resources sync configuration.
|
||||
properties:
|
||||
enabled:
|
||||
description: Enabled is an on/off switch for syncing resources.
|
||||
type: boolean
|
||||
selector:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
Selector specifies set of labels of the resources that will be synced, if empty
|
||||
then all resources of the given type will be synced.
|
||||
type: object
|
||||
type: object
|
||||
services:
|
||||
default:
|
||||
enabled: true
|
||||
description: Services resources sync configuration.
|
||||
properties:
|
||||
enabled:
|
||||
description: Enabled is an on/off switch for syncing resources.
|
||||
type: boolean
|
||||
selector:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
Selector specifies set of labels of the resources that will be synced, if empty
|
||||
then all resources of the given type will be synced.
|
||||
type: object
|
||||
type: object
|
||||
type: object
|
||||
tlsSANs:
|
||||
description: TLSSANs are the subjectAlternativeNames for the certificate
|
||||
the K3s server will use.
|
||||
description: TLSSANs specifies subject alternative names for the K3s
|
||||
server certificate.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
tokenSecretRef:
|
||||
description: |-
|
||||
TokenSecretRef is Secret reference used as a token join server and worker nodes to the cluster. The controller
|
||||
assumes that the secret has a field "token" in its data, any other fields in the secret will be ignored.
|
||||
TokenSecretRef is a Secret reference containing the token used by worker nodes to join the cluster.
|
||||
The Secret must have a "token" field in its data.
|
||||
properties:
|
||||
name:
|
||||
description: name is unique within a namespace to reference a
|
||||
@@ -238,37 +703,125 @@ spec:
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
version:
|
||||
description: Version is a string representing the Kubernetes version
|
||||
to be used by the virtual nodes.
|
||||
description: |-
|
||||
Version is the K3s version to use for the virtual nodes.
|
||||
It should follow the K3s versioning convention (e.g., v1.28.2-k3s1).
|
||||
If not specified, the Kubernetes version of the host node will be used.
|
||||
type: string
|
||||
workerLimit:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
|
||||
x-kubernetes-int-or-string: true
|
||||
description: WorkerLimit specifies resource limits for agent nodes.
|
||||
type: object
|
||||
type: object
|
||||
status:
|
||||
default: {}
|
||||
description: Status reflects the observed state of the Cluster.
|
||||
properties:
|
||||
clusterCIDR:
|
||||
description: ClusterCIDR is the CIDR range for pod IPs.
|
||||
type: string
|
||||
clusterDNS:
|
||||
description: ClusterDNS is the IP address for the CoreDNS service.
|
||||
type: string
|
||||
conditions:
|
||||
description: Conditions are the individual conditions for the cluster
|
||||
set.
|
||||
items:
|
||||
description: Condition contains details for one aspect of the current
|
||||
state of this API Resource.
|
||||
properties:
|
||||
lastTransitionTime:
|
||||
description: |-
|
||||
lastTransitionTime is the last time the condition transitioned from one status to another.
|
||||
This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
|
||||
format: date-time
|
||||
type: string
|
||||
message:
|
||||
description: |-
|
||||
message is a human readable message indicating details about the transition.
|
||||
This may be an empty string.
|
||||
maxLength: 32768
|
||||
type: string
|
||||
observedGeneration:
|
||||
description: |-
|
||||
observedGeneration represents the .metadata.generation that the condition was set based upon.
|
||||
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
|
||||
with respect to the current state of the instance.
|
||||
format: int64
|
||||
minimum: 0
|
||||
type: integer
|
||||
reason:
|
||||
description: |-
|
||||
reason contains a programmatic identifier indicating the reason for the condition's last transition.
|
||||
Producers of specific condition types may define expected values and meanings for this field,
|
||||
and whether the values are considered a guaranteed API.
|
||||
The value should be a CamelCase string.
|
||||
This field may not be empty.
|
||||
maxLength: 1024
|
||||
minLength: 1
|
||||
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
|
||||
type: string
|
||||
status:
|
||||
description: status of the condition, one of True, False, Unknown.
|
||||
enum:
|
||||
- "True"
|
||||
- "False"
|
||||
- Unknown
|
||||
type: string
|
||||
type:
|
||||
description: type of condition in CamelCase or in foo.example.com/CamelCase.
|
||||
maxLength: 316
|
||||
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
|
||||
type: string
|
||||
required:
|
||||
- lastTransitionTime
|
||||
- message
|
||||
- reason
|
||||
- status
|
||||
- type
|
||||
type: object
|
||||
type: array
|
||||
hostVersion:
|
||||
description: HostVersion is the Kubernetes version of the host node.
|
||||
type: string
|
||||
kubeletPort:
|
||||
description: KubeletPort specifies the port used by k3k-kubelet in
|
||||
shared mode.
|
||||
type: integer
|
||||
phase:
|
||||
default: Unknown
|
||||
description: Phase is a high-level summary of the cluster's current
|
||||
lifecycle state.
|
||||
enum:
|
||||
- Pending
|
||||
- Provisioning
|
||||
- Ready
|
||||
- Failed
|
||||
- Terminating
|
||||
- Unknown
|
||||
type: string
|
||||
policyName:
|
||||
description: PolicyName specifies the virtual cluster policy name
|
||||
bound to the virtual cluster.
|
||||
type: string
|
||||
persistence:
|
||||
properties:
|
||||
storageClassName:
|
||||
type: string
|
||||
storageRequestSize:
|
||||
type: string
|
||||
type:
|
||||
default: dynamic
|
||||
description: PersistenceMode is the storage mode of a Cluster.
|
||||
type: string
|
||||
required:
|
||||
- type
|
||||
type: object
|
||||
serviceCIDR:
|
||||
description: ServiceCIDR is the CIDR range for service IPs.
|
||||
type: string
|
||||
tlsSANs:
|
||||
description: TLSSANs specifies subject alternative names for the K3s
|
||||
server certificate.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
webhookPort:
|
||||
description: WebhookPort specifies the port used by webhook in k3k-kubelet
|
||||
in shared mode.
|
||||
type: integer
|
||||
type: object
|
||||
type: object
|
||||
served: true
|
||||
|
||||
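To make the Cluster schema above easier to scan, here is a minimal Cluster manifest sketch using a few of the fields it defines (servers, version, serviceCIDR, sync); the name, namespace, and concrete values are hypothetical and not part of this change:

apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
  name: example
  namespace: k3k-example
spec:
  servers: 1
  version: v1.31.5-k3s1
  serviceCIDR: 10.43.0.0/16
  sync:
    secrets:
      enabled: true
      selector:
        app: demo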
@@ -1,210 +0,0 @@
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.14.0
|
||||
name: clustersets.k3k.io
|
||||
spec:
|
||||
group: k3k.io
|
||||
names:
|
||||
kind: ClusterSet
|
||||
listKind: ClusterSetList
|
||||
plural: clustersets
|
||||
singular: clusterset
|
||||
scope: Namespaced
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
properties:
|
||||
apiVersion:
|
||||
description: |-
|
||||
APIVersion defines the versioned schema of this representation of an object.
|
||||
Servers should convert recognized schemas to the latest internal value, and
|
||||
may reject unrecognized values.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
|
||||
type: string
|
||||
kind:
|
||||
description: |-
|
||||
Kind is a string value representing the REST resource this object represents.
|
||||
Servers may infer this from the endpoint the client submits requests to.
|
||||
Cannot be updated.
|
||||
In CamelCase.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
default: {}
|
||||
description: Spec is the spec of the ClusterSet
|
||||
properties:
|
||||
allowedNodeTypes:
|
||||
default:
|
||||
- shared
|
||||
description: AllowedNodeTypes are the allowed cluster provisioning
|
||||
modes. Defaults to [shared].
|
||||
items:
|
||||
description: ClusterMode is the possible provisioning mode of a
|
||||
Cluster.
|
||||
enum:
|
||||
- shared
|
||||
- virtual
|
||||
type: string
|
||||
minItems: 1
|
||||
type: array
|
||||
x-kubernetes-validations:
|
||||
- message: mode is immutable
|
||||
rule: self == oldSelf
|
||||
defaultLimits:
|
||||
description: DefaultLimits are the limits used for servers/agents
|
||||
when a cluster in the set doesn't provide any
|
||||
properties:
|
||||
serverLimit:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
|
||||
x-kubernetes-int-or-string: true
|
||||
description: ServerLimit is the limits (cpu/mem) that apply to
|
||||
the server nodes
|
||||
type: object
|
||||
workerLimit:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
|
||||
x-kubernetes-int-or-string: true
|
||||
description: WorkerLimit is the limits (cpu/mem) that apply to
|
||||
the agent nodes
|
||||
type: object
|
||||
type: object
|
||||
defaultNodeSelector:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: DefaultNodeSelector is the node selector that applies
|
||||
to all clusters (server + agent) in the set
|
||||
type: object
|
||||
defaultPriorityClass:
|
||||
description: DefaultPriorityClass is the priorityClassName applied
|
||||
to all pods of all clusters in the set
|
||||
type: string
|
||||
disableNetworkPolicy:
|
||||
description: DisableNetworkPolicy is an option that will disable the
|
||||
creation of a default networkpolicy for cluster isolation
|
||||
type: boolean
|
||||
maxLimits:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
|
||||
x-kubernetes-int-or-string: true
|
||||
description: MaxLimits are the limits that apply to all clusters (server
|
||||
+ agent) in the set
|
||||
type: object
|
||||
podSecurityAdmissionLevel:
|
||||
description: PodSecurityAdmissionLevel is the policy level applied
|
||||
to the pods in the namespace.
|
||||
enum:
|
||||
- privileged
|
||||
- baseline
|
||||
- restricted
|
||||
type: string
|
||||
type: object
|
||||
status:
|
||||
description: Status is the status of the ClusterSet
|
||||
properties:
|
||||
conditions:
|
||||
description: Conditions are the individual conditions for the cluster
|
||||
set
|
||||
items:
|
||||
description: "Condition contains details for one aspect of the current
|
||||
state of this API Resource.\n---\nThis struct is intended for
|
||||
direct use as an array at the field path .status.conditions. For
|
||||
example,\n\n\n\ttype FooStatus struct{\n\t // Represents the
|
||||
observations of a foo's current state.\n\t // Known .status.conditions.type
|
||||
are: \"Available\", \"Progressing\", and \"Degraded\"\n\t //
|
||||
+patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t
|
||||
\ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\"
|
||||
patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t
|
||||
\ // other fields\n\t}"
|
||||
properties:
|
||||
lastTransitionTime:
|
||||
description: |-
|
||||
lastTransitionTime is the last time the condition transitioned from one status to another.
|
||||
This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
|
||||
format: date-time
|
||||
type: string
|
||||
message:
|
||||
description: |-
|
||||
message is a human readable message indicating details about the transition.
|
||||
This may be an empty string.
|
||||
maxLength: 32768
|
||||
type: string
|
||||
observedGeneration:
|
||||
description: |-
|
||||
observedGeneration represents the .metadata.generation that the condition was set based upon.
|
||||
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
|
||||
with respect to the current state of the instance.
|
||||
format: int64
|
||||
minimum: 0
|
||||
type: integer
|
||||
reason:
|
||||
description: |-
|
||||
reason contains a programmatic identifier indicating the reason for the condition's last transition.
|
||||
Producers of specific condition types may define expected values and meanings for this field,
|
||||
and whether the values are considered a guaranteed API.
|
||||
The value should be a CamelCase string.
|
||||
This field may not be empty.
|
||||
maxLength: 1024
|
||||
minLength: 1
|
||||
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
|
||||
type: string
|
||||
status:
|
||||
description: status of the condition, one of True, False, Unknown.
|
||||
enum:
|
||||
- "True"
|
||||
- "False"
|
||||
- Unknown
|
||||
type: string
|
||||
type:
|
||||
description: |-
|
||||
type of condition in CamelCase or in foo.example.com/CamelCase.
|
||||
---
|
||||
Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be
|
||||
useful (see .node.status.conditions), the ability to deconflict is important.
|
||||
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
|
||||
maxLength: 316
|
||||
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
|
||||
type: string
|
||||
required:
|
||||
- lastTransitionTime
|
||||
- message
|
||||
- reason
|
||||
- status
|
||||
- type
|
||||
type: object
|
||||
type: array
|
||||
lastUpdateTime:
|
||||
description: LastUpdate is the timestamp when the status was last
|
||||
updated
|
||||
type: string
|
||||
observedGeneration:
|
||||
description: ObservedGeneration was the generation at the time the
|
||||
status was updated.
|
||||
format: int64
|
||||
type: integer
|
||||
summary:
|
||||
description: Summary is a summary of the status
|
||||
type: string
|
||||
type: object
|
||||
required:
|
||||
- spec
|
||||
type: object
|
||||
served: true
|
||||
storage: true
|
||||
subresources:
|
||||
status: {}
|
||||
412
charts/k3k/crds/k3k.io_virtualclusterpolicies.yaml
Normal file
@@ -0,0 +1,412 @@
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.16.0
|
||||
name: virtualclusterpolicies.k3k.io
|
||||
spec:
|
||||
group: k3k.io
|
||||
names:
|
||||
kind: VirtualClusterPolicy
|
||||
listKind: VirtualClusterPolicyList
|
||||
plural: virtualclusterpolicies
|
||||
shortNames:
|
||||
- vcp
|
||||
singular: virtualclusterpolicy
|
||||
scope: Cluster
|
||||
versions:
|
||||
- additionalPrinterColumns:
|
||||
- jsonPath: .spec.allowedMode
|
||||
name: Mode
|
||||
type: string
|
||||
name: v1alpha1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
description: |-
|
||||
VirtualClusterPolicy allows defining common configurations and constraints
|
||||
for clusters within a clusterpolicy.
|
||||
properties:
|
||||
apiVersion:
|
||||
description: |-
|
||||
APIVersion defines the versioned schema of this representation of an object.
|
||||
Servers should convert recognized schemas to the latest internal value, and
|
||||
may reject unrecognized values.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
|
||||
type: string
|
||||
kind:
|
||||
description: |-
|
||||
Kind is a string value representing the REST resource this object represents.
|
||||
Servers may infer this from the endpoint the client submits requests to.
|
||||
Cannot be updated.
|
||||
In CamelCase.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
default: {}
|
||||
description: Spec defines the desired state of the VirtualClusterPolicy.
|
||||
properties:
|
||||
allowedMode:
|
||||
default: shared
|
||||
description: AllowedMode specifies the allowed cluster provisioning
|
||||
mode. Defaults to "shared".
|
||||
enum:
|
||||
- shared
|
||||
- virtual
|
||||
type: string
|
||||
x-kubernetes-validations:
|
||||
- message: mode is immutable
|
||||
rule: self == oldSelf
|
||||
defaultNodeSelector:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: DefaultNodeSelector specifies the node selector that
|
||||
applies to all clusters (server + agent) in the target Namespace.
|
||||
type: object
|
||||
defaultPriorityClass:
|
||||
description: DefaultPriorityClass specifies the priorityClassName
|
||||
applied to all pods of all clusters in the target Namespace.
|
||||
type: string
|
||||
disableNetworkPolicy:
|
||||
description: DisableNetworkPolicy indicates whether to disable the
|
||||
creation of a default network policy for cluster isolation.
|
||||
type: boolean
|
||||
limit:
|
||||
description: |-
|
||||
Limit specifies the LimitRange that will be applied to all pods within the VirtualClusterPolicy
|
||||
to set defaults and constraints (min/max)
|
||||
properties:
|
||||
limits:
|
||||
description: Limits is the list of LimitRangeItem objects that
|
||||
are enforced.
|
||||
items:
|
||||
description: LimitRangeItem defines a min/max usage limit for
|
||||
any resource that matches on kind.
|
||||
properties:
|
||||
default:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
|
||||
x-kubernetes-int-or-string: true
|
||||
description: Default resource requirement limit value by
|
||||
resource name if resource limit is omitted.
|
||||
type: object
|
||||
defaultRequest:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
|
||||
x-kubernetes-int-or-string: true
|
||||
description: DefaultRequest is the default resource requirement
|
||||
request value by resource name if resource request is
|
||||
omitted.
|
||||
type: object
|
||||
max:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
|
||||
x-kubernetes-int-or-string: true
|
||||
description: Max usage constraints on this kind by resource
|
||||
name.
|
||||
type: object
|
||||
maxLimitRequestRatio:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
|
||||
x-kubernetes-int-or-string: true
|
||||
description: MaxLimitRequestRatio if specified, the named
|
||||
resource must have a request and limit that are both non-zero
|
||||
where limit divided by request is less than or equal to
|
||||
the enumerated value; this represents the max burst for
|
||||
the named resource.
|
||||
type: object
|
||||
min:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
|
||||
x-kubernetes-int-or-string: true
|
||||
description: Min usage constraints on this kind by resource
|
||||
name.
|
||||
type: object
|
||||
type:
|
||||
description: Type of resource that this limit applies to.
|
||||
type: string
|
||||
required:
|
||||
- type
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- limits
|
||||
type: object
|
||||
podSecurityAdmissionLevel:
|
||||
description: PodSecurityAdmissionLevel specifies the pod security
|
||||
admission level applied to the pods in the namespace.
|
||||
enum:
|
||||
- privileged
|
||||
- baseline
|
||||
- restricted
|
||||
type: string
|
||||
quota:
|
||||
description: Quota specifies the resource limits for clusters within
|
||||
a clusterpolicy.
|
||||
properties:
|
||||
hard:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
|
||||
x-kubernetes-int-or-string: true
|
||||
description: |-
|
||||
hard is the set of desired hard limits for each named resource.
|
||||
More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
|
||||
type: object
|
||||
scopeSelector:
|
||||
description: |-
|
||||
scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota
|
||||
but expressed using ScopeSelectorOperator in combination with possible values.
|
||||
For a resource to match, both scopes AND scopeSelector (if specified in spec), must be matched.
|
||||
properties:
|
||||
matchExpressions:
|
||||
description: A list of scope selector requirements by scope
|
||||
of the resources.
|
||||
items:
|
||||
description: |-
|
||||
A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator
|
||||
that relates the scope name and values.
|
||||
properties:
|
||||
operator:
|
||||
description: |-
|
||||
Represents a scope's relationship to a set of values.
|
||||
Valid operators are In, NotIn, Exists, DoesNotExist.
|
||||
type: string
|
||||
scopeName:
|
||||
description: The name of the scope that the selector
|
||||
applies to.
|
||||
type: string
|
||||
values:
|
||||
description: |-
|
||||
An array of string values. If the operator is In or NotIn,
|
||||
the values array must be non-empty. If the operator is Exists or DoesNotExist,
|
||||
the values array must be empty.
|
||||
This array is replaced during a strategic merge patch.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- operator
|
||||
- scopeName
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
scopes:
|
||||
description: |-
|
||||
A collection of filters that must match each object tracked by a quota.
|
||||
If not specified, the quota matches all objects.
|
||||
items:
|
||||
description: A ResourceQuotaScope defines a filter that must
|
||||
match each object tracked by a quota
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
type: object
|
||||
sync:
|
||||
default: {}
|
||||
description: Sync specifies the resources types that will be synced
|
||||
from virtual cluster to host cluster.
|
||||
properties:
|
||||
configmaps:
|
||||
default:
|
||||
enabled: true
|
||||
description: ConfigMaps resources sync configuration.
|
||||
properties:
|
||||
enabled:
|
||||
description: Enabled is an on/off switch for syncing resources.
|
||||
type: boolean
|
||||
selector:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
Selector specifies set of labels of the resources that will be synced, if empty
|
||||
then all resources of the given type will be synced.
|
||||
type: object
|
||||
type: object
|
||||
ingresses:
|
||||
default:
|
||||
enabled: false
|
||||
description: Ingresses resources sync configuration.
|
||||
properties:
|
||||
enabled:
|
||||
description: Enabled is an on/off switch for syncing resources.
|
||||
type: boolean
|
||||
selector:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
Selector specifies set of labels of the resources that will be synced, if empty
|
||||
then all resources of the given type will be synced.
|
||||
type: object
|
||||
type: object
|
||||
persistentVolumeClaims:
|
||||
default:
|
||||
enabled: true
|
||||
description: PersistentVolumeClaims resources sync configuration.
|
||||
properties:
|
||||
enabled:
|
||||
description: Enabled is an on/off switch for syncing resources.
|
||||
type: boolean
|
||||
selector:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
Selector specifies set of labels of the resources that will be synced, if empty
|
||||
then all resources of the given type will be synced.
|
||||
type: object
|
||||
type: object
|
||||
priorityClasses:
|
||||
default:
|
||||
enabled: false
|
||||
description: PriorityClasses resources sync configuration.
|
||||
properties:
|
||||
enabled:
|
||||
description: Enabled is an on/off switch for syncing resources.
|
||||
type: boolean
|
||||
selector:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
Selector specifies set of labels of the resources that will be synced, if empty
|
||||
then all resources of the given type will be synced.
|
||||
type: object
|
||||
type: object
|
||||
secrets:
|
||||
default:
|
||||
enabled: true
|
||||
description: Secrets resources sync configuration.
|
||||
properties:
|
||||
enabled:
|
||||
description: Enabled is an on/off switch for syncing resources.
|
||||
type: boolean
|
||||
selector:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
Selector specifies set of labels of the resources that will be synced, if empty
|
||||
then all resources of the given type will be synced.
|
||||
type: object
|
||||
type: object
|
||||
services:
|
||||
default:
|
||||
enabled: true
|
||||
description: Services resources sync configuration.
|
||||
properties:
|
||||
enabled:
|
||||
description: Enabled is an on/off switch for syncing resources.
|
||||
type: boolean
|
||||
selector:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
Selector specifies set of labels of the resources that will be synced, if empty
|
||||
then all resources of the given type will be synced.
|
||||
type: object
|
||||
type: object
|
||||
type: object
|
||||
type: object
|
||||
status:
|
||||
description: Status reflects the observed state of the VirtualClusterPolicy.
|
||||
properties:
|
||||
conditions:
|
||||
description: Conditions are the individual conditions for the cluster
|
||||
set.
|
||||
items:
|
||||
description: Condition contains details for one aspect of the current
|
||||
state of this API Resource.
|
||||
properties:
|
||||
lastTransitionTime:
|
||||
description: |-
|
||||
lastTransitionTime is the last time the condition transitioned from one status to another.
|
||||
This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
|
||||
format: date-time
|
||||
type: string
|
||||
message:
|
||||
description: |-
|
||||
message is a human readable message indicating details about the transition.
|
||||
This may be an empty string.
|
||||
maxLength: 32768
|
||||
type: string
|
||||
observedGeneration:
|
||||
description: |-
|
||||
observedGeneration represents the .metadata.generation that the condition was set based upon.
|
||||
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
|
||||
with respect to the current state of the instance.
|
||||
format: int64
|
||||
minimum: 0
|
||||
type: integer
|
||||
reason:
|
||||
description: |-
|
||||
reason contains a programmatic identifier indicating the reason for the condition's last transition.
|
||||
Producers of specific condition types may define expected values and meanings for this field,
|
||||
and whether the values are considered a guaranteed API.
|
||||
The value should be a CamelCase string.
|
||||
This field may not be empty.
|
||||
maxLength: 1024
|
||||
minLength: 1
|
||||
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
|
||||
type: string
|
||||
status:
|
||||
description: status of the condition, one of True, False, Unknown.
|
||||
enum:
|
||||
- "True"
|
||||
- "False"
|
||||
- Unknown
|
||||
type: string
|
||||
type:
|
||||
description: type of condition in CamelCase or in foo.example.com/CamelCase.
|
||||
maxLength: 316
|
||||
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
|
||||
type: string
|
||||
required:
|
||||
- lastTransitionTime
|
||||
- message
|
||||
- reason
|
||||
- status
|
||||
- type
|
||||
type: object
|
||||
type: array
|
||||
lastUpdateTime:
|
||||
description: LastUpdate is the timestamp when the status was last
|
||||
updated.
|
||||
type: string
|
||||
observedGeneration:
|
||||
description: ObservedGeneration was the generation at the time the
|
||||
status was updated.
|
||||
format: int64
|
||||
type: integer
|
||||
summary:
|
||||
description: Summary is a summary of the status.
|
||||
type: string
|
||||
type: object
|
||||
required:
|
||||
- metadata
|
||||
- spec
|
||||
type: object
|
||||
served: true
|
||||
storage: true
|
||||
subresources:
|
||||
status: {}
|
||||
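For readability, a minimal VirtualClusterPolicy manifest built from the schema above might look like the following sketch; the policy name and all values are hypothetical:

apiVersion: k3k.io/v1alpha1
kind: VirtualClusterPolicy
metadata:
  name: example-policy
spec:
  allowedMode: shared
  podSecurityAdmissionLevel: baseline
  quota:
    hard:
      cpu: "8"
      memory: 16Gi
      pods: "50"
  sync:
    ingresses:
      enabled: true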
@@ -60,3 +60,54 @@ Create the name of the service account to use
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

{{/*
Print the image pull secrets in the expected format (an array of objects with one possible field, "name").
*/}}
{{- define "image.pullSecrets" }}
{{- $imagePullSecrets := list }}
{{- range . }}
{{- if kindIs "string" . }}
{{- $imagePullSecrets = append $imagePullSecrets (dict "name" .) }}
{{- else }}
{{- $imagePullSecrets = append $imagePullSecrets . }}
{{- end }}
{{- end }}
{{- toYaml $imagePullSecrets }}
{{- end }}

{{- define "controller.registry" }}
{{- $registry := .Values.global.imageRegistry | default .Values.controller.image.registry -}}
{{- if $registry }}
{{- $registry }}/
{{- else }}
{{- $registry }}
{{- end }}
{{- end }}

{{- define "server.registry" }}
{{- $registry := .Values.global.imageRegistry | default .Values.server.image.registry -}}
{{- if $registry }}
{{- $registry }}/
{{- else }}
{{- $registry }}
{{- end }}
{{- end }}

{{- define "agent.virtual.registry" }}
{{- $registry := .Values.global.imageRegistry | default .Values.agent.virtual.image.registry -}}
{{- if $registry }}
{{- $registry }}/
{{- else }}
{{- $registry }}
{{- end }}
{{- end }}

{{- define "agent.shared.registry" }}
{{- $registry := .Values.global.imageRegistry | default .Values.agent.shared.image.registry -}}
{{- if $registry }}
{{- $registry }}/
{{- else }}
{{- $registry }}
{{- end }}
{{- end }}
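As a rough illustration of how these registry helpers compose (the registry name and tag below are hypothetical), a values fragment such as

global:
  imageRegistry: registry.example.com
controller:
  image:
    repository: rancher/k3k
    tag: v0.3.1

would make the controller image in the deployment template render as registry.example.com/rancher/k3k:v0.3.1, while leaving both global.imageRegistry and controller.image.registry empty makes the helper emit nothing, so the bare rancher/k3k repository is used.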
@@ -6,7 +6,7 @@ metadata:
|
||||
{{- include "k3k.labels" . | nindent 4 }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
replicas: {{ .Values.image.replicaCount }}
|
||||
replicas: {{ .Values.controller.replicaCount }}
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "k3k.selectorLabels" . | nindent 6 }}
|
||||
@@ -15,17 +15,58 @@ spec:
|
||||
labels:
|
||||
{{- include "k3k.selectorLabels" . | nindent 8 }}
|
||||
spec:
|
||||
imagePullSecrets: {{- include "image.pullSecrets" (concat .Values.controller.imagePullSecrets .Values.global.imagePullSecrets) | nindent 8 }}
|
||||
containers:
|
||||
- image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
- image: "{{- include "controller.registry" .}}{{ .Values.controller.image.repository }}:{{ .Values.controller.image.tag | default .Chart.AppVersion }}"
|
||||
imagePullPolicy: {{ .Values.controller.image.pullPolicy }}
|
||||
name: {{ .Chart.Name }}
|
||||
resources:
|
||||
requests:
|
||||
cpu: {{ .Values.controller.resources.requests.cpu }}
|
||||
memory: {{ .Values.controller.resources.requests.memory }}
|
||||
limits:
|
||||
{{ if .Values.controller.resources.limits.cpu }}
|
||||
cpu: {{ .Values.controller.resources.limits.cpu }}
|
||||
{{ end }}
|
||||
{{ if .Values.controller.resources.limits.memory }}
|
||||
memory: {{ .Values.controller.resources.limits.memory }}
|
||||
{{ end}}
|
||||
args:
|
||||
- k3k
|
||||
{{- range $key, $value := include "image.pullSecrets" (concat .Values.agent.imagePullSecrets .Values.global.imagePullSecrets) | fromYamlArray }}
|
||||
- --agent-image-pull-secret
|
||||
- {{ .name }}
|
||||
{{- end }}
|
||||
{{- range $key, $value := include "image.pullSecrets" (concat .Values.server.imagePullSecrets .Values.global.imagePullSecrets) | fromYamlArray }}
|
||||
- --server-image-pull-secret
|
||||
- {{ .name }}
|
||||
{{- end }}
|
||||
env:
|
||||
- name: CLUSTER_CIDR
|
||||
value: {{ .Values.host.clusterCIDR }}
|
||||
- name: SHARED_AGENT_IMAGE
|
||||
value: "{{ .Values.sharedAgent.image.repository }}:{{ default .Chart.AppVersion .Values.sharedAgent.image.tag }}"
|
||||
- name: SHARED_AGENT_PULL_POLICY
|
||||
value: {{ .Values.sharedAgent.image.pullPolicy }}
|
||||
value: "{{- include "agent.shared.registry" .}}{{ .Values.agent.shared.image.repository }}:{{ default .Chart.AppVersion .Values.agent.shared.image.tag }}"
|
||||
- name: SHARED_AGENT_IMAGE_PULL_POLICY
|
||||
value: {{ .Values.agent.shared.image.pullPolicy }}
|
||||
- name: VIRTUAL_AGENT_IMAGE
|
||||
value: "{{- include "agent.virtual.registry" .}}{{ .Values.agent.virtual.image.repository }}"
|
||||
- name: VIRTUAL_AGENT_IMAGE_PULL_POLICY
|
||||
value: {{ .Values.agent.virtual.image.pullPolicy }}
|
||||
- name: K3S_SERVER_IMAGE
|
||||
value: "{{- include "server.registry" .}}{{ .Values.server.image.repository }}"
|
||||
- name: K3S_SERVER_IMAGE_PULL_POLICY
|
||||
value: {{ .Values.server.image.pullPolicy }}
|
||||
- name: KUBELET_PORT_RANGE
|
||||
value: {{ .Values.agent.shared.kubeletPortRange }}
|
||||
- name: WEBHOOK_PORT_RANGE
|
||||
value: {{ .Values.agent.shared.webhookPortRange }}
|
||||
- name: CONTROLLER_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
{{- with .Values.extraEnv }}
|
||||
{{- toYaml . | nindent 10 }}
|
||||
{{- end }}
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
name: https
|
||||
|
||||
@@ -16,7 +16,7 @@ subjects:
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: {{ include "k3k.fullname" . }}-node-proxy
|
||||
name: k3k-kubelet-node
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
@@ -30,8 +30,29 @@ rules:
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: {{ include "k3k.fullname" . }}-node-proxy
|
||||
name: k3k-kubelet-node
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: {{ include "k3k.fullname" . }}-node-proxy
|
||||
name: k3k-kubelet-node
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: k3k-priorityclass
|
||||
rules:
|
||||
- apiGroups:
|
||||
- "scheduling.k8s.io"
|
||||
resources:
|
||||
- "priorityclasses"
|
||||
verbs:
|
||||
- "*"
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: k3k-priorityclass
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: k3k-priorityclass
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
|
||||
@@ -1,18 +1,11 @@
|
||||
replicaCount: 1
|
||||
|
||||
image:
|
||||
repository: rancher/k3k
|
||||
tag: ""
|
||||
pullPolicy: ""
|
||||
|
||||
imagePullSecrets: []
|
||||
nameOverride: ""
|
||||
fullnameOverride: ""
|
||||
|
||||
host:
|
||||
# clusterCIDR specifies the clusterCIDR that will be added to the default networkpolicy for clustersets, if not set
|
||||
# the controller will collect the PodCIDRs of all the nodes on the system.
|
||||
clusterCIDR: ""
|
||||
global:
|
||||
# -- Global override for container image registry
|
||||
imageRegistry: ""
|
||||
# -- Global override for container image registry pull secrets
|
||||
imagePullSecrets: []
|
||||
|
||||
serviceAccount:
|
||||
# Specifies whether a service account should be created
|
||||
@@ -21,9 +14,65 @@ serviceAccount:
|
||||
# If not set and create is true, a name is generated using the fullname template
|
||||
name: ""
|
||||
|
||||
# configuration related to the shared agent mode in k3k
|
||||
sharedAgent:
|
||||
host:
|
||||
# clusterCIDR specifies the clusterCIDR that will be added to the default networkpolicy, if not set
|
||||
# the controller will collect the PodCIDRs of all the nodes on the system.
|
||||
clusterCIDR: ""
|
||||
|
||||
controller:
|
||||
replicaCount: 1
|
||||
image:
|
||||
repository: "rancher/k3k-kubelet"
|
||||
registry: ""
|
||||
repository: rancher/k3k
|
||||
tag: ""
|
||||
pullPolicy: ""
|
||||
imagePullSecrets: []
|
||||
# extraEnv allows you to specify additional environment variables for the k3k controller deployment.
|
||||
# This is useful for passing custom configuration or secrets to the controller.
|
||||
# For example:
|
||||
# extraEnv:
|
||||
# - name: MY_CUSTOM_VAR
|
||||
# value: "my_custom_value"
|
||||
# - name: ANOTHER_VAR
|
||||
# valueFrom:
|
||||
# secretKeyRef:
|
||||
# name: my-secret
|
||||
# key: my-key
|
||||
extraEnv: []
|
||||
# resources limits and requests allows you to set resources limits and requests for CPU and Memory
|
||||
resources:
|
||||
requests:
|
||||
cpu: "100m"
|
||||
memory: "100Mi"
|
||||
limits:
|
||||
cpu: ""
|
||||
memory: ""
|
||||
|
||||
# configuration related to the agent component in k3k
|
||||
agent:
|
||||
imagePullSecrets: []
|
||||
# configuration related to agent in shared mode
|
||||
shared:
|
||||
# Specifies the port range that will be used for k3k-kubelet api if mirrorHostNodes is enabled
|
||||
kubeletPortRange: "50000-51000"
|
||||
# Specifies the port range that will be used for webhook if mirrorHostNodes is enabled
|
||||
webhookPortRange: "51001-52000"
|
||||
image:
|
||||
registry: ""
|
||||
repository: "rancher/k3k-kubelet"
|
||||
tag: ""
|
||||
pullPolicy: ""
|
||||
# configuration related to agent in virtual mode
|
||||
virtual:
|
||||
image:
|
||||
registry: ""
|
||||
repository: "rancher/k3s"
|
||||
pullPolicy: ""
|
||||
|
||||
# configuration related to k3s server component in k3k
|
||||
server:
|
||||
imagePullSecrets: []
|
||||
image:
|
||||
registry:
|
||||
repository: "rancher/k3s"
|
||||
pullPolicy: ""
|
||||
|
||||
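As a sketch of how the reorganized values could be overridden at install time (the registry, secret name, and sizes are hypothetical), a custom values file passed to helm via -f/--values might look like:

global:
  imageRegistry: registry.example.com
  imagePullSecrets:
    - my-registry-secret
controller:
  replicaCount: 2
  resources:
    limits:
      cpu: 500m
      memory: 256Mi
agent:
  shared:
    image:
      pullPolicy: IfNotPresent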
20
cli/cmds/cluster.go
Normal file
@@ -0,0 +1,20 @@
package cmds

import (
	"github.com/spf13/cobra"
)

func NewClusterCmd(appCtx *AppContext) *cobra.Command {
	cmd := &cobra.Command{
		Use:   "cluster",
		Short: "cluster command",
	}

	cmd.AddCommand(
		NewClusterCreateCmd(appCtx),
		NewClusterDeleteCmd(appCtx),
		NewClusterListCmd(appCtx),
	)

	return cmd
}
@@ -1,16 +0,0 @@
package cluster

import (
	"github.com/urfave/cli/v2"
)

func NewCommand() *cli.Command {
	return &cli.Command{
		Name:  "cluster",
		Usage: "cluster command",
		Subcommands: []*cli.Command{
			NewCreateCmd(),
			NewDeleteCmd(),
		},
	}
}
@@ -1,204 +0,0 @@
|
||||
package cluster
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rancher/k3k/cli/cmds"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
|
||||
"github.com/rancher/k3k/pkg/controller/kubeconfig"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli/v2"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"k8s.io/utils/ptr"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
var Scheme = runtime.NewScheme()
|
||||
|
||||
func init() {
|
||||
_ = clientgoscheme.AddToScheme(Scheme)
|
||||
_ = v1alpha1.AddToScheme(Scheme)
|
||||
}
|
||||
|
||||
type CreateConfig struct {
|
||||
token string
|
||||
clusterCIDR string
|
||||
serviceCIDR string
|
||||
servers int
|
||||
agents int
|
||||
serverArgs cli.StringSlice
|
||||
agentArgs cli.StringSlice
|
||||
persistenceType string
|
||||
storageClassName string
|
||||
version string
|
||||
mode string
|
||||
kubeconfigServerHost string
|
||||
}
|
||||
|
||||
func NewCreateCmd() *cli.Command {
|
||||
createConfig := &CreateConfig{}
|
||||
createFlags := NewCreateFlags(createConfig)
|
||||
|
||||
return &cli.Command{
|
||||
Name: "create",
|
||||
Usage: "Create new cluster",
|
||||
UsageText: "k3kcli cluster create [command options] NAME",
|
||||
Action: createAction(createConfig),
|
||||
Flags: append(cmds.CommonFlags, createFlags...),
|
||||
HideHelpCommand: true,
|
||||
}
|
||||
}
|
||||
|
||||
func createAction(config *CreateConfig) cli.ActionFunc {
|
||||
return func(clx *cli.Context) error {
|
||||
ctx := context.Background()
|
||||
|
||||
if clx.NArg() != 1 {
|
||||
return cli.ShowSubcommandHelp(clx)
|
||||
}
|
||||
|
||||
name := clx.Args().First()
|
||||
if name == k3kcluster.ClusterInvalidName {
|
||||
return errors.New("invalid cluster name")
|
||||
}
|
||||
|
||||
restConfig, err := clientcmd.BuildConfigFromFlags("", cmds.Kubeconfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctrlClient, err := client.New(restConfig, client.Options{
|
||||
Scheme: Scheme,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if config.token != "" {
|
||||
logrus.Infof("Creating cluster token secret")
|
||||
obj := k3kcluster.TokenSecretObj(config.token, name, cmds.Namespace())
|
||||
if err := ctrlClient.Create(ctx, &obj); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
logrus.Infof("Creating a new cluster [%s]", name)
|
||||
|
||||
cluster := newCluster(name, cmds.Namespace(), config)
|
||||
|
||||
cluster.Spec.Expose = &v1alpha1.ExposeConfig{
|
||||
NodePort: &v1alpha1.NodePortConfig{},
|
||||
}
|
||||
|
||||
// add Host IP address as an extra TLS-SAN to expose the k3k cluster
|
||||
url, err := url.Parse(restConfig.Host)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
host := strings.Split(url.Host, ":")
|
||||
if config.kubeconfigServerHost != "" {
|
||||
host = []string{config.kubeconfigServerHost}
|
||||
}
|
||||
cluster.Spec.TLSSANs = []string{host[0]}
|
||||
|
||||
if err := ctrlClient.Create(ctx, cluster); err != nil {
|
||||
if apierrors.IsAlreadyExists(err) {
|
||||
logrus.Infof("Cluster [%s] already exists", name)
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
logrus.Infof("Extracting Kubeconfig for [%s] cluster", name)
|
||||
|
||||
logrus.Infof("waiting for cluster to be available..")
|
||||
|
||||
// retry every 5s for at most 2m, or 25 times
|
||||
availableBackoff := wait.Backoff{
|
||||
Duration: 5 * time.Second,
|
||||
Cap: 2 * time.Minute,
|
||||
Steps: 25,
|
||||
}
|
||||
|
||||
cfg := kubeconfig.New()
|
||||
|
||||
var kubeconfig *clientcmdapi.Config
|
||||
if err := retry.OnError(availableBackoff, apierrors.IsNotFound, func() error {
|
||||
kubeconfig, err = cfg.Extract(ctx, ctrlClient, cluster, host[0])
|
||||
return err
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logrus.Infof(`You can start using the cluster with:
|
||||
|
||||
export KUBECONFIG=%s
|
||||
kubectl cluster-info
|
||||
`, filepath.Join(pwd, cluster.Name+"-kubeconfig.yaml"))
|
||||
|
||||
kubeconfigData, err := clientcmd.Write(*kubeconfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return os.WriteFile(cluster.Name+"-kubeconfig.yaml", kubeconfigData, 0644)
|
||||
}
|
||||
}
|
||||
|
||||
func newCluster(name, namespace string, config *CreateConfig) *v1alpha1.Cluster {
|
||||
cluster := &v1alpha1.Cluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
},
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Cluster",
|
||||
APIVersion: "k3k.io/v1alpha1",
|
||||
},
|
||||
Spec: v1alpha1.ClusterSpec{
|
||||
Servers: ptr.To(int32(config.servers)),
|
||||
Agents: ptr.To(int32(config.agents)),
|
||||
ClusterCIDR: config.clusterCIDR,
|
||||
ServiceCIDR: config.serviceCIDR,
|
||||
ServerArgs: config.serverArgs.Value(),
|
||||
AgentArgs: config.agentArgs.Value(),
|
||||
Version: config.version,
|
||||
Mode: v1alpha1.ClusterMode(config.mode),
|
||||
Persistence: v1alpha1.PersistenceConfig{
|
||||
Type: v1alpha1.PersistenceMode(config.persistenceType),
|
||||
StorageClassName: ptr.To(config.storageClassName),
|
||||
},
|
||||
},
|
||||
}
|
||||
if config.storageClassName == "" {
|
||||
cluster.Spec.Persistence.StorageClassName = nil
|
||||
}
|
||||
if config.token != "" {
|
||||
cluster.Spec.TokenSecretRef = &v1.SecretReference{
|
||||
Name: k3kcluster.TokenSecretName(name),
|
||||
Namespace: namespace,
|
||||
}
|
||||
}
|
||||
|
||||
return cluster
|
||||
}
|
||||
@@ -1,98 +0,0 @@
|
||||
package cluster
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
func NewCreateFlags(config *CreateConfig) []cli.Flag {
|
||||
return []cli.Flag{
|
||||
&cli.IntFlag{
|
||||
Name: "servers",
|
||||
Usage: "number of servers",
|
||||
Destination: &config.servers,
|
||||
Value: 1,
|
||||
Action: func(ctx *cli.Context, value int) error {
|
||||
if value <= 0 {
|
||||
return errors.New("invalid number of servers")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
},
|
||||
&cli.IntFlag{
|
||||
Name: "agents",
|
||||
Usage: "number of agents",
|
||||
Destination: &config.agents,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "token",
|
||||
Usage: "token of the cluster",
|
||||
Destination: &config.token,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "cluster-cidr",
|
||||
Usage: "cluster CIDR",
|
||||
Destination: &config.clusterCIDR,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "service-cidr",
|
||||
Usage: "service CIDR",
|
||||
Destination: &config.serviceCIDR,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "persistence-type",
|
||||
Usage: "persistence mode for the nodes (ephemeral, static, dynamic)",
|
||||
Value: string(v1alpha1.DynamicNodesType),
|
||||
Destination: &config.persistenceType,
|
||||
Action: func(ctx *cli.Context, value string) error {
|
||||
switch v1alpha1.PersistenceMode(value) {
|
||||
case v1alpha1.EphemeralNodeType, v1alpha1.DynamicNodesType:
|
||||
return nil
|
||||
default:
|
||||
return errors.New(`persistence-type should be one of "ephemeral", "static" or "dynamic"`)
|
||||
}
|
||||
},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "storage-class-name",
|
||||
Usage: "storage class name for dynamic persistence type",
|
||||
Destination: &config.storageClassName,
|
||||
},
|
||||
&cli.StringSliceFlag{
|
||||
Name: "server-args",
|
||||
Usage: "servers extra arguments",
|
||||
Value: &config.serverArgs,
|
||||
},
|
||||
&cli.StringSliceFlag{
|
||||
Name: "agent-args",
|
||||
Usage: "agents extra arguments",
|
||||
Value: &config.agentArgs,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "version",
|
||||
Usage: "k3s version",
|
||||
Destination: &config.version,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "mode",
|
||||
Usage: "k3k mode type",
|
||||
Destination: &config.mode,
|
||||
Value: "shared",
|
||||
Action: func(ctx *cli.Context, value string) error {
|
||||
switch value {
|
||||
case string(v1alpha1.VirtualClusterMode), string(v1alpha1.SharedClusterMode):
|
||||
return nil
|
||||
default:
|
||||
return errors.New(`mode should be one of "shared" or "virtual"`)
|
||||
}
|
||||
},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "kubeconfig-server",
|
||||
Usage: "override the kubeconfig server host",
|
||||
Destination: &config.kubeconfigServerHost,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -1,61 +0,0 @@
|
||||
package cluster
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
"github.com/rancher/k3k/cli/cmds"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli/v2"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
func NewDeleteCmd() *cli.Command {
|
||||
return &cli.Command{
|
||||
Name: "delete",
|
||||
Usage: "Delete an existing cluster",
|
||||
UsageText: "k3kcli cluster delete [command options] NAME",
|
||||
Action: delete,
|
||||
Flags: cmds.CommonFlags,
|
||||
HideHelpCommand: true,
|
||||
}
|
||||
}
|
||||
|
||||
func delete(clx *cli.Context) error {
|
||||
ctx := context.Background()
|
||||
|
||||
if clx.NArg() != 1 {
|
||||
return cli.ShowSubcommandHelp(clx)
|
||||
}
|
||||
|
||||
name := clx.Args().First()
|
||||
if name == k3kcluster.ClusterInvalidName {
|
||||
return errors.New("invalid cluster name")
|
||||
}
|
||||
|
||||
restConfig, err := clientcmd.BuildConfigFromFlags("", cmds.Kubeconfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctrlClient, err := client.New(restConfig, client.Options{
|
||||
Scheme: Scheme,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logrus.Infof("deleting [%s] cluster", name)
|
||||
|
||||
cluster := v1alpha1.Cluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: cmds.Namespace(),
|
||||
},
|
||||
}
|
||||
return ctrlClient.Delete(ctx, &cluster)
|
||||
}
|
||||
343
cli/cmds/cluster_create.go
Normal file
@@ -0,0 +1,343 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"k8s.io/utils/ptr"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
"github.com/rancher/k3k/pkg/controller"
|
||||
k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
|
||||
"github.com/rancher/k3k/pkg/controller/kubeconfig"
|
||||
)
|
||||
|
||||
type CreateConfig struct {
|
||||
token string
|
||||
clusterCIDR string
|
||||
serviceCIDR string
|
||||
servers int
|
||||
agents int
|
||||
serverArgs []string
|
||||
agentArgs []string
|
||||
serverEnvs []string
|
||||
agentEnvs []string
|
||||
persistenceType string
|
||||
storageClassName string
|
||||
storageRequestSize string
|
||||
version string
|
||||
mode string
|
||||
kubeconfigServerHost string
|
||||
policy string
|
||||
mirrorHostNodes bool
|
||||
customCertsPath string
|
||||
}
|
||||
|
||||
func NewClusterCreateCmd(appCtx *AppContext) *cobra.Command {
|
||||
createConfig := &CreateConfig{}
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "create",
|
||||
Short: "Create new cluster",
|
||||
Example: "k3kcli cluster create [command options] NAME",
|
||||
PreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
return validateCreateConfig(createConfig)
|
||||
},
|
||||
RunE: createAction(appCtx, createConfig),
|
||||
Args: cobra.ExactArgs(1),
|
||||
}
|
||||
|
||||
CobraFlagNamespace(appCtx, cmd.Flags())
|
||||
createFlags(cmd, createConfig)
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func createAction(appCtx *AppContext, config *CreateConfig) func(cmd *cobra.Command, args []string) error {
|
||||
return func(cmd *cobra.Command, args []string) error {
|
||||
ctx := context.Background()
|
||||
client := appCtx.Client
|
||||
name := args[0]
|
||||
|
||||
if name == k3kcluster.ClusterInvalidName {
|
||||
return errors.New("invalid cluster name")
|
||||
}
|
||||
|
||||
if config.mode == string(v1alpha1.SharedClusterMode) && config.agents != 0 {
|
||||
return errors.New("invalid flag, --agents flag is only allowed in virtual mode")
|
||||
}
|
||||
|
||||
namespace := appCtx.Namespace(name)
|
||||
|
||||
if err := createNamespace(ctx, client, namespace, config.policy); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if strings.Contains(config.version, "+") {
|
||||
orig := config.version
|
||||
config.version = strings.ReplaceAll(config.version, "+", "-")
|
||||
logrus.Warnf("Invalid K3s docker reference version: '%s'. Using '%s' instead", orig, config.version)
|
||||
}
|
||||
|
||||
if config.token != "" {
|
||||
logrus.Info("Creating cluster token secret")
|
||||
|
||||
obj := k3kcluster.TokenSecretObj(config.token, name, namespace)
|
||||
|
||||
if err := client.Create(ctx, &obj); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if config.customCertsPath != "" {
|
||||
if err := CreateCustomCertsSecrets(ctx, name, namespace, config.customCertsPath, client); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
logrus.Infof("Creating cluster [%s] in namespace [%s]", name, namespace)
|
||||
|
||||
cluster := newCluster(name, namespace, config)
|
||||
|
||||
cluster.Spec.Expose = &v1alpha1.ExposeConfig{
|
||||
NodePort: &v1alpha1.NodePortConfig{},
|
||||
}
|
||||
|
||||
// add Host IP address as an extra TLS-SAN to expose the k3k cluster
|
||||
url, err := url.Parse(appCtx.RestConfig.Host)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
host := strings.Split(url.Host, ":")
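// url.Host may be "host" or "host:port"; only the host part is used below as
// the extra TLS SAN and as the kubeconfig server host.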
|
||||
if config.kubeconfigServerHost != "" {
|
||||
host = []string{config.kubeconfigServerHost}
|
||||
}
|
||||
|
||||
cluster.Spec.TLSSANs = []string{host[0]}
|
||||
|
||||
if err := client.Create(ctx, cluster); err != nil {
|
||||
if apierrors.IsAlreadyExists(err) {
|
||||
logrus.Infof("Cluster [%s] already exists", name)
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
logrus.Infof("Waiting for cluster to be available..")
|
||||
|
||||
if err := waitForCluster(ctx, client, cluster); err != nil {
|
||||
return fmt.Errorf("failed to wait for cluster to become ready (status: %s): %w", cluster.Status.Phase, err)
|
||||
}
|
||||
|
||||
logrus.Infof("Extracting Kubeconfig for [%s] cluster", name)
|
||||
|
||||
// retry every 5s for at most 2m, or 25 times
|
||||
availableBackoff := wait.Backoff{
|
||||
Duration: 5 * time.Second,
|
||||
Cap: 2 * time.Minute,
|
||||
Steps: 25,
|
||||
}
|
||||
|
||||
cfg := kubeconfig.New()
|
||||
|
||||
var kubeconfig *clientcmdapi.Config
|
||||
|
||||
if err := retry.OnError(availableBackoff, apierrors.IsNotFound, func() error {
|
||||
kubeconfig, err = cfg.Generate(ctx, client, cluster, host[0], 0)
|
||||
return err
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return writeKubeconfigFile(cluster, kubeconfig, "")
|
||||
}
|
||||
}
|
||||
|
||||
func newCluster(name, namespace string, config *CreateConfig) *v1alpha1.Cluster {
|
||||
cluster := &v1alpha1.Cluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
},
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Cluster",
|
||||
APIVersion: "k3k.io/v1alpha1",
|
||||
},
|
||||
Spec: v1alpha1.ClusterSpec{
|
||||
Servers: ptr.To(int32(config.servers)),
|
||||
Agents: ptr.To(int32(config.agents)),
|
||||
ClusterCIDR: config.clusterCIDR,
|
||||
ServiceCIDR: config.serviceCIDR,
|
||||
ServerArgs: config.serverArgs,
|
||||
AgentArgs: config.agentArgs,
|
||||
ServerEnvs: env(config.serverEnvs),
|
||||
AgentEnvs: env(config.agentEnvs),
|
||||
Version: config.version,
|
||||
Mode: v1alpha1.ClusterMode(config.mode),
|
||||
Persistence: v1alpha1.PersistenceConfig{
|
||||
Type: v1alpha1.PersistenceMode(config.persistenceType),
|
||||
StorageClassName: ptr.To(config.storageClassName),
|
||||
StorageRequestSize: config.storageRequestSize,
|
||||
},
|
||||
MirrorHostNodes: config.mirrorHostNodes,
|
||||
},
|
||||
}
|
||||
if config.storageClassName == "" {
|
||||
cluster.Spec.Persistence.StorageClassName = nil
|
||||
}
|
||||
|
||||
if config.token != "" {
|
||||
cluster.Spec.TokenSecretRef = &v1.SecretReference{
|
||||
Name: k3kcluster.TokenSecretName(name),
|
||||
Namespace: namespace,
|
||||
}
|
||||
}
|
||||
|
||||
if config.customCertsPath != "" {
|
||||
cluster.Spec.CustomCAs = v1alpha1.CustomCAs{
|
||||
Enabled: true,
|
||||
Sources: v1alpha1.CredentialSources{
|
||||
ClientCA: v1alpha1.CredentialSource{
|
||||
SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "client-ca"),
|
||||
},
|
||||
ServerCA: v1alpha1.CredentialSource{
|
||||
SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "server-ca"),
|
||||
},
|
||||
ETCDServerCA: v1alpha1.CredentialSource{
|
||||
SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "etcd-server-ca"),
|
||||
},
|
||||
ETCDPeerCA: v1alpha1.CredentialSource{
|
||||
SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "etcd-peer-ca"),
|
||||
},
|
||||
RequestHeaderCA: v1alpha1.CredentialSource{
|
||||
SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "request-header-ca"),
|
||||
},
|
||||
ServiceAccountToken: v1alpha1.CredentialSource{
|
||||
SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "service-account-token"),
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
return cluster
|
||||
}
|
||||
|
||||
func env(envSlice []string) []v1.EnvVar {
|
||||
var envVars []v1.EnvVar
|
||||
|
||||
for _, env := range envSlice {
|
||||
keyValue := strings.Split(env, "=")
|
||||
if len(keyValue) != 2 {
|
||||
logrus.Fatalf("incorrect value for environment variable %s", env)
|
||||
}
|
||||
|
||||
envVars = append(envVars, v1.EnvVar{
|
||||
Name: keyValue[0],
|
||||
Value: keyValue[1],
|
||||
})
|
||||
}
|
||||
|
||||
return envVars
|
||||
}
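// Example: --server-envs "FOO=bar" becomes v1.EnvVar{Name: "FOO", Value: "bar"}.
// An entry whose value itself contains '=' (e.g. "A=b=c") fails the strict
// two-part split above and aborts the CLI via logrus.Fatalf.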
|
||||
|
||||
func waitForCluster(ctx context.Context, k8sClient client.Client, cluster *v1alpha1.Cluster) error {
|
||||
interval := 5 * time.Second
|
||||
timeout := 2 * time.Minute
|
||||
|
||||
return wait.PollUntilContextTimeout(ctx, interval, timeout, true, func(ctx context.Context) (bool, error) {
|
||||
key := client.ObjectKeyFromObject(cluster)
|
||||
if err := k8sClient.Get(ctx, key, cluster); err != nil {
|
||||
return false, fmt.Errorf("failed to get resource: %w", err)
|
||||
}
|
||||
|
||||
// If resource ready -> stop polling
|
||||
if cluster.Status.Phase == v1alpha1.ClusterReady {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// If resource failed -> stop polling with an error
|
||||
if cluster.Status.Phase == v1alpha1.ClusterFailed {
|
||||
return true, fmt.Errorf("cluster creation failed: %s", cluster.Status.Phase)
|
||||
}
|
||||
|
||||
// Condition not met, continue polling.
|
||||
return false, nil
|
||||
})
|
||||
}
|
||||
|
||||
func CreateCustomCertsSecrets(ctx context.Context, name, namespace, customCertsPath string, k8sclient client.Client) error {
|
||||
customCAsMap := map[string]string{
|
||||
"etcd-peer-ca": "/etcd/peer-ca",
|
||||
"etcd-server-ca": "/etcd/server-ca",
|
||||
"server-ca": "/server-ca",
|
||||
"client-ca": "/client-ca",
|
||||
"request-header-ca": "/request-header-ca",
|
||||
"service-account-token": "/service",
|
||||
}
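// Expected layout under --custom-certs (derived from the map above):
//   <dir>/client-ca.crt|.key, <dir>/server-ca.crt|.key,
//   <dir>/request-header-ca.crt|.key, <dir>/etcd/peer-ca.crt|.key,
//   <dir>/etcd/server-ca.crt|.key, and <dir>/service.key
//   (the service-account token entry has a key only, no certificate).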
|
||||
|
||||
for certName, fileName := range customCAsMap {
|
||||
var (
|
||||
certFilePath, keyFilePath string
|
||||
cert, key []byte
|
||||
err error
|
||||
)
|
||||
|
||||
if certName != "service-account-token" {
|
||||
certFilePath = customCertsPath + fileName + ".crt"
|
||||
|
||||
cert, err = os.ReadFile(certFilePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
keyFilePath = customCertsPath + fileName + ".key"
|
||||
|
||||
key, err = os.ReadFile(keyFilePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
certSecret := caCertSecret(certName, name, namespace, cert, key)
|
||||
|
||||
if err := k8sclient.Create(ctx, certSecret); err != nil {
|
||||
return client.IgnoreAlreadyExists(err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func caCertSecret(certName, clusterName, clusterNamespace string, cert, key []byte) *v1.Secret {
|
||||
return &v1.Secret{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Secret",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: controller.SafeConcatNameWithPrefix(clusterName, certName),
|
||||
Namespace: clusterNamespace,
|
||||
},
|
||||
Type: v1.SecretTypeTLS,
|
||||
Data: map[string][]byte{
|
||||
v1.TLSCertKey: cert,
|
||||
v1.TLSPrivateKeyKey: key,
|
||||
},
|
||||
}
|
||||
}
|
||||
61 cli/cmds/cluster_create_flags.go Normal file
@@ -0,0 +1,61 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
)
|
||||
|
||||
func createFlags(cmd *cobra.Command, cfg *CreateConfig) {
|
||||
cmd.Flags().IntVar(&cfg.servers, "servers", 1, "number of servers")
|
||||
cmd.Flags().IntVar(&cfg.agents, "agents", 0, "number of agents")
|
||||
cmd.Flags().StringVar(&cfg.token, "token", "", "token of the cluster")
|
||||
cmd.Flags().StringVar(&cfg.clusterCIDR, "cluster-cidr", "", "cluster CIDR")
|
||||
cmd.Flags().StringVar(&cfg.serviceCIDR, "service-cidr", "", "service CIDR")
|
||||
cmd.Flags().BoolVar(&cfg.mirrorHostNodes, "mirror-host-nodes", false, "Mirror Host Cluster Nodes")
|
||||
cmd.Flags().StringVar(&cfg.persistenceType, "persistence-type", string(v1alpha1.DynamicPersistenceMode), "persistence mode for the nodes (dynamic, ephemeral, static)")
|
||||
cmd.Flags().StringVar(&cfg.storageClassName, "storage-class-name", "", "storage class name for dynamic persistence type")
|
||||
cmd.Flags().StringVar(&cfg.storageRequestSize, "storage-request-size", "", "storage size for dynamic persistence type")
|
||||
cmd.Flags().StringSliceVar(&cfg.serverArgs, "server-args", []string{}, "servers extra arguments")
|
||||
cmd.Flags().StringSliceVar(&cfg.agentArgs, "agent-args", []string{}, "agents extra arguments")
|
||||
cmd.Flags().StringSliceVar(&cfg.serverEnvs, "server-envs", []string{}, "servers extra Envs")
|
||||
cmd.Flags().StringSliceVar(&cfg.agentEnvs, "agent-envs", []string{}, "agents extra Envs")
|
||||
cmd.Flags().StringVar(&cfg.version, "version", "", "k3s version")
|
||||
cmd.Flags().StringVar(&cfg.mode, "mode", "shared", "k3k mode type (shared, virtual)")
|
||||
cmd.Flags().StringVar(&cfg.kubeconfigServerHost, "kubeconfig-server", "", "override the kubeconfig server host")
|
||||
cmd.Flags().StringVar(&cfg.policy, "policy", "", "The policy to create the cluster in")
|
||||
cmd.Flags().StringVar(&cfg.customCertsPath, "custom-certs", "", "The path for custom certificate directory")
|
||||
}
|
||||
|
||||
func validateCreateConfig(cfg *CreateConfig) error {
|
||||
if cfg.servers <= 0 {
|
||||
return errors.New("invalid number of servers")
|
||||
}
|
||||
|
||||
if cfg.persistenceType != "" {
|
||||
switch v1alpha1.PersistenceMode(cfg.persistenceType) {
|
||||
case v1alpha1.EphemeralPersistenceMode, v1alpha1.DynamicPersistenceMode:
|
||||
return nil
|
||||
default:
|
||||
return errors.New(`persistence-type should be one of "dynamic", "ephemeral" or "static"`)
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := resource.ParseQuantity(cfg.storageRequestSize); err != nil {
|
||||
return errors.New(`invalid storage size, should be a valid resource quantity e.g "10Gi"`)
|
||||
}
|
||||
|
||||
if cfg.mode != "" {
|
||||
switch cfg.mode {
|
||||
case string(v1alpha1.VirtualClusterMode), string(v1alpha1.SharedClusterMode):
|
||||
return nil
|
||||
default:
|
||||
return errors.New(`mode should be one of "shared" or "virtual"`)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
115 cli/cmds/cluster_delete.go Normal file
@@ -0,0 +1,115 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
|
||||
"github.com/rancher/k3k/pkg/controller/cluster/agent"
|
||||
)
|
||||
|
||||
var keepData bool
|
||||
|
||||
func NewClusterDeleteCmd(appCtx *AppContext) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "delete",
|
||||
Short: "Delete an existing cluster",
|
||||
Example: "k3kcli cluster delete [command options] NAME",
|
||||
RunE: delete(appCtx),
|
||||
Args: cobra.ExactArgs(1),
|
||||
}
|
||||
|
||||
CobraFlagNamespace(appCtx, cmd.Flags())
|
||||
cmd.Flags().BoolVar(&keepData, "keep-data", false, "keeps persistence volumes created for the cluster after deletion")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func delete(appCtx *AppContext) func(cmd *cobra.Command, args []string) error {
|
||||
return func(cmd *cobra.Command, args []string) error {
|
||||
ctx := context.Background()
|
||||
client := appCtx.Client
|
||||
name := args[0]
|
||||
|
||||
if name == k3kcluster.ClusterInvalidName {
|
||||
return errors.New("invalid cluster name")
|
||||
}
|
||||
|
||||
namespace := appCtx.Namespace(name)
|
||||
|
||||
logrus.Infof("Deleting [%s] cluster in namespace [%s]", name, namespace)
|
||||
|
||||
cluster := v1alpha1.Cluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
},
|
||||
}
|
||||
// keep bootstrap secrets and tokens if --keep-data flag is passed
|
||||
if keepData {
|
||||
// skip removing tokenSecret
|
||||
if err := RemoveOwnerReferenceFromSecret(ctx, k3kcluster.TokenSecretName(cluster.Name), client, cluster); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// skip removing webhook secret
|
||||
if err := RemoveOwnerReferenceFromSecret(ctx, agent.WebhookSecretName(cluster.Name), client, cluster); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
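// --keep-data not set: remove the PVCs backing this cluster's server pods
// (label selector cluster=<name>,role=server) before deleting the Cluster.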
|
||||
matchingLabels := ctrlclient.MatchingLabels(map[string]string{"cluster": cluster.Name, "role": "server"})
|
||||
listOpts := ctrlclient.ListOptions{Namespace: cluster.Namespace}
|
||||
matchingLabels.ApplyToList(&listOpts)
|
||||
deleteOpts := &ctrlclient.DeleteAllOfOptions{ListOptions: listOpts}
|
||||
|
||||
if err := client.DeleteAllOf(ctx, &v1.PersistentVolumeClaim{}, deleteOpts); err != nil {
|
||||
return ctrlclient.IgnoreNotFound(err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := client.Delete(ctx, &cluster); err != nil {
|
||||
return ctrlclient.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func RemoveOwnerReferenceFromSecret(ctx context.Context, name string, cl ctrlclient.Client, cluster v1alpha1.Cluster) error {
|
||||
var secret v1.Secret
|
||||
|
||||
key := types.NamespacedName{
|
||||
Name: name,
|
||||
Namespace: cluster.Namespace,
|
||||
}
|
||||
|
||||
if err := cl.Get(ctx, key, &secret); err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
logrus.Warnf("%s secret is not found", name)
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
if controllerutil.HasControllerReference(&secret) {
|
||||
if err := controllerutil.RemoveOwnerReference(&cluster, &secret, cl.Scheme()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return cl.Update(ctx, &secret)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
52 cli/cmds/cluster_list.go Normal file
@@ -0,0 +1,52 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/cli-runtime/pkg/printers"
|
||||
|
||||
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
|
||||
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
)
|
||||
|
||||
func NewClusterListCmd(appCtx *AppContext) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "list",
|
||||
Short: "List all the existing clusters",
|
||||
Example: "k3kcli cluster list [command options]",
|
||||
RunE: list(appCtx),
|
||||
Args: cobra.NoArgs,
|
||||
}
|
||||
|
||||
CobraFlagNamespace(appCtx, cmd.Flags())
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func list(appCtx *AppContext) func(cmd *cobra.Command, args []string) error {
|
||||
return func(cmd *cobra.Command, args []string) error {
|
||||
ctx := context.Background()
|
||||
client := appCtx.Client
|
||||
|
||||
var clusters v1alpha1.ClusterList
|
||||
if err := client.List(ctx, &clusters, ctrlclient.InNamespace(appCtx.namespace)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
crd := &apiextensionsv1.CustomResourceDefinition{}
|
||||
if err := client.Get(ctx, types.NamespacedName{Name: "clusters.k3k.io"}, crd); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
items := toPointerSlice(clusters.Items)
|
||||
table := createTable(crd, items)
|
||||
|
||||
printer := printers.NewTablePrinter(printers.PrintOptions{WithNamespace: true})
|
||||
|
||||
return printer.PrintObj(table, cmd.OutOrStdout())
|
||||
}
|
||||
}
|
||||
153 cli/cmds/kubeconfig.go Normal file
@@ -0,0 +1,153 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apiserver/pkg/authentication/user"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
"k8s.io/client-go/util/retry"
|
||||
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
"github.com/rancher/k3k/pkg/controller"
|
||||
"github.com/rancher/k3k/pkg/controller/certs"
|
||||
"github.com/rancher/k3k/pkg/controller/kubeconfig"
|
||||
)
|
||||
|
||||
type GenerateKubeconfigConfig struct {
|
||||
name string
|
||||
configName string
|
||||
cn string
|
||||
org []string
|
||||
altNames []string
|
||||
expirationDays int64
|
||||
kubeconfigServerHost string
|
||||
}
|
||||
|
||||
func NewKubeconfigCmd(appCtx *AppContext) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "kubeconfig",
|
||||
Short: "Manage kubeconfig for clusters",
|
||||
}
|
||||
|
||||
cmd.AddCommand(
|
||||
NewKubeconfigGenerateCmd(appCtx),
|
||||
)
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func NewKubeconfigGenerateCmd(appCtx *AppContext) *cobra.Command {
|
||||
cfg := &GenerateKubeconfigConfig{}
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "generate",
|
||||
Short: "Generate kubeconfig for clusters",
|
||||
RunE: generate(appCtx, cfg),
|
||||
Args: cobra.NoArgs,
|
||||
}
|
||||
|
||||
CobraFlagNamespace(appCtx, cmd.Flags())
|
||||
generateKubeconfigFlags(cmd, cfg)
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func generateKubeconfigFlags(cmd *cobra.Command, cfg *GenerateKubeconfigConfig) {
|
||||
cmd.Flags().StringVar(&cfg.name, "name", "", "cluster name")
|
||||
cmd.Flags().StringVar(&cfg.configName, "config-name", "", "the name of the generated kubeconfig file")
|
||||
cmd.Flags().StringVar(&cfg.cn, "cn", controller.AdminCommonName, "Common name (CN) of the generated certificates for the kubeconfig")
|
||||
cmd.Flags().StringSliceVar(&cfg.org, "org", nil, "Organization name (ORG) of the generated certificates for the kubeconfig")
|
||||
cmd.Flags().StringSliceVar(&cfg.altNames, "altNames", nil, "altNames of the generated certificates for the kubeconfig")
|
||||
cmd.Flags().Int64Var(&cfg.expirationDays, "expiration-days", 365, "Expiration date of the certificates used for the kubeconfig")
|
||||
cmd.Flags().StringVar(&cfg.kubeconfigServerHost, "kubeconfig-server", "", "override the kubeconfig server host")
|
||||
}
|
||||
|
||||
func generate(appCtx *AppContext, cfg *GenerateKubeconfigConfig) func(cmd *cobra.Command, args []string) error {
|
||||
return func(cmd *cobra.Command, args []string) error {
|
||||
ctx := context.Background()
|
||||
client := appCtx.Client
|
||||
|
||||
clusterKey := types.NamespacedName{
|
||||
Name: cfg.name,
|
||||
Namespace: appCtx.Namespace(cfg.name),
|
||||
}
|
||||
|
||||
var cluster v1alpha1.Cluster
|
||||
|
||||
if err := client.Get(ctx, clusterKey, &cluster); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
url, err := url.Parse(appCtx.RestConfig.Host)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
host := strings.Split(url.Host, ":")
|
||||
if cfg.kubeconfigServerHost != "" {
|
||||
host = []string{cfg.kubeconfigServerHost}
|
||||
cfg.altNames = append(cfg.altNames, cfg.kubeconfigServerHost)
|
||||
}
|
||||
|
||||
certAltNames := certs.AddSANs(cfg.altNames)
|
||||
|
||||
if len(cfg.org) == 0 {
|
||||
cfg.org = []string{user.SystemPrivilegedGroup}
|
||||
}
|
||||
|
||||
kubeCfg := kubeconfig.KubeConfig{
|
||||
CN: cfg.cn,
|
||||
ORG: cfg.org,
|
||||
ExpiryDate: time.Hour * 24 * time.Duration(cfg.expirationDays),
|
||||
AltNames: certAltNames,
|
||||
}
|
||||
|
||||
logrus.Infof("waiting for cluster to be available..")
|
||||
|
||||
var kubeconfig *clientcmdapi.Config
|
||||
|
||||
if err := retry.OnError(controller.Backoff, apierrors.IsNotFound, func() error {
|
||||
kubeconfig, err = kubeCfg.Generate(ctx, client, &cluster, host[0], 0)
|
||||
return err
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return writeKubeconfigFile(&cluster, kubeconfig, cfg.configName)
|
||||
}
|
||||
}
|
||||
|
||||
func writeKubeconfigFile(cluster *v1alpha1.Cluster, kubeconfig *clientcmdapi.Config, configName string) error {
|
||||
if configName == "" {
|
||||
configName = cluster.Namespace + "-" + cluster.Name + "-kubeconfig.yaml"
|
||||
}
|
||||
|
||||
pwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logrus.Infof(`You can start using the cluster with:
|
||||
|
||||
export KUBECONFIG=%s
|
||||
kubectl cluster-info
|
||||
`, filepath.Join(pwd, configName))
|
||||
|
||||
kubeconfigData, err := clientcmd.Write(*kubeconfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return os.WriteFile(configName, kubeconfigData, 0o644)
|
||||
}
|
||||
@@ -1,185 +0,0 @@
|
||||
package kubeconfig
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rancher/k3k/cli/cmds"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
"github.com/rancher/k3k/pkg/controller"
|
||||
"github.com/rancher/k3k/pkg/controller/certs"
|
||||
"github.com/rancher/k3k/pkg/controller/kubeconfig"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli/v2"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apiserver/pkg/authentication/user"
|
||||
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
func init() {
|
||||
_ = clientgoscheme.AddToScheme(Scheme)
|
||||
_ = v1alpha1.AddToScheme(Scheme)
|
||||
}
|
||||
|
||||
var (
|
||||
Scheme = runtime.NewScheme()
|
||||
name string
|
||||
cn string
|
||||
org cli.StringSlice
|
||||
altNames cli.StringSlice
|
||||
expirationDays int64
|
||||
configName string
|
||||
kubeconfigServerHost string
|
||||
generateKubeconfigFlags = []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "name",
|
||||
Usage: "cluster name",
|
||||
Destination: &name,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "config-name",
|
||||
Usage: "the name of the generated kubeconfig file",
|
||||
Destination: &configName,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "cn",
|
||||
Usage: "Common name (CN) of the generated certificates for the kubeconfig",
|
||||
Destination: &cn,
|
||||
Value: controller.AdminCommonName,
|
||||
},
|
||||
&cli.StringSliceFlag{
|
||||
Name: "org",
|
||||
Usage: "Organization name (ORG) of the generated certificates for the kubeconfig",
|
||||
Value: &org,
|
||||
},
|
||||
&cli.StringSliceFlag{
|
||||
Name: "altNames",
|
||||
Usage: "altNames of the generated certificates for the kubeconfig",
|
||||
Value: &altNames,
|
||||
},
|
||||
&cli.Int64Flag{
|
||||
Name: "expiration-days",
|
||||
Usage: "Expiration date of the certificates used for the kubeconfig",
|
||||
Destination: &expirationDays,
|
||||
Value: 356,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "kubeconfig-server",
|
||||
Usage: "override the kubeconfig server host",
|
||||
Destination: &kubeconfigServerHost,
|
||||
Value: "",
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
var subcommands = []*cli.Command{
|
||||
{
|
||||
Name: "generate",
|
||||
Usage: "Generate kubeconfig for clusters",
|
||||
SkipFlagParsing: false,
|
||||
Action: generate,
|
||||
Flags: append(cmds.CommonFlags, generateKubeconfigFlags...),
|
||||
},
|
||||
}
|
||||
|
||||
func NewCommand() *cli.Command {
|
||||
return &cli.Command{
|
||||
Name: "kubeconfig",
|
||||
Usage: "Manage kubeconfig for clusters",
|
||||
Subcommands: subcommands,
|
||||
}
|
||||
}
|
||||
|
||||
func generate(clx *cli.Context) error {
|
||||
var cluster v1alpha1.Cluster
|
||||
ctx := context.Background()
|
||||
|
||||
restConfig, err := clientcmd.BuildConfigFromFlags("", cmds.Kubeconfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctrlClient, err := client.New(restConfig, client.Options{
|
||||
Scheme: Scheme,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
clusterKey := types.NamespacedName{
|
||||
Name: name,
|
||||
Namespace: cmds.Namespace(),
|
||||
}
|
||||
|
||||
if err := ctrlClient.Get(ctx, clusterKey, &cluster); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
url, err := url.Parse(restConfig.Host)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
host := strings.Split(url.Host, ":")
|
||||
if kubeconfigServerHost != "" {
|
||||
host = []string{kubeconfigServerHost}
|
||||
err := altNames.Set(kubeconfigServerHost)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
certAltNames := certs.AddSANs(altNames.Value())
|
||||
|
||||
orgs := org.Value()
|
||||
if orgs == nil {
|
||||
orgs = []string{user.SystemPrivilegedGroup}
|
||||
}
|
||||
|
||||
cfg := kubeconfig.KubeConfig{
|
||||
CN: cn,
|
||||
ORG: orgs,
|
||||
ExpiryDate: time.Hour * 24 * time.Duration(expirationDays),
|
||||
AltNames: certAltNames,
|
||||
}
|
||||
|
||||
logrus.Infof("waiting for cluster to be available..")
|
||||
|
||||
var kubeconfig *clientcmdapi.Config
|
||||
if err := retry.OnError(controller.Backoff, apierrors.IsNotFound, func() error {
|
||||
kubeconfig, err = cfg.Extract(ctx, ctrlClient, &cluster, host[0])
|
||||
return err
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if configName == "" {
|
||||
configName = cluster.Name + "-kubeconfig.yaml"
|
||||
}
|
||||
|
||||
logrus.Infof(`You can start using the cluster with:
|
||||
|
||||
export KUBECONFIG=%s
|
||||
kubectl cluster-info
|
||||
`, filepath.Join(pwd, configName))
|
||||
|
||||
kubeconfigData, err := clientcmd.Write(*kubeconfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return os.WriteFile(configName, kubeconfigData, 0644)
|
||||
}
|
||||
20 cli/cmds/policy.go Normal file
@@ -0,0 +1,20 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func NewPolicyCmd(appCtx *AppContext) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "policy",
|
||||
Short: "policy command",
|
||||
}
|
||||
|
||||
cmd.AddCommand(
|
||||
NewPolicyCreateCmd(appCtx),
|
||||
NewPolicyDeleteCmd(appCtx),
|
||||
NewPolicyListCmd(appCtx),
|
||||
)
|
||||
|
||||
return cmd
|
||||
}
|
||||
109 cli/cmds/policy_create.go Normal file
@@ -0,0 +1,109 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
"github.com/rancher/k3k/pkg/controller/policy"
|
||||
)
|
||||
|
||||
type VirtualClusterPolicyCreateConfig struct {
|
||||
mode string
|
||||
}
|
||||
|
||||
func NewPolicyCreateCmd(appCtx *AppContext) *cobra.Command {
|
||||
config := &VirtualClusterPolicyCreateConfig{}
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "create",
|
||||
Short: "Create new policy",
|
||||
Example: "k3kcli policy create [command options] NAME",
|
||||
PreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
switch config.mode {
|
||||
case string(v1alpha1.VirtualClusterMode), string(v1alpha1.SharedClusterMode):
|
||||
return nil
|
||||
default:
|
||||
return errors.New(`mode should be one of "shared" or "virtual"`)
|
||||
}
|
||||
},
|
||||
RunE: policyCreateAction(appCtx, config),
|
||||
Args: cobra.ExactArgs(1),
|
||||
}
|
||||
|
||||
cmd.Flags().StringVar(&config.mode, "mode", "shared", "The allowed mode type of the policy")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func policyCreateAction(appCtx *AppContext, config *VirtualClusterPolicyCreateConfig) func(cmd *cobra.Command, args []string) error {
|
||||
return func(cmd *cobra.Command, args []string) error {
|
||||
ctx := context.Background()
|
||||
client := appCtx.Client
|
||||
policyName := args[0]
|
||||
|
||||
_, err := createPolicy(ctx, client, v1alpha1.ClusterMode(config.mode), policyName)
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
func createNamespace(ctx context.Context, client client.Client, name, policyName string) error {
|
||||
ns := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: name}}
|
||||
|
||||
if policyName != "" {
|
||||
ns.Labels = map[string]string{
|
||||
policy.PolicyNameLabelKey: policyName,
|
||||
}
|
||||
}
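// Note that an existing namespace is left untouched: the policy label above is
// only applied when the namespace is created below.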
|
||||
|
||||
if err := client.Get(ctx, types.NamespacedName{Name: name}, ns); err != nil {
|
||||
if !apierrors.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
logrus.Infof(`Creating namespace [%s]`, name)
|
||||
|
||||
if err := client.Create(ctx, ns); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func createPolicy(ctx context.Context, client client.Client, mode v1alpha1.ClusterMode, policyName string) (*v1alpha1.VirtualClusterPolicy, error) {
|
||||
logrus.Infof("Creating policy [%s]", policyName)
|
||||
|
||||
policy := &v1alpha1.VirtualClusterPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: policyName,
|
||||
},
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "VirtualClusterPolicy",
|
||||
APIVersion: "k3k.io/v1alpha1",
|
||||
},
|
||||
Spec: v1alpha1.VirtualClusterPolicySpec{
|
||||
AllowedMode: mode,
|
||||
},
|
||||
}
|
||||
|
||||
if err := client.Create(ctx, policy); err != nil {
|
||||
if !apierrors.IsAlreadyExists(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
logrus.Infof("Policy [%s] already exists", policyName)
|
||||
}
|
||||
|
||||
return policy, nil
|
||||
}
|
||||
43 cli/cmds/policy_delete.go Normal file
@@ -0,0 +1,43 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
)
|
||||
|
||||
func NewPolicyDeleteCmd(appCtx *AppContext) *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "delete",
|
||||
Short: "Delete an existing policy",
|
||||
Example: "k3kcli policy delete [command options] NAME",
|
||||
RunE: policyDeleteAction(appCtx),
|
||||
Args: cobra.ExactArgs(1),
|
||||
}
|
||||
}
|
||||
|
||||
func policyDeleteAction(appCtx *AppContext) func(cmd *cobra.Command, args []string) error {
|
||||
return func(cmd *cobra.Command, args []string) error {
|
||||
ctx := context.Background()
|
||||
client := appCtx.Client
|
||||
name := args[0]
|
||||
|
||||
policy := &v1alpha1.VirtualClusterPolicy{}
|
||||
policy.Name = name
|
||||
|
||||
if err := client.Delete(ctx, policy); err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
logrus.Warnf("Policy not found")
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
47 cli/cmds/policy_list.go Normal file
@@ -0,0 +1,47 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/cli-runtime/pkg/printers"
|
||||
|
||||
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
)
|
||||
|
||||
func NewPolicyListCmd(appCtx *AppContext) *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "list",
|
||||
Short: "List all the existing policies",
|
||||
Example: "k3kcli policy list [command options]",
|
||||
RunE: policyList(appCtx),
|
||||
Args: cobra.NoArgs,
|
||||
}
|
||||
}
|
||||
|
||||
func policyList(appCtx *AppContext) func(cmd *cobra.Command, args []string) error {
|
||||
return func(cmd *cobra.Command, args []string) error {
|
||||
ctx := context.Background()
|
||||
client := appCtx.Client
|
||||
|
||||
var policies v1alpha1.VirtualClusterPolicyList
|
||||
if err := client.List(ctx, &policies); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
crd := &apiextensionsv1.CustomResourceDefinition{}
|
||||
if err := client.Get(ctx, types.NamespacedName{Name: "virtualclusterpolicies.k3k.io"}, crd); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
items := toPointerSlice(policies.Items)
|
||||
table := createTable(crd, items)
|
||||
|
||||
printer := printers.NewTablePrinter(printers.PrintOptions{})
|
||||
|
||||
return printer.PrintObj(table, cmd.OutOrStdout())
|
||||
}
|
||||
}
|
||||
153 cli/cmds/root.go
@@ -1,62 +1,119 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"os"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli/v2"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
"github.com/spf13/viper"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
|
||||
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
"github.com/rancher/k3k/pkg/buildinfo"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultNamespace = "default"
|
||||
)
|
||||
type AppContext struct {
|
||||
RestConfig *rest.Config
|
||||
Client client.Client
|
||||
|
||||
var (
|
||||
debug bool
|
||||
Kubeconfig string
|
||||
namespace string
|
||||
CommonFlags = []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "kubeconfig",
|
||||
EnvVars: []string{"KUBECONFIG"},
|
||||
Usage: "kubeconfig path",
|
||||
Destination: &Kubeconfig,
|
||||
Value: os.Getenv("HOME") + "/.kube/config",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "namespace",
|
||||
Usage: "namespace to create the k3k cluster in",
|
||||
Destination: &namespace,
|
||||
},
|
||||
}
|
||||
)
|
||||
// Global flags
|
||||
Debug bool
|
||||
Kubeconfig string
|
||||
namespace string
|
||||
}
|
||||
|
||||
func NewApp() *cli.App {
|
||||
app := cli.NewApp()
|
||||
app.Name = "k3kcli"
|
||||
app.Usage = "CLI for K3K"
|
||||
app.Flags = []cli.Flag{
|
||||
&cli.BoolFlag{
|
||||
Name: "debug",
|
||||
Usage: "Turn on debug logs",
|
||||
Destination: &debug,
|
||||
EnvVars: []string{"K3K_DEBUG"},
|
||||
func NewRootCmd() *cobra.Command {
|
||||
appCtx := &AppContext{}
|
||||
|
||||
rootCmd := &cobra.Command{
|
||||
Use: "k3kcli",
|
||||
Short: "CLI for K3K",
|
||||
Version: buildinfo.Version,
|
||||
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
InitializeConfig(cmd)
|
||||
|
||||
if appCtx.Debug {
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
}
|
||||
|
||||
restConfig, err := loadRESTConfig(appCtx.Kubeconfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
scheme := runtime.NewScheme()
|
||||
_ = clientgoscheme.AddToScheme(scheme)
|
||||
_ = v1alpha1.AddToScheme(scheme)
|
||||
_ = apiextensionsv1.AddToScheme(scheme)
|
||||
|
||||
ctrlClient, err := client.New(restConfig, client.Options{Scheme: scheme})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
appCtx.RestConfig = restConfig
|
||||
appCtx.Client = ctrlClient
|
||||
|
||||
return nil
|
||||
},
|
||||
DisableAutoGenTag: true,
|
||||
}
|
||||
|
||||
app.Before = func(clx *cli.Context) error {
|
||||
if debug {
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
rootCmd.PersistentFlags().StringVar(&appCtx.Kubeconfig, "kubeconfig", "", "kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)")
|
||||
rootCmd.PersistentFlags().BoolVar(&appCtx.Debug, "debug", false, "Turn on debug logs")
|
||||
|
||||
rootCmd.AddCommand(
|
||||
NewClusterCmd(appCtx),
|
||||
NewPolicyCmd(appCtx),
|
||||
NewKubeconfigCmd(appCtx),
|
||||
)
|
||||
|
||||
return rootCmd
|
||||
}
|
||||
|
||||
func (ctx *AppContext) Namespace(name string) string {
|
||||
if ctx.namespace != "" {
|
||||
return ctx.namespace
|
||||
}
|
||||
|
||||
return "k3k-" + name
|
||||
}
|
||||
|
||||
func loadRESTConfig(kubeconfig string) (*rest.Config, error) {
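// Kubeconfig resolution: an explicit --kubeconfig path takes precedence;
// otherwise the default client-go loading rules apply ($KUBECONFIG if set,
// falling back to $HOME/.kube/config).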
|
||||
loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
|
||||
configOverrides := &clientcmd.ConfigOverrides{}
|
||||
|
||||
if kubeconfig != "" {
|
||||
loadingRules.ExplicitPath = kubeconfig
|
||||
}
|
||||
|
||||
kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides)
|
||||
|
||||
return kubeConfig.ClientConfig()
|
||||
}
|
||||
|
||||
func CobraFlagNamespace(appCtx *AppContext, flag *pflag.FlagSet) {
|
||||
flag.StringVarP(&appCtx.namespace, "namespace", "n", "", "namespace of the k3k cluster")
|
||||
}
|
||||
|
||||
func InitializeConfig(cmd *cobra.Command) {
|
||||
viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
|
||||
viper.AutomaticEnv()
|
||||
|
||||
// Bind the current command's flags to viper
|
||||
cmd.Flags().VisitAll(func(f *pflag.Flag) {
|
||||
// Apply the viper config value to the flag when the flag is not set and viper has a value
|
||||
if !f.Changed && viper.IsSet(f.Name) {
|
||||
val := viper.Get(f.Name)
|
||||
_ = cmd.Flags().Set(f.Name, fmt.Sprintf("%v", val))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
return app
|
||||
}
|
||||
|
||||
func Namespace() string {
|
||||
if namespace == "" {
|
||||
return defaultNamespace
|
||||
}
|
||||
return namespace
|
||||
})
|
||||
}
|
||||
|
||||
104 cli/cmds/table_printer.go Normal file
@@ -0,0 +1,104 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/util/jsonpath"
|
||||
|
||||
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// createTable creates a table to print from the printerColumn defined in the CRD spec, plus the name at the beginning
|
||||
func createTable[T runtime.Object](crd *apiextensionsv1.CustomResourceDefinition, objs []T) *metav1.Table {
|
||||
printerColumns := getPrinterColumnsFromCRD(crd)
|
||||
|
||||
return &metav1.Table{
|
||||
TypeMeta: metav1.TypeMeta{APIVersion: "meta.k8s.io/v1", Kind: "Table"},
|
||||
ColumnDefinitions: convertToTableColumns(printerColumns),
|
||||
Rows: createTableRows(objs, printerColumns),
|
||||
}
|
||||
}
|
||||
|
||||
func getPrinterColumnsFromCRD(crd *apiextensionsv1.CustomResourceDefinition) []apiextensionsv1.CustomResourceColumnDefinition {
|
||||
printerColumns := []apiextensionsv1.CustomResourceColumnDefinition{
|
||||
{Name: "Name", Type: "string", Format: "name", Description: "Name of the Resource", JSONPath: ".metadata.name"},
|
||||
}
|
||||
|
||||
for _, version := range crd.Spec.Versions {
|
||||
if version.Name == "v1alpha1" {
|
||||
printerColumns = append(printerColumns, version.AdditionalPrinterColumns...)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return printerColumns
|
||||
}
|
||||
|
||||
func convertToTableColumns(printerColumns []apiextensionsv1.CustomResourceColumnDefinition) []metav1.TableColumnDefinition {
|
||||
var columnDefinitions []metav1.TableColumnDefinition
|
||||
|
||||
for _, col := range printerColumns {
|
||||
columnDefinitions = append(columnDefinitions, metav1.TableColumnDefinition{
|
||||
Name: col.Name,
|
||||
Type: col.Type,
|
||||
Format: col.Format,
|
||||
Description: col.Description,
|
||||
Priority: col.Priority,
|
||||
})
|
||||
}
|
||||
|
||||
return columnDefinitions
|
||||
}
|
||||
|
||||
func createTableRows[T runtime.Object](objs []T, printerColumns []apiextensionsv1.CustomResourceColumnDefinition) []metav1.TableRow {
|
||||
var rows []metav1.TableRow
|
||||
|
||||
for _, obj := range objs {
|
||||
objMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&obj)
|
||||
if err != nil {
|
||||
rows = append(rows, metav1.TableRow{Cells: []any{"<error: " + err.Error() + ">"}})
|
||||
continue
|
||||
}
|
||||
|
||||
rows = append(rows, metav1.TableRow{
|
||||
Cells: buildRowCells(objMap, printerColumns),
|
||||
Object: runtime.RawExtension{Object: obj},
|
||||
})
|
||||
}
|
||||
|
||||
return rows
|
||||
}
|
||||
|
||||
func buildRowCells(objMap map[string]any, printerColumns []apiextensionsv1.CustomResourceColumnDefinition) []any {
|
||||
var cells []any
|
||||
|
||||
for _, printCol := range printerColumns {
|
||||
j := jsonpath.New(printCol.Name)
|
||||
|
||||
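// CRD printer columns store their JSONPath without braces (e.g. ".status.phase"),
// so braces are added below to form a template the Kubernetes jsonpath parser accepts.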
err := j.Parse("{" + printCol.JSONPath + "}")
|
||||
if err != nil {
|
||||
cells = append(cells, "<error>")
|
||||
continue
|
||||
}
|
||||
|
||||
results, err := j.FindResults(objMap)
|
||||
if err != nil || len(results) == 0 || len(results[0]) == 0 {
|
||||
cells = append(cells, "<none>")
|
||||
continue
|
||||
}
|
||||
|
||||
cells = append(cells, results[0][0].Interface())
|
||||
}
|
||||
|
||||
return cells
|
||||
}
|
||||
|
||||
func toPointerSlice[T any](v []T) []*T {
|
||||
vPtr := make([]*T, len(v))
|
||||
|
||||
for i := range v {
|
||||
vPtr[i] = &v[i]
|
||||
}
|
||||
|
||||
return vPtr
|
||||
}
|
||||
22 cli/main.go
@@ -1,30 +1,14 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/rancher/k3k/cli/cmds"
|
||||
"github.com/rancher/k3k/cli/cmds/cluster"
|
||||
"github.com/rancher/k3k/cli/cmds/kubeconfig"
|
||||
"github.com/rancher/k3k/pkg/buildinfo"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
func main() {
|
||||
app := cmds.NewApp()
|
||||
app.Version = buildinfo.Version
|
||||
cli.VersionPrinter = func(cCtx *cli.Context) {
|
||||
fmt.Println("k3kcli Version: " + buildinfo.Version)
|
||||
}
|
||||
|
||||
app.Commands = []*cli.Command{
|
||||
cluster.NewCommand(),
|
||||
kubeconfig.NewCommand(),
|
||||
}
|
||||
|
||||
if err := app.Run(os.Args); err != nil {
|
||||
app := cmds.NewRootCmd()
|
||||
if err := app.Execute(); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,7 +6,7 @@ This document provides advanced usage information for k3k, including detailed us

The `Cluster` resource provides a variety of fields for customizing the behavior of your virtual clusters. You can check the [CRD documentation](./crds/crd-docs.md) for the full specs.

**Note:** Most of these customization options can also be configured using the `k3kcli` tool. Refer to the `k3kcli` documentation for more details.
**Note:** Most of these customization options can also be configured using the `k3kcli` tool. Refer to the [k3kcli](./cli/k3kcli.md) documentation for more details.

@@ -94,14 +94,14 @@ In this example we are exposing the Cluster with a Nginx ingress-controller, tha

### `clusterCIDR`

The `clusterCIDR` field specifies the CIDR range for the pods of the cluster. The default value is `10.42.0.0/16`.
The `clusterCIDR` field specifies the CIDR range for the pods of the cluster. The default value is `10.42.0.0/16` in shared mode, and `10.52.0.0/16` in virtual mode.

### `serviceCIDR`

The `serviceCIDR` field specifies the CIDR range for the services in the cluster. The default value is `10.43.0.0/16`.
The `serviceCIDR` field specifies the CIDR range for the services in the cluster. The default value is `10.43.0.0/16` in shared mode, and `10.53.0.0/16` in virtual mode.

**Note:** In `shared` mode, the `serviceCIDR` should match the host cluster's `serviceCIDR` to prevent conflicts.
**Note:** In `shared` mode, the `serviceCIDR` should match the host cluster's `serviceCIDR` to prevent conflicts, and in `virtual` mode both `serviceCIDR` and `clusterCIDR` should differ from the host cluster's ranges.
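As an illustration, a minimal sketch of how these CIDR fields map onto the `Cluster` spec built by the CLI in this branch; the names and values are placeholders:

```go
// Minimal sketch, assuming the v1alpha1 types added in this branch.
cluster := &v1alpha1.Cluster{
	ObjectMeta: metav1.ObjectMeta{Name: "my-cluster", Namespace: "k3k-my-cluster"},
	Spec: v1alpha1.ClusterSpec{
		Mode:        v1alpha1.SharedClusterMode,
		ClusterCIDR: "10.42.0.0/16", // pod CIDR
		ServiceCIDR: "10.43.0.0/16", // in shared mode, keep equal to the host cluster's service CIDR
	},
}
```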
### `clusterDNS`

@@ -112,3 +112,21 @@ The `clusterDNS` field specifies the IP address for the CoreDNS service. It need

### `serverArgs`

The `serverArgs` field allows you to specify additional arguments to be passed to the K3s server pods.

## Using the cli

You can check the [k3kcli documentation](./cli/cli-docs.md) for the full specs.

### No storage provider:

* Ephemeral Storage:

```bash
k3kcli cluster create --persistence-type ephemeral my-cluster
```

*Important Notes:*

* Using `--persistence-type ephemeral` will result in data loss if the nodes are restarted.

* It is highly recommended to use `--persistence-type dynamic` with a configured storage class (see the sketch below).
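For the recommended dynamic persistence, a hedged sketch of the `PersistenceConfig` that `newCluster` in `cli/cmds/cluster_create.go` assembles from the `--persistence-type`, `--storage-class-name` and `--storage-request-size` flags; the storage class name is a placeholder:

```go
// Sketch only; mirrors what the CLI builds for:
//   k3kcli cluster create --persistence-type dynamic --storage-class-name local-path --storage-request-size 10Gi my-cluster
persistence := v1alpha1.PersistenceConfig{
	Type:               v1alpha1.DynamicPersistenceMode,
	StorageClassName:   ptr.To("local-path"), // placeholder storage class
	StorageRequestSize: "10Gi",
}
```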
@@ -88,6 +88,25 @@ K3k consists of two main components:

* **CLI:** The K3k CLI provides a command-line interface for interacting with K3k. It allows users to easily create, manage, and access virtual clusters. The CLI simplifies common tasks such as creating `Cluster` CRs, retrieving kubeconfigs for accessing virtual clusters, and performing other management operations.


## VirtualClusterPolicy

K3k introduces the VirtualClusterPolicy Custom Resource, a way to set up and apply common configurations that govern how your virtual clusters operate within the K3k environment.

The primary goal of VCPs is to allow administrators to centrally manage and apply consistent policies. This reduces repetitive configuration, helps meet organizational standards, and enhances the security and operational consistency of virtual clusters managed by K3k.

A VirtualClusterPolicy is bound to one or more Kubernetes Namespaces. Once bound, the rules defined in the VCP apply to all K3k virtual clusters that are running or created in that Namespace. This allows for flexible policy application: different Namespaces can use their own unique VCPs, while others can share a single VCP for a consistent setup.

Common use cases for administrators leveraging VirtualClusterPolicy include:

- Defining the operational mode (like "shared" or "virtual") for virtual clusters.
- Setting up resource quotas and limit ranges to control the amount of resources virtual clusters and their workloads can use.
- Enforcing security standards, for example, by configuring Pod Security Admission (PSA) labels for Namespaces.

The K3k controller actively monitors VirtualClusterPolicy resources and the corresponding Namespace bindings. When a VCP is applied or updated, the controller ensures that the defined configurations are enforced on the relevant virtual clusters and their associated resources within the targeted Namespaces, as illustrated below.
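As a concrete illustration, a minimal sketch (matching `createPolicy` in `cli/cmds/policy_create.go` from this change) of a VirtualClusterPolicy that only allows the shared mode; the policy name is a placeholder:

```go
// Minimal sketch; the namespace binding is done by labelling a Namespace with
// policy.PolicyNameLabelKey = "shared-only" (see createNamespace in policy_create.go).
vcp := &v1alpha1.VirtualClusterPolicy{
	TypeMeta:   metav1.TypeMeta{Kind: "VirtualClusterPolicy", APIVersion: "k3k.io/v1alpha1"},
	ObjectMeta: metav1.ObjectMeta{Name: "shared-only"},
	Spec: v1alpha1.VirtualClusterPolicySpec{
		AllowedMode: v1alpha1.SharedClusterMode,
	},
}
```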
For a deep dive into what VirtualClusterPolicy can do, along with more examples, check out the [VirtualClusterPolicy Concepts](./virtualclusterpolicy.md) page. For a full list of all the spec fields, see the [API Reference for VirtualClusterPolicy](./crds/crd-docs.md#virtualclusterpolicy).


## Comparison and Trade-offs

K3k offers two distinct modes for deploying virtual clusters: `shared` and `virtual`. Each mode has its own strengths and weaknesses, and the best choice depends on the specific needs and priorities of the user. Here's a comparison to help you make an informed decision:
31 docs/cli/genclidoc.go Normal file
@@ -0,0 +1,31 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
"github.com/spf13/cobra/doc"
|
||||
|
||||
"github.com/rancher/k3k/cli/cmds"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Instantiate the CLI application
|
||||
k3kcli := cmds.NewRootCmd()
|
||||
|
||||
wd, err := os.Getwd()
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
outputDir := path.Join(wd, "docs/cli")
|
||||
|
||||
if err := doc.GenMarkdownTree(k3kcli, outputDir); err != nil {
|
||||
fmt.Println("Error generating documentation:", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
fmt.Println("Documentation generated at " + outputDir)
|
||||
}
|
||||
18 docs/cli/k3kcli.md Normal file
@@ -0,0 +1,18 @@
|
||||
## k3kcli
|
||||
|
||||
CLI for K3K
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
--debug Turn on debug logs
|
||||
-h, --help help for k3kcli
|
||||
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [k3kcli cluster](k3kcli_cluster.md) - cluster command
|
||||
* [k3kcli kubeconfig](k3kcli_kubeconfig.md) - Manage kubeconfig for clusters
|
||||
* [k3kcli policy](k3kcli_policy.md) - policy command
|
||||
|
||||
24 docs/cli/k3kcli_cluster.md Normal file
@@ -0,0 +1,24 @@
|
||||
## k3kcli cluster
|
||||
|
||||
cluster command
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for cluster
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--debug Turn on debug logs
|
||||
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [k3kcli](k3kcli.md) - CLI for K3K
|
||||
* [k3kcli cluster create](k3kcli_cluster_create.md) - Create new cluster
|
||||
* [k3kcli cluster delete](k3kcli_cluster_delete.md) - Delete an existing cluster
|
||||
* [k3kcli cluster list](k3kcli_cluster_list.md) - List all the existing clusters
|
||||
|
||||
50 docs/cli/k3kcli_cluster_create.md Normal file
@@ -0,0 +1,50 @@
|
||||
## k3kcli cluster create
|
||||
|
||||
Create new cluster
|
||||
|
||||
```
|
||||
k3kcli cluster create [flags]
|
||||
```
|
||||
|
||||
### Examples
|
||||
|
||||
```
|
||||
k3kcli cluster create [command options] NAME
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
--agent-args strings agents extra arguments
|
||||
--agent-envs strings agents extra Envs
|
||||
--agents int number of agents
|
||||
--cluster-cidr string cluster CIDR
|
||||
--custom-certs string The path for custom certificate directory
|
||||
-h, --help help for create
|
||||
--kubeconfig-server string override the kubeconfig server host
|
||||
--mirror-host-nodes Mirror Host Cluster Nodes
|
||||
--mode string k3k mode type (shared, virtual) (default "shared")
|
||||
-n, --namespace string namespace of the k3k cluster
|
||||
--persistence-type string persistence mode for the nodes (dynamic, ephemeral, static) (default "dynamic")
|
||||
--policy string The policy to create the cluster in
|
||||
--server-args strings servers extra arguments
|
||||
--server-envs strings servers extra Envs
|
||||
--servers int number of servers (default 1)
|
||||
--service-cidr string service CIDR
|
||||
--storage-class-name string storage class name for dynamic persistence type
|
||||
--storage-request-size string storage size for dynamic persistence type
|
||||
--token string token of the cluster
|
||||
--version string k3s version
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--debug Turn on debug logs
|
||||
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [k3kcli cluster](k3kcli_cluster.md) - cluster command
|
||||
|
||||
33 docs/cli/k3kcli_cluster_delete.md Normal file
@@ -0,0 +1,33 @@
|
||||
## k3kcli cluster delete
|
||||
|
||||
Delete an existing cluster
|
||||
|
||||
```
|
||||
k3kcli cluster delete [flags]
|
||||
```
|
||||
|
||||
### Examples
|
||||
|
||||
```
|
||||
k3kcli cluster delete [command options] NAME
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for delete
|
||||
--keep-data keeps persistence volumes created for the cluster after deletion
|
||||
-n, --namespace string namespace of the k3k cluster
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--debug Turn on debug logs
|
||||
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [k3kcli cluster](k3kcli_cluster.md) - cluster command
|
||||
|
||||
32 docs/cli/k3kcli_cluster_list.md Normal file
@@ -0,0 +1,32 @@
|
||||
## k3kcli cluster list
|
||||
|
||||
List all the existing clusters
|
||||
|
||||
```
|
||||
k3kcli cluster list [flags]
|
||||
```
|
||||
|
||||
### Examples
|
||||
|
||||
```
|
||||
k3kcli cluster list [command options]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for list
|
||||
-n, --namespace string namespace of the k3k cluster
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--debug Turn on debug logs
|
||||
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [k3kcli cluster](k3kcli_cluster.md) - cluster command
|
||||
|
||||
22 docs/cli/k3kcli_kubeconfig.md Normal file
@@ -0,0 +1,22 @@
|
||||
## k3kcli kubeconfig
|
||||
|
||||
Manage kubeconfig for clusters
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for kubeconfig
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--debug Turn on debug logs
|
||||
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [k3kcli](k3kcli.md) - CLI for K3K
|
||||
* [k3kcli kubeconfig generate](k3kcli_kubeconfig_generate.md) - Generate kubeconfig for clusters
|
||||
|
||||
33 docs/cli/k3kcli_kubeconfig_generate.md Normal file
@@ -0,0 +1,33 @@
|
||||
## k3kcli kubeconfig generate
|
||||
|
||||
Generate kubeconfig for clusters
|
||||
|
||||
```
|
||||
k3kcli kubeconfig generate [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
--altNames strings altNames of the generated certificates for the kubeconfig
|
||||
--cn string Common name (CN) of the generated certificates for the kubeconfig (default "system:admin")
|
||||
--config-name string the name of the generated kubeconfig file
|
||||
--expiration-days int Expiration date of the certificates used for the kubeconfig (default 365)
|
||||
-h, --help help for generate
|
||||
--kubeconfig-server string override the kubeconfig server host
|
||||
--name string cluster name
|
||||
-n, --namespace string namespace of the k3k cluster
|
||||
--org strings Organization name (ORG) of the generated certificates for the kubeconfig
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--debug Turn on debug logs
|
||||
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [k3kcli kubeconfig](k3kcli_kubeconfig.md) - Manage kubeconfig for clusters
|
||||
|
||||
docs/cli/k3kcli_policy.md (new file, 24 lines)
@@ -0,0 +1,24 @@
|
||||
## k3kcli policy
|
||||
|
||||
policy command
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for policy
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--debug Turn on debug logs
|
||||
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [k3kcli](k3kcli.md) - CLI for K3K
|
||||
* [k3kcli policy create](k3kcli_policy_create.md) - Create new policy
|
||||
* [k3kcli policy delete](k3kcli_policy_delete.md) - Delete an existing policy
|
||||
* [k3kcli policy list](k3kcli_policy_list.md) - List all the existing policies
|
||||
|
||||
docs/cli/k3kcli_policy_create.md (new file, 32 lines)
@@ -0,0 +1,32 @@
|
||||
## k3kcli policy create
|
||||
|
||||
Create new policy
|
||||
|
||||
```
|
||||
k3kcli policy create [flags]
|
||||
```
|
||||
|
||||
### Examples
|
||||
|
||||
```
|
||||
k3kcli policy create [command options] NAME
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for create
|
||||
--mode string The allowed mode type of the policy (default "shared")
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--debug Turn on debug logs
|
||||
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [k3kcli policy](k3kcli_policy.md) - policy command
|
||||
|
||||
docs/cli/k3kcli_policy_delete.md (new file, 31 lines)
@@ -0,0 +1,31 @@
|
||||
## k3kcli policy delete
|
||||
|
||||
Delete an existing policy
|
||||
|
||||
```
|
||||
k3kcli policy delete [flags]
|
||||
```
|
||||
|
||||
### Examples
|
||||
|
||||
```
|
||||
k3kcli policy delete [command options] NAME
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for delete
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--debug Turn on debug logs
|
||||
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [k3kcli policy](k3kcli_policy.md) - policy command
|
||||
|
||||
docs/cli/k3kcli_policy_list.md (new file, 31 lines)
@@ -0,0 +1,31 @@
|
||||
## k3kcli policy list
|
||||
|
||||
List all the existing policies
|
||||
|
||||
```
|
||||
k3kcli policy list [flags]
|
||||
```
|
||||
|
||||
### Examples
|
||||
|
||||
```
|
||||
k3kcli policy list [command options]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for list
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--debug Turn on debug logs
|
||||
--kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [k3kcli policy](k3kcli_policy.md) - policy command
|
||||
|
||||
@@ -1,6 +0,0 @@
|
||||
CRD_REF_DOCS_VER := v0.1.0
|
||||
CRD_REF_DOCS := go run github.com/elastic/crd-ref-docs@$(CRD_REF_DOCS_VER)
|
||||
|
||||
.PHONY: generate
|
||||
generate:
|
||||
$(CRD_REF_DOCS) --config=config.yaml --renderer=markdown --source-path=../../pkg/apis/k3k.io/v1alpha1 --output-path=crd-docs.md
|
||||
@@ -1,9 +1,4 @@
|
||||
processor:
|
||||
# RE2 regular expressions describing types that should be excluded from the generated documentation.
|
||||
ignoreTypes:
|
||||
- ClusterSet
|
||||
- ClusterSetList
|
||||
|
||||
# RE2 regular expressions describing type fields that should be excluded from the generated documentation.
|
||||
ignoreFields:
|
||||
- "status$"
|
||||
|
||||
@@ -10,6 +10,8 @@
|
||||
### Resource Types
|
||||
- [Cluster](#cluster)
|
||||
- [ClusterList](#clusterlist)
|
||||
- [VirtualClusterPolicy](#virtualclusterpolicy)
|
||||
- [VirtualClusterPolicyList](#virtualclusterpolicylist)
|
||||
|
||||
|
||||
|
||||
@@ -17,7 +19,7 @@
|
||||
|
||||
|
||||
|
||||
|
||||
Addon specifies a Secret containing YAML to be deployed on cluster startup.
|
||||
|
||||
|
||||
|
||||
@@ -26,15 +28,17 @@ _Appears in:_
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `secretNamespace` _string_ | | | |
|
||||
| `secretRef` _string_ | | | |
|
||||
| `secretNamespace` _string_ | SecretNamespace is the namespace of the Secret. | | |
|
||||
| `secretRef` _string_ | SecretRef is the name of the Secret. | | |
|
||||
|
||||
|
||||
#### Cluster
|
||||
|
||||
|
||||
|
||||
|
||||
Cluster defines a virtual Kubernetes cluster managed by k3k.
|
||||
It specifies the desired state of a virtual cluster, including version, node configuration, and networking.
|
||||
k3k uses this to provision and manage these virtual clusters.
|
||||
|
||||
|
||||
|
||||
@@ -46,31 +50,14 @@ _Appears in:_
|
||||
| `apiVersion` _string_ | `k3k.io/v1alpha1` | | |
|
||||
| `kind` _string_ | `Cluster` | | |
|
||||
| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
|
||||
| `spec` _[ClusterSpec](#clusterspec)_ | | \{ \} | |
|
||||
|
||||
|
||||
#### ClusterLimit
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [ClusterSpec](#clusterspec)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `serverLimit` _[ResourceList](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core)_ | ServerLimit is the limits (cpu/mem) that apply to the server nodes | | |
|
||||
| `workerLimit` _[ResourceList](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core)_ | WorkerLimit is the limits (cpu/mem) that apply to the agent nodes | | |
|
||||
| `spec` _[ClusterSpec](#clusterspec)_ | Spec defines the desired state of the Cluster. | \{ \} | |
|
||||
|
||||
|
||||
#### ClusterList
|
||||
|
||||
|
||||
|
||||
|
||||
ClusterList is a list of Cluster resources.
|
||||
|
||||
|
||||
|
||||
@@ -95,6 +82,20 @@ _Validation:_
|
||||
|
||||
_Appears in:_
|
||||
- [ClusterSpec](#clusterspec)
|
||||
- [VirtualClusterPolicySpec](#virtualclusterpolicyspec)
|
||||
|
||||
|
||||
|
||||
#### ClusterPhase
|
||||
|
||||
_Underlying type:_ _string_
|
||||
|
||||
ClusterPhase is a high-level summary of the cluster's current lifecycle state.
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [ClusterStatus](#clusterstatus)
|
||||
|
||||
|
||||
|
||||
@@ -102,7 +103,7 @@ _Appears in:_
|
||||
|
||||
|
||||
|
||||
|
||||
ClusterSpec defines the desired state of a virtual Kubernetes cluster.
|
||||
|
||||
|
||||
|
||||
@@ -111,32 +112,111 @@ _Appears in:_
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `version` _string_ | Version is a string representing the Kubernetes version to be used by the virtual nodes. | | |
|
||||
| `servers` _integer_ | Servers is the number of K3s pods to run in server (controlplane) mode. | 1 | |
|
||||
| `agents` _integer_ | Agents is the number of K3s pods to run in agent (worker) mode. | 0 | |
|
||||
| `nodeSelector` _object (keys:string, values:string)_ | NodeSelector is the node selector that will be applied to all server/agent pods.<br />In "shared" mode the node selector will be applied also to the workloads. | | |
|
||||
| `priorityClass` _string_ | PriorityClass is the priorityClassName that will be applied to all server/agent pods.<br />In "shared" mode the priorityClassName will be applied also to the workloads. | | |
|
||||
| `clusterLimit` _[ClusterLimit](#clusterlimit)_ | Limit is the limits that apply for the server/worker nodes. | | |
|
||||
| `tokenSecretRef` _[SecretReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#secretreference-v1-core)_ | TokenSecretRef is Secret reference used as a token join server and worker nodes to the cluster. The controller<br />assumes that the secret has a field "token" in its data, any other fields in the secret will be ignored. | | |
|
||||
| `clusterCIDR` _string_ | ClusterCIDR is the CIDR range for the pods of the cluster. Defaults to 10.42.0.0/16. | | |
|
||||
| `serviceCIDR` _string_ | ServiceCIDR is the CIDR range for the services in the cluster. Defaults to 10.43.0.0/16. | | |
|
||||
| `clusterDNS` _string_ | ClusterDNS is the IP address for the coredns service. Needs to be in the range provided by ServiceCIDR or CoreDNS may not deploy.<br />Defaults to 10.43.0.10. | | |
|
||||
| `serverArgs` _string array_ | ServerArgs are the ordered key value pairs (e.x. "testArg", "testValue") for the K3s pods running in server mode. | | |
|
||||
| `agentArgs` _string array_ | AgentArgs are the ordered key value pairs (e.x. "testArg", "testValue") for the K3s pods running in agent mode. | | |
|
||||
| `tlsSANs` _string array_ | TLSSANs are the subjectAlternativeNames for the certificate the K3s server will use. | | |
|
||||
| `addons` _[Addon](#addon) array_ | Addons is a list of secrets containing raw YAML which will be deployed in the virtual K3k cluster on startup. | | |
|
||||
| `mode` _[ClusterMode](#clustermode)_ | Mode is the cluster provisioning mode which can be either "shared" or "virtual". Defaults to "shared" | shared | Enum: [shared virtual] <br /> |
|
||||
| `persistence` _[PersistenceConfig](#persistenceconfig)_ | Persistence contains options controlling how the etcd data of the virtual cluster is persisted. By default, no data<br />persistence is guaranteed, so restart of a virtual cluster pod may result in data loss without this field. | \{ type:dynamic \} | |
|
||||
| `expose` _[ExposeConfig](#exposeconfig)_ | Expose contains options for exposing the apiserver inside/outside of the cluster. By default, this is only exposed as a<br />clusterIP which is relatively secure, but difficult to access outside of the cluster. | | |
|
||||
| `version` _string_ | Version is the K3s version to use for the virtual nodes.<br />It should follow the K3s versioning convention (e.g., v1.28.2-k3s1).<br />If not specified, the Kubernetes version of the host node will be used. | | |
|
||||
| `mode` _[ClusterMode](#clustermode)_ | Mode specifies the cluster provisioning mode: "shared" or "virtual".<br />Defaults to "shared". This field is immutable. | shared | Enum: [shared virtual] <br /> |
|
||||
| `servers` _integer_ | Servers specifies the number of K3s pods to run in server (control plane) mode.<br />Must be at least 1. Defaults to 1. | 1 | |
|
||||
| `agents` _integer_ | Agents specifies the number of K3s pods to run in agent (worker) mode.<br />Must be 0 or greater. Defaults to 0.<br />This field is ignored in "shared" mode. | 0 | |
|
||||
| `clusterCIDR` _string_ | ClusterCIDR is the CIDR range for pod IPs.<br />Defaults to 10.42.0.0/16 in shared mode and 10.52.0.0/16 in virtual mode.<br />This field is immutable. | | |
|
||||
| `serviceCIDR` _string_ | ServiceCIDR is the CIDR range for service IPs.<br />Defaults to 10.43.0.0/16 in shared mode and 10.53.0.0/16 in virtual mode.<br />This field is immutable. | | |
|
||||
| `clusterDNS` _string_ | ClusterDNS is the IP address for the CoreDNS service.<br />Must be within the ServiceCIDR range. Defaults to 10.43.0.10.<br />This field is immutable. | | |
|
||||
| `persistence` _[PersistenceConfig](#persistenceconfig)_ | Persistence specifies options for persisting etcd data.<br />Defaults to dynamic persistence, which uses a PersistentVolumeClaim to provide data persistence.<br />A default StorageClass is required for dynamic persistence. | | |
|
||||
| `expose` _[ExposeConfig](#exposeconfig)_ | Expose specifies options for exposing the API server.<br />By default, it's only exposed as a ClusterIP. | | |
|
||||
| `nodeSelector` _object (keys:string, values:string)_ | NodeSelector specifies node labels to constrain where server/agent pods are scheduled.<br />In "shared" mode, this also applies to workloads. | | |
|
||||
| `priorityClass` _string_ | PriorityClass specifies the priorityClassName for server/agent pods.<br />In "shared" mode, this also applies to workloads. | | |
|
||||
| `tokenSecretRef` _[SecretReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#secretreference-v1-core)_ | TokenSecretRef is a Secret reference containing the token used by worker nodes to join the cluster.<br />The Secret must have a "token" field in its data. | | |
|
||||
| `tlsSANs` _string array_ | TLSSANs specifies subject alternative names for the K3s server certificate. | | |
|
||||
| `serverArgs` _string array_ | ServerArgs specifies ordered key-value pairs for K3s server pods.<br />Example: ["--tls-san=example.com"] | | |
|
||||
| `agentArgs` _string array_ | AgentArgs specifies ordered key-value pairs for K3s agent pods.<br />Example: ["--node-name=my-agent-node"] | | |
|
||||
| `serverEnvs` _[EnvVar](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#envvar-v1-core) array_ | ServerEnvs specifies list of environment variables to set in the server pod. | | |
|
||||
| `agentEnvs` _[EnvVar](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#envvar-v1-core) array_ | AgentEnvs specifies list of environment variables to set in the agent pod. | | |
|
||||
| `addons` _[Addon](#addon) array_ | Addons specifies secrets containing raw YAML to deploy on cluster startup. | | |
|
||||
| `serverLimit` _[ResourceList](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core)_ | ServerLimit specifies resource limits for server nodes. | | |
|
||||
| `workerLimit` _[ResourceList](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core)_ | WorkerLimit specifies resource limits for agent nodes. | | |
|
||||
| `mirrorHostNodes` _boolean_ | MirrorHostNodes controls whether node objects from the host cluster<br />are mirrored into the virtual cluster. | | |
|
||||
| `customCAs` _[CustomCAs](#customcas)_ | CustomCAs specifies the cert/key pairs for custom CA certificates. | | |
|
||||
| `sync` _[SyncConfig](#syncconfig)_ | Sync specifies the resources types that will be synced from virtual cluster to host cluster. | \{ \} | |
|
||||
|
||||
|
||||
|
||||
|
||||
#### ConfigMapSyncConfig
|
||||
|
||||
|
||||
|
||||
ConfigMapSyncConfig specifies the sync options for ConfigMaps.
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [SyncConfig](#syncconfig)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | | |
|
||||
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
|
||||
|
||||
|
||||
#### CredentialSource
|
||||
|
||||
|
||||
|
||||
CredentialSource defines where to get a credential from.
|
||||
It can represent either a TLS key pair or a single private key.
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [CredentialSources](#credentialsources)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `secretName` _string_ | SecretName specifies the name of an existing secret to use.<br />The controller expects specific keys inside based on the credential type:<br />- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.<br />- For ServiceAccountTokenKey: 'tls.key'. | | |
|
||||
|
||||
|
||||
#### CredentialSources
|
||||
|
||||
|
||||
|
||||
CredentialSources lists all the required credentials, including both
|
||||
TLS key pairs and single signing keys.
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [CustomCAs](#customcas)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `serverCA` _[CredentialSource](#credentialsource)_ | ServerCA specifies the server-ca cert/key pair. | | |
|
||||
| `clientCA` _[CredentialSource](#credentialsource)_ | ClientCA specifies the client-ca cert/key pair. | | |
|
||||
| `requestHeaderCA` _[CredentialSource](#credentialsource)_ | RequestHeaderCA specifies the request-header-ca cert/key pair. | | |
|
||||
| `etcdServerCA` _[CredentialSource](#credentialsource)_ | ETCDServerCA specifies the etcd-server-ca cert/key pair. | | |
|
||||
| `etcdPeerCA` _[CredentialSource](#credentialsource)_ | ETCDPeerCA specifies the etcd-peer-ca cert/key pair. | | |
|
||||
| `serviceAccountToken` _[CredentialSource](#credentialsource)_ | ServiceAccountToken specifies the service-account-token key. | | |
|
||||
|
||||
|
||||
#### CustomCAs
|
||||
|
||||
|
||||
|
||||
CustomCAs specifies the cert/key pairs for custom CA certificates.
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [ClusterSpec](#clusterspec)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `enabled` _boolean_ | Enabled toggles this feature on or off. | | |
|
||||
| `sources` _[CredentialSources](#credentialsources)_ | Sources defines the sources for all required custom CA certificates. | | |
|
||||
|
||||
|
||||
#### ExposeConfig
|
||||
|
||||
|
||||
|
||||
|
||||
ExposeConfig specifies options for exposing the API server.
|
||||
|
||||
|
||||
|
||||
@@ -145,16 +225,16 @@ _Appears in:_
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `ingress` _[IngressConfig](#ingressconfig)_ | | | |
|
||||
| `loadbalancer` _[LoadBalancerConfig](#loadbalancerconfig)_ | | | |
|
||||
| `nodePort` _[NodePortConfig](#nodeportconfig)_ | | | |
|
||||
| `ingress` _[IngressConfig](#ingressconfig)_ | Ingress specifies options for exposing the API server through an Ingress. | | |
|
||||
| `loadbalancer` _[LoadBalancerConfig](#loadbalancerconfig)_ | LoadBalancer specifies options for exposing the API server through a LoadBalancer service. | | |
|
||||
| `nodePort` _[NodePortConfig](#nodeportconfig)_ | NodePort specifies options for exposing the API server through NodePort. | | |
|
||||
|
||||
|
||||
#### IngressConfig
|
||||
|
||||
|
||||
|
||||
|
||||
IngressConfig specifies options for exposing the API server through an Ingress.
|
||||
|
||||
|
||||
|
||||
@@ -163,15 +243,32 @@ _Appears in:_
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `annotations` _object (keys:string, values:string)_ | Annotations is a key value map that will enrich the Ingress annotations | | |
|
||||
| `ingressClassName` _string_ | | | |
|
||||
| `annotations` _object (keys:string, values:string)_ | Annotations specifies annotations to add to the Ingress. | | |
|
||||
| `ingressClassName` _string_ | IngressClassName specifies the IngressClass to use for the Ingress. | | |
|
||||
|
||||
|
||||
#### IngressSyncConfig
|
||||
|
||||
|
||||
|
||||
IngressSyncConfig specifies the sync options for Ingresses.
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [SyncConfig](#syncconfig)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | | |
|
||||
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
|
||||
|
||||
|
||||
#### LoadBalancerConfig
|
||||
|
||||
|
||||
|
||||
|
||||
LoadBalancerConfig specifies options for exposing the API server through a LoadBalancer service.
|
||||
|
||||
|
||||
|
||||
@@ -180,14 +277,15 @@ _Appears in:_
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `enabled` _boolean_ | | | |
|
||||
| `serverPort` _integer_ | ServerPort is the port on which the K3s server is exposed when type is LoadBalancer.<br />If not specified, the default https 443 port will be allocated.<br />If 0 or negative, the port will not be exposed. | | |
|
||||
| `etcdPort` _integer_ | ETCDPort is the port on which the ETCD service is exposed when type is LoadBalancer.<br />If not specified, the default etcd 2379 port will be allocated.<br />If 0 or negative, the port will not be exposed. | | |
|
||||
|
||||
|
||||
#### NodePortConfig
|
||||
|
||||
|
||||
|
||||
|
||||
NodePortConfig specifies options for exposing the API server through NodePort.
|
||||
|
||||
|
||||
|
||||
@@ -196,28 +294,26 @@ _Appears in:_
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `serverPort` _integer_ | ServerPort is the port on each node on which the K3s server service is exposed when type is NodePort.<br />If not specified, a port will be allocated (default: 30000-32767) | | |
|
||||
| `servicePort` _integer_ | ServicePort is the port on each node on which the K3s service is exposed when type is NodePort.<br />If not specified, a port will be allocated (default: 30000-32767) | | |
|
||||
| `etcdPort` _integer_ | ETCDPort is the port on each node on which the ETCD service is exposed when type is NodePort.<br />If not specified, a port will be allocated (default: 30000-32767) | | |
|
||||
| `serverPort` _integer_ | ServerPort is the port on each node on which the K3s server is exposed when type is NodePort.<br />If not specified, a random port between 30000-32767 will be allocated.<br />If out of range, the port will not be exposed. | | |
|
||||
| `etcdPort` _integer_ | ETCDPort is the port on each node on which the ETCD service is exposed when type is NodePort.<br />If not specified, a random port between 30000-32767 will be allocated.<br />If out of range, the port will not be exposed. | | |
|
||||
|
||||
|
||||
#### PersistenceConfig
|
||||
|
||||
|
||||
|
||||
|
||||
PersistenceConfig specifies options for persisting etcd data.
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [ClusterSpec](#clusterspec)
|
||||
- [ClusterStatus](#clusterstatus)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `type` _[PersistenceMode](#persistencemode)_ | | dynamic | |
|
||||
| `storageClassName` _string_ | | | |
|
||||
| `storageRequestSize` _string_ | | | |
|
||||
| `type` _[PersistenceMode](#persistencemode)_ | Type specifies the persistence mode. | dynamic | |
|
||||
| `storageClassName` _string_ | StorageClassName is the name of the StorageClass to use for the PVC.<br />This field is only relevant in "dynamic" mode. | | |
|
||||
| `storageRequestSize` _string_ | StorageRequestSize is the requested size for the PVC.<br />This field is only relevant in "dynamic" mode. | 1G | |
|
||||
|
||||
|
||||
#### PersistenceMode
|
||||
@@ -233,5 +329,170 @@ _Appears in:_
|
||||
|
||||
|
||||
|
||||
#### PersistentVolumeClaimSyncConfig
|
||||
|
||||
|
||||
|
||||
PersistentVolumeClaimSyncConfig specifies the sync options for PersistentVolumeClaims.
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [SyncConfig](#syncconfig)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | | |
|
||||
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
|
||||
|
||||
|
||||
#### PodSecurityAdmissionLevel
|
||||
|
||||
_Underlying type:_ _string_
|
||||
|
||||
PodSecurityAdmissionLevel is the policy level applied to the pods in the namespace.
|
||||
|
||||
_Validation:_
|
||||
- Enum: [privileged baseline restricted]
|
||||
|
||||
_Appears in:_
|
||||
- [VirtualClusterPolicySpec](#virtualclusterpolicyspec)
|
||||
|
||||
|
||||
|
||||
#### PriorityClassSyncConfig
|
||||
|
||||
|
||||
|
||||
PriorityClassSyncConfig specifies the sync options for PriorityClasses.
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [SyncConfig](#syncconfig)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | | |
|
||||
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
|
||||
|
||||
|
||||
#### SecretSyncConfig
|
||||
|
||||
|
||||
|
||||
SecretSyncConfig specifies the sync options for Secrets.
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [SyncConfig](#syncconfig)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | | |
|
||||
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
|
||||
|
||||
|
||||
#### ServiceSyncConfig
|
||||
|
||||
|
||||
|
||||
ServiceSyncConfig specifies the sync options for services.
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [SyncConfig](#syncconfig)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | | |
|
||||
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
|
||||
|
||||
|
||||
#### SyncConfig
|
||||
|
||||
|
||||
|
||||
SyncConfig will contain the resources that should be synced from virtual cluster to host cluster.
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [ClusterSpec](#clusterspec)
|
||||
- [VirtualClusterPolicySpec](#virtualclusterpolicyspec)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `services` _[ServiceSyncConfig](#servicesyncconfig)_ | Services resources sync configuration. | \{ enabled:true \} | |
|
||||
| `configmaps` _[ConfigMapSyncConfig](#configmapsyncconfig)_ | ConfigMaps resources sync configuration. | \{ enabled:true \} | |
|
||||
| `secrets` _[SecretSyncConfig](#secretsyncconfig)_ | Secrets resources sync configuration. | \{ enabled:true \} | |
|
||||
| `ingresses` _[IngressSyncConfig](#ingresssyncconfig)_ | Ingresses resources sync configuration. | \{ enabled:false \} | |
|
||||
| `persistentVolumeClaims` _[PersistentVolumeClaimSyncConfig](#persistentvolumeclaimsyncconfig)_ | PersistentVolumeClaims resources sync configuration. | \{ enabled:true \} | |
|
||||
| `priorityClasses` _[PriorityClassSyncConfig](#priorityclasssyncconfig)_ | PriorityClasses resources sync configuration. | \{ enabled:false \} | |
|
||||
|
||||
|
||||
#### VirtualClusterPolicy
|
||||
|
||||
|
||||
|
||||
VirtualClusterPolicy allows defining common configurations and constraints
|
||||
for clusters within a clusterpolicy.
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [VirtualClusterPolicyList](#virtualclusterpolicylist)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `apiVersion` _string_ | `k3k.io/v1alpha1` | | |
|
||||
| `kind` _string_ | `VirtualClusterPolicy` | | |
|
||||
| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
|
||||
| `spec` _[VirtualClusterPolicySpec](#virtualclusterpolicyspec)_ | Spec defines the desired state of the VirtualClusterPolicy. | \{ \} | |
|
||||
|
||||
|
||||
#### VirtualClusterPolicyList
|
||||
|
||||
|
||||
|
||||
VirtualClusterPolicyList is a list of VirtualClusterPolicy resources.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `apiVersion` _string_ | `k3k.io/v1alpha1` | | |
|
||||
| `kind` _string_ | `VirtualClusterPolicyList` | | |
|
||||
| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
|
||||
| `items` _[VirtualClusterPolicy](#virtualclusterpolicy) array_ | | | |
|
||||
|
||||
|
||||
#### VirtualClusterPolicySpec
|
||||
|
||||
|
||||
|
||||
VirtualClusterPolicySpec defines the desired state of a VirtualClusterPolicy.
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [VirtualClusterPolicy](#virtualclusterpolicy)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `quota` _[ResourceQuotaSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcequotaspec-v1-core)_ | Quota specifies the resource limits for clusters within a clusterpolicy. | | |
|
||||
| `limit` _[LimitRangeSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#limitrangespec-v1-core)_ | Limit specifies the LimitRange that will be applied to all pods within the VirtualClusterPolicy<br />to set defaults and constraints (min/max) | | |
|
||||
| `defaultNodeSelector` _object (keys:string, values:string)_ | DefaultNodeSelector specifies the node selector that applies to all clusters (server + agent) in the target Namespace. | | |
|
||||
| `defaultPriorityClass` _string_ | DefaultPriorityClass specifies the priorityClassName applied to all pods of all clusters in the target Namespace. | | |
|
||||
| `allowedMode` _[ClusterMode](#clustermode)_ | AllowedMode specifies the allowed cluster provisioning mode. Defaults to "shared". | shared | Enum: [shared virtual] <br /> |
|
||||
| `disableNetworkPolicy` _boolean_ | DisableNetworkPolicy indicates whether to disable the creation of a default network policy for cluster isolation. | | |
|
||||
| `podSecurityAdmissionLevel` _[PodSecurityAdmissionLevel](#podsecurityadmissionlevel)_ | PodSecurityAdmissionLevel specifies the pod security admission level applied to the pods in the namespace. | | Enum: [privileged baseline restricted] <br /> |
|
||||
| `sync` _[SyncConfig](#syncconfig)_ | Sync specifies the resources types that will be synced from virtual cluster to host cluster. | \{ \} | |
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -1,15 +1,155 @@
|
||||
# Development
|
||||
|
||||
|
||||
## Prerequisites
|
||||
|
||||
To start developing K3k you will need:
|
||||
|
||||
- Go
|
||||
- Docker
|
||||
- Helm
|
||||
- A running Kubernetes cluster
|
||||
|
||||
|
||||
### TLDR
|
||||
|
||||
```shell
|
||||
#!/bin/bash
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# These environment variables configure the image repository and tag.
|
||||
export REPO=ghcr.io/myuser
|
||||
export VERSION=dev-$(date -u '+%Y%m%d%H%M')
|
||||
|
||||
make
|
||||
make push
|
||||
make install
|
||||
```
|
||||
|
||||
### Makefile
|
||||
|
||||
To see all the available Make commands you can run `make help`, e.g.:
|
||||
|
||||
```
|
||||
-> % make help
|
||||
all Run 'make' or 'make all' to run 'version', 'generate', 'build' and 'package'
|
||||
version Print the current version
|
||||
build Build the K3k binaries (k3k, k3k-kubelet and k3kcli)
|
||||
package Package the k3k and k3k-kubelet Docker images
|
||||
push Push the K3k images to the registry
|
||||
test Run all the tests
|
||||
test-unit Run the unit tests (skips the e2e)
|
||||
test-controller Run the controller tests (pkg/controller)
|
||||
test-kubelet-controller Run the controller tests (pkg/controller)
|
||||
test-e2e Run the e2e tests
|
||||
generate Generate the CRDs specs
|
||||
docs Build the CRDs and CLI docs
|
||||
lint Find any linting issues in the project
|
||||
validate Validate the project checking for any dependency or doc mismatch
|
||||
install Install K3k with Helm on the targeted Kubernetes cluster
|
||||
help Show this help.
|
||||
```
|
||||
|
||||
### Build
|
||||
|
||||
To build the needed binaries (`k3k`, `k3k-kubelet` and the `k3kcli`) and package the images you can simply run `make`.
|
||||
|
||||
By default the `rancher` repository will be used, but you can customize this to your registry with the `REPO` env var:
|
||||
|
||||
```
|
||||
REPO=ghcr.io/userorg make
|
||||
```
|
||||
|
||||
To customize the tag you can also explicitly set the VERSION:
|
||||
|
||||
```
|
||||
VERSION=dev-$(date -u '+%Y%m%d%H%M') make
|
||||
```
|
||||
|
||||
|
||||
### Push
|
||||
|
||||
You will need to push the built images to your registry, and you can use the `make push` command to do this.
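For example, reusing the same `REPO` and `VERSION` values that were exported for the build (the registry below is a placeholder):

```
export REPO=ghcr.io/userorg
export VERSION=dev-202501011200   # reuse the tag produced by the build/package step
make push
```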
|
||||
|
||||
|
||||
### Install
|
||||
|
||||
Once you have your images available you can install K3k with the `make install` command. This will use `helm` to install the release.
|
||||
|
||||
|
||||
## Tests
|
||||
|
||||
To run the tests we use [Ginkgo](https://onsi.github.io/ginkgo/), and [`envtest`](https://book.kubebuilder.io/reference/envtest) for testing the controllers.
To run the tests you can just run `make test`, or one of the other available "sub-tests" targets (`test-unit`, `test-controller`, `test-e2e`); alternatively you can run `ginkgo run ./...` directly.

The required binaries for `envtest` are installed with [`setup-envtest`](https://pkg.go.dev/sigs.k8s.io/controller-runtime/tools/setup-envtest), in the `.envtest` folder. If you prefer to install them manually, you can put them in the default path `/usr/local/kubebuilder/bin`:

```
ENVTEST_BIN=$(setup-envtest use -p path)
sudo mkdir -p /usr/local/kubebuilder/bin
sudo cp $ENVTEST_BIN/* /usr/local/kubebuilder/bin
```
|
||||
|
||||
|
||||
## CRDs and Docs
|
||||
|
||||
We are using Kubebuilder and `controller-gen` to build the needed CRDs. To generate the specs you can run `make generate`.
|
||||
|
||||
Remember also to update the CRDs documentation running the `make docs` command.
|
||||
|
||||
## How to install k3k on k3d
|
||||
|
||||
This document provides a guide on how to install k3k on [k3d](https://k3d.io).
|
||||
|
||||
### Installing k3d
|
||||
|
||||
Since k3d uses docker under the hood, we need to expose the ports on the host that we'll then use for the NodePort in virtual cluster creation.
|
||||
|
||||
Create the k3d cluster in the following way:
|
||||
|
||||
```bash
|
||||
k3d cluster create k3k -p "30000-30010:30000-30010@server:0"
|
||||
```
|
||||
|
||||
With this syntax, ports 30000 to 30010 will be exposed on the host.
|
||||
|
||||
### Install k3k
|
||||
|
||||
Now install k3k as usual:
|
||||
|
||||
```bash
|
||||
helm repo update
|
||||
helm install --namespace k3k-system --create-namespace k3k k3k/k3k
|
||||
```
|
||||
|
||||
### Create a virtual cluster
|
||||
|
||||
Once the k3k controller is up and running, create a namespace for our first virtual cluster.
|
||||
|
||||
```bash
|
||||
kubectl create ns k3k-mycluster
|
||||
```
|
||||
|
||||
Then create the virtual cluster, exposing it via NodePort on one of the ports that we set up in the previous step:
|
||||
|
||||
```bash
|
||||
cat <<EOF | kubectl apply -f -
|
||||
apiVersion: k3k.io/v1alpha1
|
||||
kind: Cluster
|
||||
metadata:
|
||||
name: mycluster
|
||||
namespace: k3k-mycluster
|
||||
spec:
|
||||
expose:
|
||||
nodePort:
|
||||
serverPort: 30001
|
||||
EOF
|
||||
```
|
||||
|
||||
Check that the cluster is ready:
|
||||
|
||||
```bash
|
||||
kubectl get po -n k3k-mycluster
|
||||
```
|
||||
|
||||
The last thing to do is to get the kubeconfig to connect to the virtual cluster we've just created:
|
||||
|
||||
```bash
|
||||
k3kcli kubeconfig generate --name mycluster --namespace k3k-mycluster --kubeconfig-server localhost:30001
|
||||
```
|
||||
|
||||
docs/howtos/airgap.md (new file, 92 lines)
@@ -0,0 +1,92 @@
|
||||
# K3k Air Gap Installation Guide
|
||||
|
||||
Applicable K3k modes: `virtual`, `shared`
|
||||
|
||||
This guide describes how to deploy **K3k** in an **air-gapped environment**, including the packaging of required images, Helm chart configurations, and cluster creation using a private container registry.
|
||||
|
||||
---
|
||||
|
||||
## 1. Package Required Container Images
|
||||
|
||||
### 1.1: Follow K3s Air Gap Preparation
|
||||
|
||||
Begin with the official K3s air gap packaging instructions:
|
||||
[K3s Air Gap Installation Docs](https://docs.k3s.io/installation/airgap)
|
||||
|
||||
### 1.2: Include K3k-Specific Images
|
||||
|
||||
In addition to the K3s images, make sure to include the following in your image bundle:
|
||||
|
||||
| Image Names | Descriptions |
|
||||
| --------------------------- | --------------------------------------------------------------- |
|
||||
| `rancher/k3k:<tag>` | K3k controller image (replace `<tag>` with the desired version) |
|
||||
| `rancher/k3k-kubelet:<tag>` | K3k agent image for shared mode |
|
||||
| `rancher/k3s:<tag>` | K3s server/agent image for virtual clusters |
|
||||
|
||||
Load these images into your internal (air-gapped) registry.
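One way to move them is sketched below with plain `docker` commands; the bundle file name and the `registry.internal.domain` registry are placeholders, and `<tag>` must be replaced with the versions you need:

```bash
# On a machine with internet access: pull and bundle the required images.
docker pull rancher/k3k:<tag>
docker pull rancher/k3k-kubelet:<tag>
docker pull rancher/k3s:<tag>
docker save rancher/k3k:<tag> rancher/k3k-kubelet:<tag> rancher/k3s:<tag> \
  -o k3k-airgap-images.tar

# In the air-gapped environment: load, retag and push to the internal registry.
docker load -i k3k-airgap-images.tar
docker tag rancher/k3k:<tag> registry.internal.domain/rancher/k3k:<tag>
docker tag rancher/k3k-kubelet:<tag> registry.internal.domain/rancher/k3k-kubelet:<tag>
docker tag rancher/k3s:<tag> registry.internal.domain/rancher/k3s:<tag>
docker push registry.internal.domain/rancher/k3k:<tag>
docker push registry.internal.domain/rancher/k3k-kubelet:<tag>
docker push registry.internal.domain/rancher/k3s:<tag>
```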
|
||||
|
||||
---
|
||||
|
||||
## 2. Configure Helm Chart for Air Gap installation
|
||||
|
||||
Update the `values.yaml` file in the K3k Helm chart with air gap settings:
|
||||
|
||||
```yaml
|
||||
controller:
|
||||
imagePullSecrets: [] # Optional
|
||||
image:
|
||||
repository: rancher/k3k
|
||||
tag: "" # Specify the version tag
|
||||
pullPolicy: "" # Optional: "IfNotPresent", "Always", etc.
|
||||
|
||||
agent:
|
||||
imagePullSecrets: []
|
||||
virtual:
|
||||
image:
|
||||
repository: rancher/k3s
|
||||
pullPolicy: "" # Optional
|
||||
shared:
|
||||
image:
|
||||
repository: rancher/k3k-kubelet
|
||||
tag: "" # Specify the version tag
|
||||
pullPolicy: "" # Optional
|
||||
|
||||
server:
|
||||
imagePullSecrets: [] # Optional
|
||||
image:
|
||||
repository: rancher/k3s
|
||||
pullPolicy: "" # Optional
|
||||
```
|
||||
|
||||
These values enforce the use of internal image repositories for the K3k controller, the agent and the server.
|
||||
|
||||
**Note**: All virtual clusters will automatically use these settings.
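With those values in place, a minimal install might look like the following sketch; the chart archive name is a placeholder, since in an air-gapped setup the chart is typically installed from a local file:

```bash
helm install --namespace k3k-system --create-namespace \
  -f values.yaml k3k ./k3k-<chart-version>.tgz
```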
|
||||
|
||||
---
|
||||
|
||||
## 3. Enforce Registry in Virtual Clusters
|
||||
|
||||
When creating a virtual cluster, use the `--system-default-registry` flag to ensure all system components (e.g., CoreDNS) pull from your internal registry:
|
||||
|
||||
```bash
|
||||
k3kcli cluster create \
|
||||
--server-args "--system-default-registry=registry.internal.domain" \
|
||||
my-cluster
|
||||
```
|
||||
|
||||
This flag is passed directly to the K3s server in the virtual cluster, influencing all system workload image pulls.
|
||||
[K3s Server CLI Reference](https://docs.k3s.io/cli/server#k3s-server-cli-help)
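The same setting can also be expressed declaratively through the `Cluster` resource by passing the flag in `serverArgs` (a sketch; the cluster name and registry hostname are placeholders):

```yaml
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
  name: my-cluster
spec:
  serverArgs:
    - --system-default-registry=registry.internal.domain
```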
|
||||
|
||||
---
|
||||
|
||||
## 4. Specify K3s Version for Virtual Clusters
|
||||
|
||||
K3k allows specifying the K3s version used in each virtual cluster:
|
||||
|
||||
```bash
|
||||
k3kcli cluster create \
|
||||
--version v1.29.4+k3s1 \
|
||||
my-cluster
|
||||
```
|
||||
|
||||
- If omitted, the **host cluster’s K3s version** will be used by default; that image might not be available in your registry if it was not included in the air gap bundle.
|
||||
docs/howtos/choose-mode.md (new file, 79 lines)
@@ -0,0 +1,79 @@
|
||||
# How to Choose Between Shared and Virtual Mode
|
||||
|
||||
This guide helps you choose the right mode for your virtual cluster: **Shared** or **Virtual**.
|
||||
If you're unsure, start with **Shared mode** — it's the default and fits most common scenarios.
|
||||
|
||||
---
|
||||
|
||||
## Shared Mode (default)
|
||||
|
||||
**Best for:**
|
||||
- Developers who want to run workloads quickly without managing Kubernetes internals
|
||||
- Platform teams that require visibility and control over all workloads
|
||||
- Users who need access to host-level resources (e.g., GPUs)
|
||||
|
||||
In **Shared mode**, the virtual cluster runs its own K3s server but relies on the host to execute workloads. The virtual kubelet syncs resources, enabling lightweight, fast provisioning with support for cluster resource isolation. More details on the [architecture](./../architecture.md#shared-mode).
|
||||
|
||||
---
|
||||
|
||||
### Use Cases by Persona
|
||||
|
||||
#### 👩💻 Developer
|
||||
*"I’m building a web app that should be exposed outside the virtual cluster."*
|
||||
→ Use **Shared mode**. It allows you to [expose](./expose-workloads.md) your application.
|
||||
|
||||
#### 👩‍🔬 Data Scientist
|
||||
*“I need to run Jupyter notebooks that leverage the cluster's GPU.”*
|
||||
→ Use **Shared mode**. It gives access to physical devices while keeping overhead low.
|
||||
|
||||
#### 🧑💼 Platform Admin
|
||||
*"I want to monitor and secure all tenant workloads from a central location."*
|
||||
→ Use **Shared mode**. Host-level agents (e.g., observability, policy enforcement) work across all virtual clusters.
|
||||
|
||||
#### 🔒 Security Engineer
|
||||
*"I need to enforce security policies like network policies or runtime scanning across all workloads."*
|
||||
→ Use **Shared mode**. The platform can enforce policies globally without tenant bypass.
|
||||
|
||||
*"I need to test a new admission controller or policy engine."*
|
||||
→ Use **Shared mode**, if it's scoped to your virtual cluster. You can run tools like Kubewarden without affecting the host.
|
||||
|
||||
#### 🔁 CI/CD Engineer
|
||||
*"I want to spin up disposable virtual clusters per pipeline run, fast and with low resource cost."*
|
||||
→ Use **Shared mode**. It's quick to provision and ideal for short-lived, namespace-scoped environments.
|
||||
|
||||
---
|
||||
|
||||
## Virtual Mode
|
||||
|
||||
**Best for:**
|
||||
- Advanced users who need full Kubernetes isolation
|
||||
- Developers testing experimental or cluster-wide features
|
||||
- Use cases requiring control over the entire Kubernetes control plane
|
||||
|
||||
In **Virtual mode**, the virtual cluster runs its own isolated Kubernetes control plane. It supports different CNIs, and API configurations — ideal for deep experimentation or advanced workloads. More details on the [architecture](./../architecture.md#virtual-mode).
|
||||
|
||||
---
|
||||
|
||||
### Use Cases by Persona
|
||||
|
||||
#### 👩💻 Developer
|
||||
*"I need to test a new Kubernetes feature gate that’s disabled in the host cluster."*
|
||||
→ Use **Virtual mode**. You can configure your own control plane flags and API features.
|
||||
|
||||
#### 🧑💼 Platform Admin
|
||||
*"We’re testing upgrades across Kubernetes versions, including new API behaviors."*
|
||||
→ Use **Virtual mode**. You can run different Kubernetes versions and safely validate upgrade paths.
|
||||
|
||||
#### 🌐 Network Engineer
|
||||
*"I’m evaluating a new CNI that needs full control of the cluster’s networking."*
|
||||
→ Use **Virtual mode**. You can run a separate CNI stack without affecting the host or other tenants.
|
||||
|
||||
#### 🔒 Security Engineer
|
||||
*"I’m testing a new admission controller and policy engine before rolling it out cluster-wide."*
|
||||
→ Use **Virtual mode**, if you need to test cluster-wide policies, custom admission flow, or advanced extensions with full control.
|
||||
|
||||
---
|
||||
|
||||
## Still Not Sure?
|
||||
|
||||
If you're evaluating more advanced use cases or want a deeper comparison, see the full trade-off breakdown in the [Architecture documentation](../architecture.md).
|
||||
docs/howtos/create-virtual-clusters.md (new file, 302 lines)
@@ -0,0 +1,302 @@
|
||||
# How to: Create a Virtual Cluster
|
||||
|
||||
This guide walks through the various ways to create and manage virtual clusters in K3K. We'll cover common use cases using both the **Custom Resource Definitions (CRDs)** and the **K3K CLI**, so you can choose the method that fits your workflow.
|
||||
|
||||
> 📘 For full reference:
|
||||
> - [CRD Reference Documentation](../crds/crd-docs.md)
|
||||
> - [CLI Reference Documentation](../cli/cli-docs.md)
|
||||
> - [Full example](../advanced-usage.md)
|
||||
|
||||
> [!NOTE]
|
||||
> 🚧 Some features are currently only available via the CRD interface. CLI support may be added in the future.
|
||||
|
||||
---
|
||||
|
||||
## Use Case: Create and Expose a Basic Virtual Cluster
|
||||
|
||||
### CRD Method
|
||||
|
||||
```yaml
|
||||
apiVersion: k3k.io/v1alpha1
|
||||
kind: Cluster
|
||||
metadata:
|
||||
name: k3kcluster-ingress
|
||||
spec:
|
||||
tlsSANs:
|
||||
- my-cluster.example.com
|
||||
expose:
|
||||
ingress:
|
||||
ingressClassName: nginx
|
||||
annotations:
|
||||
nginx.ingress.kubernetes.io/ssl-passthrough: "true"
|
||||
nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
|
||||
nginx.ingress.kubernetes.io/ssl-redirect: "HTTPS"
|
||||
```
|
||||
|
||||
This will create a virtual cluster in `shared` mode and expose it via an ingress with the specified hostname.
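To reach a cluster exposed this way, point the generated kubeconfig at the ingress hostname; a sketch using `k3kcli`, assuming the cluster above was created in the `default` namespace:

```sh
k3kcli kubeconfig generate \
  --name k3kcluster-ingress \
  --namespace default \
  --kubeconfig-server my-cluster.example.com
```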
|
||||
|
||||
### CLI Method
|
||||
|
||||
*No CLI method available yet*
|
||||
|
||||
---
|
||||
|
||||
## Use Case: Create a Virtual Cluster with Persistent Storage (**Default**)
|
||||
|
||||
### CRD Method
|
||||
|
||||
```yaml
|
||||
apiVersion: k3k.io/v1alpha1
|
||||
kind: Cluster
|
||||
metadata:
|
||||
name: k3kcluster-persistent
|
||||
spec:
|
||||
persistence:
|
||||
type: dynamic
|
||||
storageClassName: local-path
|
||||
storageRequestSize: 30Gi
|
||||
```
|
||||
|
||||
This ensures that the virtual cluster stores its state persistently with a 30Gi volume.
|
||||
If `storageClassName` is not set, the cluster's default StorageClass will be used.
If `storageRequestSize` is not set, a 1Gi volume will be requested by default.
|
||||
|
||||
### CLI Method
|
||||
|
||||
```sh
|
||||
k3kcli cluster create \
|
||||
--persistence-type dynamic \
|
||||
--storage-class-name local-path \
|
||||
k3kcluster-persistent
|
||||
```
|
||||
|
||||
> [!NOTE]
|
||||
> The `k3kcli` does not support configuring the `storageRequestSize` yet.
|
||||
|
||||
---
|
||||
|
||||
## Use Case: Create a Highly Available Virtual Cluster in `shared` mode
|
||||
|
||||
### CRD Method
|
||||
|
||||
```yaml
|
||||
apiVersion: k3k.io/v1alpha1
|
||||
kind: Cluster
|
||||
metadata:
|
||||
name: k3kcluster-ha
|
||||
spec:
|
||||
servers: 3
|
||||
```
|
||||
|
||||
This will create a virtual cluster with 3 servers and a default 1Gi volume for persistence.
|
||||
|
||||
### CLI Method
|
||||
|
||||
```sh
|
||||
k3kcli cluster create \
|
||||
--servers 3 \
|
||||
k3kcluster-ha
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Use Case: Create a Highly Available Virtual Cluster in `virtual` mode
|
||||
|
||||
### CRD Method
|
||||
|
||||
```yaml
|
||||
apiVersion: k3k.io/v1alpha1
|
||||
kind: Cluster
|
||||
metadata:
|
||||
name: k3kcluster-virtual
|
||||
spec:
|
||||
mode: virtual
|
||||
servers: 3
|
||||
agents: 3
|
||||
```
|
||||
|
||||
This will create a virtual cluster with 3 servers and 3 agents and a default 1Gi volume for persistence.
|
||||
> [!NOTE]
|
||||
> Agents only exist for `virtual` mode.
|
||||
|
||||
### CLI Method
|
||||
|
||||
```sh
|
||||
k3kcli cluster create \
|
||||
--agents 3 \
|
||||
--servers 3 \
|
||||
--mode virtual \
|
||||
k3kcluster-virtual
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Use Case: Create an Ephemeral Virtual Cluster
|
||||
|
||||
### CRD Method
|
||||
|
||||
```yaml
|
||||
apiVersion: k3k.io/v1alpha1
|
||||
kind: Cluster
|
||||
metadata:
|
||||
name: k3kcluster-ephemeral
|
||||
spec:
|
||||
persistence:
|
||||
type: ephemeral
|
||||
```
|
||||
|
||||
This will create an ephemeral virtual cluster with no persistence and a single server.
|
||||
|
||||
### CLI Method
|
||||
|
||||
```sh
|
||||
k3kcli cluster create \
|
||||
--persistence-type ephemeral \
|
||||
k3kcluster-ephemeral
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Use Case: Create a Virtual Cluster with a Custom Kubernetes Version
|
||||
|
||||
### CRD Method
|
||||
|
||||
```yaml
|
||||
apiVersion: k3k.io/v1alpha1
|
||||
kind: Cluster
|
||||
metadata:
|
||||
name: k3kcluster-custom-k8s
|
||||
spec:
|
||||
version: "v1.33.1-k3s1"
|
||||
```
|
||||
|
||||
This sets the virtual cluster's Kubernetes version explicitly.
|
||||
> [!NOTE]
|
||||
> Only [K3s](https://k3s.io) distributions are supported. You can find compatible versions on the K3s GitHub [release page](https://github.com/k3s-io/k3s/releases).
|
||||
|
||||
### CLI Method
|
||||
|
||||
```sh
|
||||
k3kcli cluster create \
|
||||
--version v1.33.1-k3s1 \
|
||||
k3kcluster-custom-k8s
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Use Case: Create a Virtual Cluster with Custom Resource Limits
|
||||
|
||||
### CRD Method
|
||||
|
||||
```yaml
|
||||
apiVersion: k3k.io/v1alpha1
|
||||
kind: Cluster
|
||||
metadata:
|
||||
name: k3kcluster-resourced
|
||||
spec:
|
||||
mode: virtual
|
||||
serverLimit:
|
||||
cpu: "1"
|
||||
memory: "2Gi"
|
||||
workerLimit:
|
||||
cpu: "1"
|
||||
memory: "2Gi"
|
||||
```
|
||||
|
||||
This configures the CPU and memory limit for the virtual cluster.
|
||||
|
||||
### CLI Method
|
||||
|
||||
*No CLI method available yet*
|
||||
|
||||
---
|
||||
|
||||
## Use Case: Create a Virtual Cluster on specific host nodes
|
||||
|
||||
### CRD Method
|
||||
|
||||
```yaml
|
||||
apiVersion: k3k.io/v1alpha1
|
||||
kind: Cluster
|
||||
metadata:
|
||||
name: k3kcluster-node-placed
|
||||
spec:
|
||||
nodeSelector:
|
||||
disktype: ssd
|
||||
```
|
||||
|
||||
This places the virtual cluster on nodes with the label `disktype: ssd`.
|
||||
> [!NOTE]
|
||||
> In `shared` mode workloads are also scheduled on the selected nodes
|
||||
|
||||
### CLI Method
|
||||
|
||||
*No CLI method available yet*
|
||||
|
||||
---
|
||||
|
||||
## Use Case: Create a Virtual Cluster with a Rancher Host Cluster Kubeconfig
|
||||
|
||||
When using a `kubeconfig` generated by Rancher, you need to specify on the CLI the desired server host for the virtual cluster `kubeconfig`.
By default, `k3kcli` uses the current host `kubeconfig` to determine the target cluster.
|
||||
|
||||
### CRD Method
|
||||
|
||||
*Not applicable*
|
||||
|
||||
### CLI Method
|
||||
|
||||
```sh
|
||||
k3kcli cluster create \
|
||||
--kubeconfig-server https://abc.xyz \
|
||||
k3kcluster-host-rancher
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Use Case: Create a Virtual Cluster Behind an HTTP Proxy
|
||||
|
||||
### CRD Method
|
||||
|
||||
```yaml
|
||||
apiVersion: k3k.io/v1alpha1
|
||||
kind: Cluster
|
||||
metadata:
|
||||
name: k3kcluster-http-proxy
|
||||
spec:
|
||||
serverEnvs:
|
||||
- name: HTTP_PROXY
|
||||
value: "http://abc.xyz"
|
||||
agentEnvs:
|
||||
- name: HTTP_PROXY
|
||||
value: "http://abc.xyz"
|
||||
```
|
||||
|
||||
This configures an HTTP proxy for both servers and agents in the virtual cluster.
|
||||
> [!NOTE]
|
||||
> This can be leveraged to pass **any custom environment variables** to the servers and agents — not just proxy settings.
|
||||
|
||||
### CLI Method
|
||||
|
||||
```sh
|
||||
k3kcli cluster create \
|
||||
--server-envs HTTP_PROXY=http://abc.xyz \
|
||||
--agent-envs HTTP_PROXY=http://abc.xyz \
|
||||
k3kcluster-http-proxy
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## How to: Connect to a Virtual Cluster
|
||||
|
||||
Once the virtual cluster is running, you can connect to it using the CLI:
|
||||
|
||||
### CLI Method
|
||||
|
||||
```sh
|
||||
k3kcli kubeconfig generate --namespace k3k-mycluster --name mycluster
|
||||
export KUBECONFIG=$PWD/mycluster-kubeconfig.yaml
|
||||
kubectl get nodes
|
||||
```
|
||||
|
||||
This command generates a `kubeconfig` file, which you can use to access your virtual cluster via `kubectl`.
|
||||
docs/howtos/expose-workloads.md (new file, 52 lines)
@@ -0,0 +1,52 @@
|
||||
# How-to: Expose Workloads Outside the Virtual Cluster
|
||||
|
||||
This guide explains how to expose workloads running in k3k-managed virtual clusters to external networks. Behavior varies depending on the operating mode of the virtual cluster.
|
||||
|
||||
## Virtual Mode
|
||||
|
||||
> [!CAUTION]
|
||||
> **Not Supported**
|
||||
> In *virtual mode*, direct external exposure of workloads is **not available**.
|
||||
> This mode is designed for strong isolation and does not expose the virtual cluster's network directly.
|
||||
|
||||
## Shared Mode
|
||||
|
||||
In *shared mode*, workloads can be exposed to the external network using standard Kubernetes service types or an ingress controller, depending on your requirements.
|
||||
|
||||
> [!NOTE]
|
||||
> *`Services`* are always synced from the virtual cluster to the host cluster following the same principle described [here](../architecture.md#shared-mode) for pods.
|
||||
|
||||
### Option 1: Use `NodePort` or `LoadBalancer`
|
||||
|
||||
To expose a service such as a web application outside the host cluster:
|
||||
|
||||
- **`NodePort`**:
|
||||
Exposes the service on a static port on each node’s IP.
|
||||
  Access the service at `http://<NodeIP>:<NodePort>` (a minimal manifest is sketched after this list).
|
||||
|
||||
- **`LoadBalancer`**:
|
||||
Provisions an external load balancer (if supported by the environment) and exposes the service via the load balancer’s IP.
|
||||
|
||||
> **Note**
|
||||
> The `LoadBalancer` IP is currently not reflected back to the virtual cluster service.
|
||||
> [k3k issue #365](https://github.com/rancher/k3k/issues/365)
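A minimal sketch of such a `NodePort` service, created inside the virtual cluster (the name, labels and ports are assumptions):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: webapp
spec:
  type: NodePort
  selector:
    app: webapp          # must match the labels on the workload's pods
  ports:
    - port: 80           # service port inside the virtual cluster
      targetPort: 8080   # container port of the workload
      nodePort: 30080    # static port exposed on each host node
```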
|
||||
|
||||
### Option 2: Use `ClusterIP` for Internal Communication
|
||||
|
||||
If the workload should only be accessible to other services or pods *within* the host cluster:
|
||||
|
||||
- Use the `ClusterIP` service type.
|
||||
This exposes the service on an internal IP, only reachable inside the host cluster.
|
||||
|
||||
### Option 3: Use Ingress for HTTP/HTTPS Routing
|
||||
|
||||
For more advanced routing (e.g., hostname- or path-based routing), deploy an **Ingress controller** in the virtual cluster, and expose it via `NodePort` or `LoadBalancer`.
|
||||
|
||||
This allows you to:
|
||||
|
||||
- Define Ingress resources in the virtual cluster.
|
||||
- Route external traffic to services within the virtual cluster.
|
||||
|
||||
>**Note**
|
||||
> Support for using the host cluster's Ingress controller from a virtual cluster is being tracked in
|
||||
> [k3k issue #356](https://github.com/rancher/k3k/issues/356)
|
||||
docs/howtos/troubleshooting.md (new file, 147 lines)
@@ -0,0 +1,147 @@
|
||||
# Troubleshooting
|
||||
|
||||
This guide walks through common troubleshooting steps for working with K3K virtual clusters.
|
||||
|
||||
---
|
||||
|
||||
## `too many open files` error
|
||||
|
||||
The `k3k-kubelet` or `k3kcluster-server-` pods run into the following issue:
|
||||
|
||||
```sh
|
||||
E0604 13:14:53.369369 1 leaderelection.go:336] error initially creating leader election record: Post "https://k3k-http-proxy-k3kcluster-service/apis/coordination.k8s.io/v1/namespaces/kube-system/leases": context canceled
|
||||
{"level":"fatal","timestamp":"2025-06-04T13:14:53.369Z","logger":"k3k-kubelet","msg":"virtual manager stopped","error":"too many open files"}
|
||||
```
|
||||
|
||||
This typically indicates a low limit on inotify watchers or file descriptors on the host system.
|
||||
|
||||
To increase the inotify limits, connect to the host nodes and run:

```sh
sudo sysctl -w fs.inotify.max_user_watches=2099999999
sudo sysctl -w fs.inotify.max_user_instances=2099999999
sudo sysctl -w fs.inotify.max_queued_events=2099999999
```

You can persist these settings by adding them to `/etc/sysctl.conf`:

```sh
fs.inotify.max_user_watches=2099999999
fs.inotify.max_user_instances=2099999999
fs.inotify.max_queued_events=2099999999
```

Apply the changes:

```sh
sudo sysctl -p
```

You can find more details in this [KB document](https://www.suse.com/support/kb/doc/?id=000020048).

---
## Inspect Controller Logs for Failure Diagnosis

To view the K3k controller logs:

```sh
kubectl logs -n k3k-system -l app.kubernetes.io/name=k3k
```

This retrieves logs from the K3k controller components.

---

## Inspect Cluster Logs for Failure Diagnosis

To view logs for a failed virtual cluster:

```sh
kubectl logs -n <cluster_namespace> -l cluster=<cluster_name>
```

This retrieves logs from the K3k cluster components (`agents`, `server`, and `virtual-kubelet`).

> 💡 You can also use `kubectl describe cluster <cluster_name>` to check for recent events and status conditions.

---
## Virtual Cluster Not Starting or Stuck in Pending

The most common causes are missing prerequisites or incorrect configuration.

### Storage class not available

When creating a Virtual Cluster with `dynamic` persistence, a PVC is needed. You can check whether the PVC was created but not bound with `kubectl get pvc -n <cluster_namespace>`. If you see a pending PVC, you probably don't have a default storage class defined, or you have specified an invalid one.

#### Example with wrong storage class

The PVC is pending:

```bash
kubectl get pvc -n k3k-test-storage
NAME                                          STATUS    VOLUME   CAPACITY   ACCESS MODES   STORAGECLASS    VOLUMEATTRIBUTESCLASS   AGE
varlibrancherk3s-k3k-test-storage-server-0    Pending                                      not-available   <unset>                 4s
```

The `server` is pending:

```bash
kubectl get po -n k3k-test-storage
NAME                             READY   STATUS    RESTARTS   AGE
k3k-test-storage-kubelet-j4zn5   1/1     Running   0          54s
k3k-test-storage-server-0        0/1     Pending   0          54s
```

To fix this, use a valid storage class. You can list the existing storage classes with:

```bash
kubectl get storageclasses.storage.k8s.io
NAME                   PROVISIONER             RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
local-path (default)   rancher.io/local-path   Delete          WaitForFirstConsumer   false                  3d6h
```
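If the virtual cluster should use a specific class, it can be set in the `Cluster` spec. Below is a minimal sketch, assuming the `persistence.storageClassName` field of the Cluster CRD (check the CRD reference for the exact schema):

```yaml
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
  name: test-storage
  namespace: k3k-test-storage
spec:
  persistence:
    type: dynamic
    storageClassName: local-path   # a class listed by the command above
```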
### Wrong node selector

When creating a Virtual Cluster with a `defaultNodeSelector`, an invalid selector will leave all pods pending.

#### Example

The `server` is pending:

```bash
kubectl get po
NAME                                  READY   STATUS    RESTARTS   AGE
k3k-k3kcluster-node-placed-server-0   0/1     Pending   0          58s
```

The description of the pod provides the reason:

```bash
kubectl describe po k3k-k3kcluster-node-placed-server-0
...
Events:
  Type     Reason            Age   From               Message
  ----     ------            ----  ----               -------
  Warning  FailedScheduling  84s   default-scheduler  0/1 nodes are available: 1 node(s) didn't match Pod's node affinity/selector. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling.
```

To fix this, use a valid node affinity/selector.
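To check which labels are actually present on the host nodes, and therefore which selectors can match, run:

```bash
kubectl get nodes --show-labels
```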
### Image pull issues (air-gapped setup)

When creating a Virtual Cluster in an air-gapped environment, the images need to be available in the configured registry. Check for an `ImagePullBackOff` status when listing the pods in the virtual cluster namespace.

#### Example

The `server` is failing:

```bash
kubectl get po -n k3k-test-registry
NAME                              READY   STATUS             RESTARTS   AGE
k3k-test-registry-kubelet-r4zh5   1/1     Running            0          54s
k3k-test-registry-server-0        0/1     ImagePullBackOff   0          54s
```

To fix this, make sure the failing image is available in the configured registry. You can describe the failing pod to get more details.
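For example, to inspect the pull error events of the failing server pod from the output above:

```bash
kubectl describe po k3k-test-registry-server-0 -n k3k-test-registry
```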

docs/virtualclusterpolicy.md (new file, 147 lines)
@@ -0,0 +1,147 @@
# VirtualClusterPolicy

The VirtualClusterPolicy Custom Resource in K3k provides a way to define and enforce consistent configurations, security settings, and resource management rules for your virtual clusters and the Namespaces they operate within.

By using VCPs, administrators can centrally manage these aspects, reducing manual configuration, ensuring alignment with organizational standards, and enhancing the overall security and operational consistency of the K3k environment.

## Core Concepts

### What is a VirtualClusterPolicy?

A `VirtualClusterPolicy` is a cluster-scoped Kubernetes Custom Resource that specifies a set of rules and configurations. These policies are then applied to K3k virtual clusters (`Cluster` resources) operating within Kubernetes Namespaces that are explicitly bound to a VCP.

### Binding a Policy to a Namespace

To apply a `VirtualClusterPolicy` to one or more Namespaces (and thus to all K3k `Cluster` resources within those Namespaces), you need to label the desired Namespace(s). Add the following label to your Namespace metadata:

`policy.k3k.io/policy-name: <YOUR_POLICY_NAME>`

**Example: Labeling a Namespace**

```yaml
apiVersion: v1
kind: Namespace
metadata:
  name: my-app-namespace
  labels:
    policy.k3k.io/policy-name: "standard-dev-policy"
```

In this example, `my-app-namespace` will adhere to the rules defined in the `VirtualClusterPolicy` named `standard-dev-policy`. Multiple Namespaces can be bound to the same policy for uniform configuration, or different Namespaces can be bound to distinct policies.
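Equivalently, an existing Namespace can be labeled with kubectl:

```bash
kubectl label namespace my-app-namespace policy.k3k.io/policy-name=standard-dev-policy
```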
It's also important to note what happens when a Namespace's policy binding changes. If a Namespace is unbound from a `VirtualClusterPolicy` (by removing the `policy.k3k.io/policy-name` label), K3k will clean up and remove the resources (such as ResourceQuotas, LimitRanges, and managed Namespace labels) that were originally applied by that policy. Similarly, if the label is changed to bind the Namespace to a new `VirtualClusterPolicy`, K3k will first remove the resources associated with the old policy before applying the configurations from the new one, ensuring a clean transition.

### Default Policy Values

If you create a `VirtualClusterPolicy` without specifying any `spec` fields (e.g., using `k3kcli policy create my-default-policy`), it will be created with default settings. Currently, this includes `spec.allowedMode` being set to `"shared"`.

```yaml
# Example of a minimal VCP (after creation with defaults)
apiVersion: k3k.io/v1alpha1
kind: VirtualClusterPolicy
metadata:
  name: my-default-policy
spec:
  allowedMode: shared
```
## Key Capabilities & Examples

A `VirtualClusterPolicy` can configure several aspects of the Namespaces it's bound to and the virtual clusters operating within them.

### 1. Restricting Allowed Virtual Cluster Modes (`allowedMode`)

You can restrict the `mode` (e.g., "shared" or "virtual") in which K3k `Cluster` resources can be provisioned within bound Namespaces. If a `Cluster` is created in a bound Namespace with a mode not allowed by `allowedMode`, its creation might proceed, but an error should be reported in the `Cluster` resource's status.

**Example:** Allow only "shared" mode clusters.

```yaml
apiVersion: k3k.io/v1alpha1
kind: VirtualClusterPolicy
metadata:
  name: shared-only-policy
spec:
  allowedMode: shared
```
You can also specify this using the CLI: `k3kcli policy create --mode shared shared-only-policy` (or `--mode virtual`).

### 2. Defining Resource Quotas (`quota`)

You can define resource consumption limits for bound Namespaces by specifying a `ResourceQuota`. K3k will create a `ResourceQuota` object in each bound Namespace with the provided specifications.

**Example:** Set CPU, memory, and pod limits.

```yaml
apiVersion: k3k.io/v1alpha1
kind: VirtualClusterPolicy
metadata:
  name: quota-policy
spec:
  quota:
    hard:
      cpu: "10"
      memory: "20Gi"
      pods: "10"
```
### 3. Setting Limit Ranges (`limit`)

You can define default resource requests/limits and min/max constraints for containers running in bound Namespaces by specifying a `LimitRange`. K3k will create a `LimitRange` object in each bound Namespace.

**Example:** Define default CPU requests/limits and min/max CPU.

```yaml
apiVersion: k3k.io/v1alpha1
kind: VirtualClusterPolicy
metadata:
  name: limit-policy
spec:
  limit:
    limits:
      - default:
          cpu: "500m"
        defaultRequest:
          cpu: "500m"
        max:
          cpu: "1"
        min:
          cpu: "100m"
        type: Container
```
### 4. Managing Network Isolation (`disableNetworkPolicy`)

By default, K3k creates a `NetworkPolicy` in bound Namespaces to provide network isolation for virtual clusters (especially in shared mode). You can disable the creation of this default policy.

**Example:** Disable the default NetworkPolicy.

```yaml
apiVersion: k3k.io/v1alpha1
kind: VirtualClusterPolicy
metadata:
  name: no-default-netpol-policy
spec:
  disableNetworkPolicy: true
```
### 5. Enforcing Pod Security Admission (`podSecurityAdmissionLevel`)

You can enforce Pod Security Standards (PSS) by specifying a Pod Security Admission (PSA) level. The allowed values are `privileged`, `baseline`, and `restricted`. K3k applies the corresponding PSA labels, such as `pod-security.kubernetes.io/enforce: <level>`, to each bound Namespace.

**Example:** Enforce the "baseline" PSS level.

```yaml
apiVersion: k3k.io/v1alpha1
kind: VirtualClusterPolicy
metadata:
  name: baseline-psa-policy
spec:
  podSecurityAdmissionLevel: baseline
```
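To verify the result, inspect the labels that K3k applied to a bound Namespace (using the Namespace from the earlier binding example):

```bash
kubectl get namespace my-app-namespace --show-labels
```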
## Further Reading

* For a complete reference of all `VirtualClusterPolicy` spec fields, see the [API Reference for VirtualClusterPolicy](./crds/crd-docs.md#virtualclusterpolicy).
* To understand how VCPs fit into the overall K3k system, see the [Architecture](./architecture.md) document.

@@ -1,11 +1,9 @@
 apiVersion: k3k.io/v1alpha1
-kind: ClusterSet
+kind: VirtualClusterPolicy
 metadata:
-  name: clusterset-example
+  name: policy-example
 # spec:
 #   disableNetworkPolicy: false
-#   allowedNodeTypes:
-#   - "shared"
-#   - "virtual"
+#   allowedMode: "shared"
 #   podSecurityAdmissionLevel: "baseline"
 #   defaultPriorityClass: "lowpriority"
106
go.mod
106
go.mod
@@ -1,39 +1,55 @@
|
||||
module github.com/rancher/k3k
|
||||
|
||||
go 1.23.4
|
||||
go 1.24.2
|
||||
|
||||
replace (
|
||||
github.com/google/cel-go => github.com/google/cel-go v0.17.7
|
||||
github.com/google/cel-go => github.com/google/cel-go v0.20.1
|
||||
github.com/prometheus/client_golang => github.com/prometheus/client_golang v1.16.0
|
||||
github.com/prometheus/client_model => github.com/prometheus/client_model v0.6.1
|
||||
github.com/prometheus/common => github.com/prometheus/common v0.47.0
|
||||
github.com/prometheus/common => github.com/prometheus/common v0.64.0
|
||||
golang.org/x/term => golang.org/x/term v0.15.0
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/go-logr/zapr v1.3.0
|
||||
github.com/google/go-cmp v0.7.0
|
||||
github.com/onsi/ginkgo/v2 v2.21.0
|
||||
github.com/onsi/gomega v1.36.0
|
||||
github.com/prometheus/client_model v0.6.1
|
||||
github.com/rancher/dynamiclistener v1.27.5
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/spf13/viper v1.20.1
|
||||
github.com/stretchr/testify v1.10.0
|
||||
github.com/testcontainers/testcontainers-go v0.35.0
|
||||
github.com/testcontainers/testcontainers-go/modules/k3s v0.35.0
|
||||
github.com/urfave/cli/v2 v2.27.5
|
||||
github.com/virtual-kubelet/virtual-kubelet v1.11.0
|
||||
github.com/virtual-kubelet/virtual-kubelet v1.11.1-0.20250530103808-c9f64e872803
|
||||
go.etcd.io/etcd/api/v3 v3.5.16
|
||||
go.etcd.io/etcd/client/v3 v3.5.16
|
||||
go.uber.org/zap v1.27.0
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
helm.sh/helm/v3 v3.14.4
|
||||
k8s.io/api v0.29.11
|
||||
k8s.io/apimachinery v0.29.11
|
||||
k8s.io/apiserver v0.29.11
|
||||
k8s.io/client-go v0.29.11
|
||||
k8s.io/component-base v0.29.11
|
||||
k8s.io/api v0.31.4
|
||||
k8s.io/apiextensions-apiserver v0.31.4
|
||||
k8s.io/apimachinery v0.31.4
|
||||
k8s.io/apiserver v0.31.4
|
||||
k8s.io/cli-runtime v0.31.4
|
||||
k8s.io/client-go v0.31.4
|
||||
k8s.io/component-base v0.31.4
|
||||
k8s.io/component-helpers v0.31.4
|
||||
k8s.io/kubectl v0.31.4
|
||||
k8s.io/kubelet v0.31.4
|
||||
k8s.io/kubernetes v1.31.4
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
|
||||
sigs.k8s.io/controller-runtime v0.17.5
|
||||
sigs.k8s.io/controller-runtime v0.19.4
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
|
||||
github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.2.3 // indirect
|
||||
github.com/sagikazarmark/locafero v0.7.0 // indirect
|
||||
github.com/sourcegraph/conc v0.3.0 // indirect
|
||||
github.com/spf13/afero v1.12.0 // indirect
|
||||
github.com/subosito/gotenv v1.6.0 // indirect
|
||||
)
|
||||
|
||||
require (
|
||||
@@ -48,7 +64,6 @@ require (
|
||||
github.com/Masterminds/squirrel v1.5.4 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.2 // indirect
|
||||
github.com/NYTimes/gziphandler v1.1.1 // indirect
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
@@ -62,7 +77,7 @@ require (
|
||||
github.com/coreos/go-semver v0.3.1 // indirect
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
|
||||
github.com/cpuguy83/dockercfg v0.3.2 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
|
||||
github.com/cyphar/filepath-securejoin v0.3.6 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/distribution/reference v0.6.0 // indirect
|
||||
@@ -79,7 +94,8 @@ require (
|
||||
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
|
||||
github.com/fatih/color v1.13.0 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/fsnotify/fsnotify v1.7.0 // indirect
|
||||
github.com/fsnotify/fsnotify v1.8.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
|
||||
github.com/go-errors/errors v1.4.2 // indirect
|
||||
github.com/go-gorp/gorp/v3 v3.1.0 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
@@ -96,12 +112,11 @@ require (
|
||||
github.com/google/btree v1.1.3 // indirect
|
||||
github.com/google/cel-go v0.22.0 // indirect
|
||||
github.com/google/gnostic-models v0.6.8 // indirect
|
||||
github.com/google/go-cmp v0.6.0 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/gorilla/mux v1.8.0 // indirect
|
||||
github.com/gorilla/mux v1.8.1 // indirect
|
||||
github.com/gorilla/websocket v1.5.0 // indirect
|
||||
github.com/gosuri/uitable v0.0.4 // indirect
|
||||
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
|
||||
@@ -115,7 +130,7 @@ require (
|
||||
github.com/jmoiron/sqlx v1.4.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/compress v1.17.4 // indirect
|
||||
github.com/klauspost/compress v1.17.9 // indirect
|
||||
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
|
||||
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
|
||||
github.com/lib/pq v1.10.9 // indirect
|
||||
@@ -150,63 +165,62 @@ require (
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
|
||||
github.com/prometheus/client_golang v1.19.1 // indirect
|
||||
github.com/prometheus/common v0.55.0 // indirect
|
||||
github.com/prometheus/client_golang v1.20.5 // indirect
|
||||
github.com/prometheus/client_model v0.6.2
|
||||
github.com/prometheus/common v0.64.0 // indirect
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/rubenv/sql-migrate v1.7.1 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/shirou/gopsutil/v3 v3.23.12 // indirect
|
||||
github.com/shoenig/go-m1cpu v0.1.6 // indirect
|
||||
github.com/shopspring/decimal v1.4.0 // indirect
|
||||
github.com/spf13/cast v1.7.0 // indirect
|
||||
github.com/spf13/cobra v1.8.1 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/spf13/cast v1.7.1 // indirect
|
||||
github.com/spf13/cobra v1.9.1
|
||||
github.com/spf13/pflag v1.0.6
|
||||
github.com/stoewer/go-strcase v1.3.0 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.12 // indirect
|
||||
github.com/tklauser/numcpus v0.6.1 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
|
||||
github.com/xlab/treeprint v1.2.0 // indirect
|
||||
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.3 // indirect
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.16 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
|
||||
go.opentelemetry.io/otel v1.28.0 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect
|
||||
go.opentelemetry.io/otel v1.33.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.28.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.28.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.28.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.33.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.33.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.33.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
golang.org/x/crypto v0.31.0 // indirect
|
||||
golang.org/x/crypto v0.38.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
|
||||
golang.org/x/net v0.33.0 // indirect
|
||||
golang.org/x/oauth2 v0.23.0 // indirect
|
||||
golang.org/x/sync v0.10.0 // indirect
|
||||
golang.org/x/sys v0.28.0 // indirect
|
||||
golang.org/x/term v0.27.0 // indirect
|
||||
golang.org/x/text v0.21.0 // indirect
|
||||
golang.org/x/time v0.7.0 // indirect
|
||||
golang.org/x/net v0.40.0 // indirect
|
||||
golang.org/x/oauth2 v0.30.0 // indirect
|
||||
golang.org/x/sync v0.14.0 // indirect
|
||||
golang.org/x/sys v0.33.0 // indirect
|
||||
golang.org/x/term v0.32.0 // indirect
|
||||
golang.org/x/text v0.25.0 // indirect
|
||||
golang.org/x/time v0.9.0 // indirect
|
||||
golang.org/x/tools v0.26.0 // indirect
|
||||
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect
|
||||
google.golang.org/grpc v1.65.0 // indirect
|
||||
google.golang.org/protobuf v1.35.1 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 // indirect
|
||||
google.golang.org/grpc v1.67.3 // indirect
|
||||
google.golang.org/protobuf v1.36.6 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.29.11 // indirect
|
||||
k8s.io/cli-runtime v0.29.11 // indirect
|
||||
k8s.io/klog/v2 v2.130.1 // indirect
|
||||
k8s.io/kms v0.29.11 // indirect
|
||||
k8s.io/kms v0.31.4 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
|
||||
k8s.io/kubectl v0.29.11 // indirect
|
||||
oras.land/oras-go v1.2.5 // indirect
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
|
||||
|
||||
@@ -1,28 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
set -x
|
||||
CODEGEN_GIT_PKG=https://github.com/kubernetes/code-generator.git
|
||||
git clone --depth 1 ${CODEGEN_GIT_PKG} || true
|
||||
|
||||
K8S_VERSION=$(cat go.mod | grep -m1 "k8s.io/apiserver" | cut -d " " -f 2)
|
||||
SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
|
||||
CODEGEN_PKG=./code-generator
|
||||
|
||||
# cd into the git dir to checkout the code gen version compatible with the k8s version that this is using
|
||||
cd $CODEGEN_PKG
|
||||
git fetch origin tag ${K8S_VERSION}
|
||||
git checkout ${K8S_VERSION}
|
||||
cd -
|
||||
|
||||
source ${CODEGEN_PKG}/kube_codegen.sh
|
||||
|
||||
kube::codegen::gen_helpers \
|
||||
--boilerplate "${SCRIPT_ROOT}/hack/boilerplate.go.txt" \
|
||||
--input-pkg-root "${SCRIPT_ROOT}/pkg/apis" \
|
||||
--output-base "${SCRIPT_ROOT}/pkg/apis"
|
||||
|
||||
rm -rf code-generator
|
||||
@@ -2,86 +2,36 @@ package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
// config has all virtual-kubelet startup options
|
||||
type config struct {
|
||||
ClusterName string `yaml:"clusterName,omitempty"`
|
||||
ClusterNamespace string `yaml:"clusterNamespace,omitempty"`
|
||||
ServiceName string `yaml:"serviceName,omitempty"`
|
||||
Token string `yaml:"token,omitempty"`
|
||||
AgentHostname string `yaml:"agentHostname,omitempty"`
|
||||
HostConfigPath string `yaml:"hostConfigPath,omitempty"`
|
||||
VirtualConfigPath string `yaml:"virtualConfigPath,omitempty"`
|
||||
KubeletPort string `yaml:"kubeletPort,omitempty"`
|
||||
ServerIP string `yaml:"serverIP,omitempty"`
|
||||
Version string `yaml:"version,omitempty"`
|
||||
}
|
||||
|
||||
func (c *config) unmarshalYAML(data []byte) error {
|
||||
var conf config
|
||||
|
||||
if err := yaml.Unmarshal(data, &conf); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if c.ClusterName == "" {
|
||||
c.ClusterName = conf.ClusterName
|
||||
}
|
||||
if c.ClusterNamespace == "" {
|
||||
c.ClusterNamespace = conf.ClusterNamespace
|
||||
}
|
||||
if c.HostConfigPath == "" {
|
||||
c.HostConfigPath = conf.HostConfigPath
|
||||
}
|
||||
if c.VirtualConfigPath == "" {
|
||||
c.VirtualConfigPath = conf.VirtualConfigPath
|
||||
}
|
||||
if c.KubeletPort == "" {
|
||||
c.KubeletPort = conf.KubeletPort
|
||||
}
|
||||
if c.AgentHostname == "" {
|
||||
c.AgentHostname = conf.AgentHostname
|
||||
}
|
||||
if c.ServiceName == "" {
|
||||
c.ServiceName = conf.ServiceName
|
||||
}
|
||||
if c.Token == "" {
|
||||
c.Token = conf.Token
|
||||
}
|
||||
if c.ServerIP == "" {
|
||||
c.ServerIP = conf.ServerIP
|
||||
}
|
||||
if c.Version == "" {
|
||||
c.Version = conf.Version
|
||||
}
|
||||
return nil
|
||||
ClusterName string `mapstructure:"clusterName"`
|
||||
ClusterNamespace string `mapstructure:"clusterNamespace"`
|
||||
ServiceName string `mapstructure:"serviceName"`
|
||||
Token string `mapstructure:"token"`
|
||||
AgentHostname string `mapstructure:"agentHostname"`
|
||||
HostKubeconfig string `mapstructure:"hostKubeconfig"`
|
||||
VirtKubeconfig string `mapstructure:"virtKubeconfig"`
|
||||
KubeletPort int `mapstructure:"kubeletPort"`
|
||||
WebhookPort int `mapstructure:"webhookPort"`
|
||||
ServerIP string `mapstructure:"serverIP"`
|
||||
Version string `mapstructure:"version"`
|
||||
MirrorHostNodes bool `mapstructure:"mirrorHostNodes"`
|
||||
}
|
||||
|
||||
func (c *config) validate() error {
|
||||
if c.ClusterName == "" {
|
||||
return errors.New("cluster name is not provided")
|
||||
}
|
||||
|
||||
if c.ClusterNamespace == "" {
|
||||
return errors.New("cluster namespace is not provided")
|
||||
}
|
||||
|
||||
if c.AgentHostname == "" {
|
||||
return errors.New("agent Hostname is not provided")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *config) parse(path string) error {
|
||||
if _, err := os.Stat(path); os.IsNotExist(err) {
|
||||
return nil
|
||||
}
|
||||
|
||||
b, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return c.unmarshalYAML(b)
|
||||
}
|
||||
|
||||
@@ -1,166 +0,0 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/rancher/k3k/pkg/controller"
|
||||
k3klog "github.com/rancher/k3k/pkg/log"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
)
|
||||
|
||||
type ConfigMapSyncer struct {
|
||||
mutex sync.RWMutex
|
||||
// VirtualClient is the client for the virtual cluster
|
||||
VirtualClient client.Client
|
||||
// CoreClient is the client for the host cluster
|
||||
HostClient client.Client
|
||||
// TranslateFunc is the function that translates a given resource from it's virtual representation to the host
|
||||
// representation
|
||||
TranslateFunc func(*corev1.ConfigMap) (*corev1.ConfigMap, error)
|
||||
// Logger is the logger that the controller will use
|
||||
Logger *k3klog.Logger
|
||||
// objs are the objects that the syncer should watch/syncronize. Should only be manipulated
|
||||
// through add/remove
|
||||
objs sets.Set[types.NamespacedName]
|
||||
}
|
||||
|
||||
// Reconcile implements reconcile.Reconciler and synchronizes the objects in objs to the host cluster
|
||||
func (c *ConfigMapSyncer) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
if !c.isWatching(req.NamespacedName) {
|
||||
// return immediately without re-enqueueing. We aren't watching this resource
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
var virtual corev1.ConfigMap
|
||||
|
||||
if err := c.VirtualClient.Get(ctx, req.NamespacedName, &virtual); err != nil {
|
||||
return reconcile.Result{
|
||||
Requeue: true,
|
||||
}, fmt.Errorf("unable to get configmap %s/%s from virtual cluster: %w", req.Namespace, req.Name, err)
|
||||
}
|
||||
translated, err := c.TranslateFunc(&virtual)
|
||||
if err != nil {
|
||||
return reconcile.Result{
|
||||
Requeue: true,
|
||||
}, fmt.Errorf("unable to translate configmap %s/%s from virtual cluster: %w", req.Namespace, req.Name, err)
|
||||
}
|
||||
translatedKey := types.NamespacedName{
|
||||
Namespace: translated.Namespace,
|
||||
Name: translated.Name,
|
||||
}
|
||||
var host corev1.ConfigMap
|
||||
if err = c.HostClient.Get(ctx, translatedKey, &host); err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
err = c.HostClient.Create(ctx, translated)
|
||||
// for simplicity's sake, we don't check for conflict errors. The existing object will get
|
||||
// picked up on in the next re-enqueue
|
||||
return reconcile.Result{
|
||||
Requeue: true,
|
||||
}, fmt.Errorf("unable to create host configmap %s/%s for virtual configmap %s/%s: %w",
|
||||
translated.Namespace, translated.Name, req.Namespace, req.Name, err)
|
||||
}
|
||||
return reconcile.Result{Requeue: true}, fmt.Errorf("unable to get host configmap %s/%s: %w", translated.Namespace, translated.Name, err)
|
||||
}
|
||||
// we are going to use the host in order to avoid conflicts on update
|
||||
host.Data = translated.Data
|
||||
if host.Labels == nil {
|
||||
host.Labels = make(map[string]string, len(translated.Labels))
|
||||
}
|
||||
// we don't want to override labels made on the host cluster by other applications
|
||||
// but we do need to make sure the labels that the kubelet uses to track host cluster values
|
||||
// are being tracked appropriately
|
||||
for key, value := range translated.Labels {
|
||||
host.Labels[key] = value
|
||||
}
|
||||
if err = c.HostClient.Update(ctx, &host); err != nil {
|
||||
return reconcile.Result{
|
||||
Requeue: true,
|
||||
}, fmt.Errorf("unable to update host configmap %s/%s for virtual configmap %s/%s: %w",
|
||||
translated.Namespace, translated.Name, req.Namespace, req.Name, err)
|
||||
|
||||
}
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
// isWatching is a utility method to determine if a key is in objs without the caller needing
|
||||
// to handle mutex lock/unlock.
|
||||
func (c *ConfigMapSyncer) isWatching(key types.NamespacedName) bool {
|
||||
c.mutex.RLock()
|
||||
defer c.mutex.RUnlock()
|
||||
return c.objs.Has(key)
|
||||
}
|
||||
|
||||
// AddResource adds a given resource to the list of resources that will be synced. Safe to call multiple times for the
|
||||
// same resource.
|
||||
func (c *ConfigMapSyncer) AddResource(ctx context.Context, namespace, name string) error {
|
||||
objKey := types.NamespacedName{
|
||||
Namespace: namespace,
|
||||
Name: name,
|
||||
}
|
||||
// if we already sync this object, no need to writelock/add it
|
||||
if c.isWatching(objKey) {
|
||||
return nil
|
||||
}
|
||||
// lock in write mode since we are now adding the key
|
||||
c.mutex.Lock()
|
||||
if c.objs == nil {
|
||||
c.objs = sets.Set[types.NamespacedName]{}
|
||||
}
|
||||
c.objs = c.objs.Insert(objKey)
|
||||
c.mutex.Unlock()
|
||||
_, err := c.Reconcile(ctx, reconcile.Request{
|
||||
NamespacedName: objKey,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to reconcile new object %s/%s: %w", objKey.Namespace, objKey.Name, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveResource removes a given resource from the list of resources that will be synced. Safe to call for an already
|
||||
// removed resource.
|
||||
func (c *ConfigMapSyncer) RemoveResource(ctx context.Context, namespace, name string) error {
|
||||
objKey := types.NamespacedName{
|
||||
Namespace: namespace,
|
||||
Name: name,
|
||||
}
|
||||
// if we don't sync this object, no need to writelock/add it
|
||||
if !c.isWatching(objKey) {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := retry.OnError(controller.Backoff, func(err error) bool {
|
||||
return err != nil
|
||||
}, func() error {
|
||||
return c.removeHostConfigMap(ctx, namespace, name)
|
||||
}); err != nil {
|
||||
return fmt.Errorf("unable to remove configmap: %w", err)
|
||||
}
|
||||
c.mutex.Lock()
|
||||
if c.objs == nil {
|
||||
c.objs = sets.Set[types.NamespacedName]{}
|
||||
}
|
||||
c.objs = c.objs.Delete(objKey)
|
||||
c.mutex.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *ConfigMapSyncer) removeHostConfigMap(ctx context.Context, virtualNamespace, virtualName string) error {
|
||||
var vConfigMap corev1.ConfigMap
|
||||
err := c.VirtualClient.Get(ctx, types.NamespacedName{Namespace: virtualNamespace, Name: virtualName}, &vConfigMap)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to get virtual configmap %s/%s: %w", virtualNamespace, virtualName, err)
|
||||
}
|
||||
translated, err := c.TranslateFunc(&vConfigMap)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to translate virtual secret: %s/%s: %w", virtualNamespace, virtualName, err)
|
||||
}
|
||||
return c.HostClient.Delete(ctx, translated)
|
||||
}
|
||||
@@ -1,119 +0,0 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
k3klog "github.com/rancher/k3k/pkg/log"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
)
|
||||
|
||||
type ControllerHandler struct {
|
||||
sync.RWMutex
|
||||
// Mgr is the manager used to run new controllers - from the virtual cluster
|
||||
Mgr manager.Manager
|
||||
// Scheme is the scheme used to run new controllers - from the virtual cluster
|
||||
Scheme runtime.Scheme
|
||||
// HostClient is the client used to communicate with the host cluster
|
||||
HostClient client.Client
|
||||
// VirtualClient is the client used to communicate with the virtual cluster
|
||||
VirtualClient client.Client
|
||||
// Translator is the translator that will be used to adjust objects before they
|
||||
// are made on the host cluster
|
||||
Translator translate.ToHostTranslator
|
||||
// Logger is the logger that the controller will use to log errors
|
||||
Logger *k3klog.Logger
|
||||
// controllers are the controllers which are currently running
|
||||
controllers map[schema.GroupVersionKind]updateableReconciler
|
||||
}
|
||||
|
||||
// updateableReconciler is a reconciler that only syncs specific resources (by name/namespace). This list can
|
||||
// be altered through the Add and Remove methods
|
||||
type updateableReconciler interface {
|
||||
reconcile.Reconciler
|
||||
AddResource(ctx context.Context, namespace string, name string) error
|
||||
RemoveResource(ctx context.Context, namespace string, name string) error
|
||||
}
|
||||
|
||||
func (c *ControllerHandler) AddResource(ctx context.Context, obj client.Object) error {
|
||||
c.RLock()
|
||||
controllers := c.controllers
|
||||
if controllers != nil {
|
||||
if r, ok := c.controllers[obj.GetObjectKind().GroupVersionKind()]; ok {
|
||||
err := r.AddResource(ctx, obj.GetNamespace(), obj.GetName())
|
||||
c.RUnlock()
|
||||
return err
|
||||
}
|
||||
}
|
||||
// we need to manually lock/unlock since we intned on write locking to add a new controller
|
||||
c.RUnlock()
|
||||
var r updateableReconciler
|
||||
switch obj.(type) {
|
||||
case *v1.Secret:
|
||||
r = &SecretSyncer{
|
||||
HostClient: c.HostClient,
|
||||
VirtualClient: c.VirtualClient,
|
||||
// TODO: Need actual function
|
||||
TranslateFunc: func(s *v1.Secret) (*v1.Secret, error) {
|
||||
// note that this doesn't do any type safety - fix this
|
||||
// when generics work
|
||||
c.Translator.TranslateTo(s)
|
||||
// Remove service-account-token types when synced to the host
|
||||
if s.Type == v1.SecretTypeServiceAccountToken {
|
||||
s.Type = v1.SecretTypeOpaque
|
||||
}
|
||||
return s, nil
|
||||
},
|
||||
Logger: c.Logger,
|
||||
}
|
||||
case *v1.ConfigMap:
|
||||
r = &ConfigMapSyncer{
|
||||
HostClient: c.HostClient,
|
||||
VirtualClient: c.VirtualClient,
|
||||
// TODO: Need actual function
|
||||
TranslateFunc: func(s *v1.ConfigMap) (*v1.ConfigMap, error) {
|
||||
c.Translator.TranslateTo(s)
|
||||
return s, nil
|
||||
},
|
||||
Logger: c.Logger,
|
||||
}
|
||||
default:
|
||||
// TODO: Technically, the configmap/secret syncers are relatively generic, and this
|
||||
// logic could be used for other types.
|
||||
return fmt.Errorf("unrecognized type: %T", obj)
|
||||
|
||||
}
|
||||
err := ctrl.NewControllerManagedBy(c.Mgr).
|
||||
For(&v1.ConfigMap{}).
|
||||
Complete(r)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to start configmap controller: %w", err)
|
||||
}
|
||||
c.Lock()
|
||||
if c.controllers == nil {
|
||||
c.controllers = map[schema.GroupVersionKind]updateableReconciler{}
|
||||
}
|
||||
c.controllers[obj.GetObjectKind().GroupVersionKind()] = r
|
||||
c.Unlock()
|
||||
|
||||
return r.AddResource(ctx, obj.GetNamespace(), obj.GetName())
|
||||
}
|
||||
|
||||
func (c *ControllerHandler) RemoveResource(ctx context.Context, obj client.Object) error {
|
||||
// since we aren't adding a new controller, we don't need to lock
|
||||
c.RLock()
|
||||
ctrl, ok := c.controllers[obj.GetObjectKind().GroupVersionKind()]
|
||||
c.RUnlock()
|
||||
if !ok {
|
||||
return fmt.Errorf("no controller found for gvk %s", obj.GetObjectKind().GroupVersionKind())
|
||||
}
|
||||
return ctrl.RemoveResource(ctx, obj.GetNamespace(), obj.GetName())
|
||||
}
|
||||
@@ -1,121 +0,0 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
"github.com/rancher/k3k/pkg/log"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
)
|
||||
|
||||
const (
|
||||
pvcController = "pvc-syncer-controller"
|
||||
pvcFinalizerName = "pvc.k3k.io/finalizer"
|
||||
)
|
||||
|
||||
type PVCReconciler struct {
|
||||
virtualClient ctrlruntimeclient.Client
|
||||
hostClient ctrlruntimeclient.Client
|
||||
clusterName string
|
||||
clusterNamespace string
|
||||
Scheme *runtime.Scheme
|
||||
HostScheme *runtime.Scheme
|
||||
logger *log.Logger
|
||||
Translator translate.ToHostTranslator
|
||||
}
|
||||
|
||||
// AddPVCSyncer adds persistentvolumeclaims syncer controller to k3k-kubelet
|
||||
func AddPVCSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string, logger *log.Logger) error {
|
||||
translator := translate.ToHostTranslator{
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
}
|
||||
// initialize a new Reconciler
|
||||
reconciler := PVCReconciler{
|
||||
virtualClient: virtMgr.GetClient(),
|
||||
hostClient: hostMgr.GetClient(),
|
||||
Scheme: virtMgr.GetScheme(),
|
||||
HostScheme: hostMgr.GetScheme(),
|
||||
logger: logger.Named(pvcController),
|
||||
Translator: translator,
|
||||
clusterName: clusterName,
|
||||
clusterNamespace: clusterNamespace,
|
||||
}
|
||||
return ctrl.NewControllerManagedBy(virtMgr).
|
||||
For(&v1.PersistentVolumeClaim{}).
|
||||
WithOptions(controller.Options{
|
||||
MaxConcurrentReconciles: maxConcurrentReconciles,
|
||||
}).
|
||||
Complete(&reconciler)
|
||||
}
|
||||
|
||||
func (r *PVCReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
log := r.logger.With("Cluster", r.clusterName, "PersistentVolumeClaim", req.NamespacedName)
|
||||
var (
|
||||
virtPVC v1.PersistentVolumeClaim
|
||||
hostPVC v1.PersistentVolumeClaim
|
||||
cluster v1alpha1.Cluster
|
||||
)
|
||||
if err := r.hostClient.Get(ctx, types.NamespacedName{Name: r.clusterName, Namespace: r.clusterNamespace}, &cluster); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
// handling persistent volume sync
|
||||
if err := r.virtualClient.Get(ctx, req.NamespacedName, &virtPVC); err != nil {
|
||||
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
|
||||
}
|
||||
syncedPVC := r.pvc(&virtPVC)
|
||||
if err := controllerutil.SetControllerReference(&cluster, syncedPVC, r.HostScheme); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
// handle deletion
|
||||
if !virtPVC.DeletionTimestamp.IsZero() {
|
||||
// deleting the synced service if exists
|
||||
if err := r.hostClient.Delete(ctx, syncedPVC); !apierrors.IsNotFound(err) {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
// remove the finalizer after cleaning up the synced service
|
||||
if controllerutil.RemoveFinalizer(&virtPVC, pvcFinalizerName) {
|
||||
if err := r.virtualClient.Update(ctx, &virtPVC); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
// getting the cluster for setting the controller reference
|
||||
|
||||
// Add finalizer if it does not exist
|
||||
if controllerutil.AddFinalizer(&virtPVC, pvcFinalizerName) {
|
||||
if err := r.virtualClient.Update(ctx, &virtPVC); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
// create or update the pvc on host
|
||||
if err := r.hostClient.Get(ctx, types.NamespacedName{Name: syncedPVC.Name, Namespace: r.clusterNamespace}, &hostPVC); err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
log.Info("creating the persistent volume for the first time on the host cluster")
|
||||
return reconcile.Result{}, r.hostClient.Create(ctx, syncedPVC)
|
||||
}
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
log.Info("updating pvc on the host cluster")
|
||||
return reconcile.Result{}, r.hostClient.Update(ctx, syncedPVC)
|
||||
|
||||
}
|
||||
|
||||
func (r *PVCReconciler) pvc(obj *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim {
|
||||
hostPVC := obj.DeepCopy()
|
||||
r.Translator.TranslateTo(hostPVC)
|
||||
return hostPVC
|
||||
}
|
||||
@@ -1,170 +0,0 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/rancher/k3k/pkg/controller"
|
||||
k3klog "github.com/rancher/k3k/pkg/log"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
)
|
||||
|
||||
type SecretSyncer struct {
|
||||
mutex sync.RWMutex
|
||||
// VirtualClient is the client for the virtual cluster
|
||||
VirtualClient client.Client
|
||||
// CoreClient is the client for the host cluster
|
||||
HostClient client.Client
|
||||
// TranslateFunc is the function that translates a given resource from it's virtual representation to the host
|
||||
// representation
|
||||
TranslateFunc func(*corev1.Secret) (*corev1.Secret, error)
|
||||
// Logger is the logger that the controller will use
|
||||
Logger *k3klog.Logger
|
||||
// objs are the objects that the syncer should watch/syncronize. Should only be manipulated
|
||||
// through add/remove
|
||||
objs sets.Set[types.NamespacedName]
|
||||
}
|
||||
|
||||
// Reconcile implements reconcile.Reconciler and synchronizes the objects in objs to the host cluster
|
||||
func (s *SecretSyncer) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
if !s.isWatching(req.NamespacedName) {
|
||||
// return immediately without re-enqueueing. We aren't watching this resource
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
var virtual corev1.Secret
|
||||
|
||||
if err := s.VirtualClient.Get(ctx, req.NamespacedName, &virtual); err != nil {
|
||||
return reconcile.Result{
|
||||
Requeue: true,
|
||||
}, fmt.Errorf("unable to get secret %s/%s from virtual cluster: %w", req.Namespace, req.Name, err)
|
||||
}
|
||||
translated, err := s.TranslateFunc(&virtual)
|
||||
if err != nil {
|
||||
return reconcile.Result{
|
||||
Requeue: true,
|
||||
}, fmt.Errorf("unable to translate secret %s/%s from virtual cluster: %w", req.Namespace, req.Name, err)
|
||||
}
|
||||
translatedKey := types.NamespacedName{
|
||||
Namespace: translated.Namespace,
|
||||
Name: translated.Name,
|
||||
}
|
||||
var host corev1.Secret
|
||||
if err = s.HostClient.Get(ctx, translatedKey, &host); err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
err = s.HostClient.Create(ctx, translated)
|
||||
// for simplicity's sake, we don't check for conflict errors. The existing object will get
|
||||
// picked up on in the next re-enqueue
|
||||
return reconcile.Result{
|
||||
Requeue: true,
|
||||
}, fmt.Errorf("unable to create host secret %s/%s for virtual secret %s/%s: %w",
|
||||
translated.Namespace, translated.Name, req.Namespace, req.Name, err)
|
||||
}
|
||||
return reconcile.Result{Requeue: true}, fmt.Errorf("unable to get host secret %s/%s: %w", translated.Namespace, translated.Name, err)
|
||||
}
|
||||
// we are going to use the host in order to avoid conflicts on update
|
||||
host.Data = translated.Data
|
||||
if host.Labels == nil {
|
||||
host.Labels = make(map[string]string, len(translated.Labels))
|
||||
}
|
||||
// we don't want to override labels made on the host cluster by other applications
|
||||
// but we do need to make sure the labels that the kubelet uses to track host cluster values
|
||||
// are being tracked appropriately
|
||||
for key, value := range translated.Labels {
|
||||
host.Labels[key] = value
|
||||
}
|
||||
if err = s.HostClient.Update(ctx, &host); err != nil {
|
||||
return reconcile.Result{
|
||||
Requeue: true,
|
||||
}, fmt.Errorf("unable to update host secret %s/%s for virtual secret %s/%s: %w",
|
||||
translated.Namespace, translated.Name, req.Namespace, req.Name, err)
|
||||
|
||||
}
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
// isWatching is a utility method to determine if a key is in objs without the caller needing
|
||||
// to handle mutex lock/unlock.
|
||||
func (s *SecretSyncer) isWatching(key types.NamespacedName) bool {
|
||||
s.mutex.RLock()
|
||||
defer s.mutex.RUnlock()
|
||||
return s.objs.Has(key)
|
||||
}
|
||||
|
||||
// AddResource adds a given resource to the list of resources that will be synced. Safe to call multiple times for the
|
||||
// same resource.
|
||||
func (s *SecretSyncer) AddResource(ctx context.Context, namespace, name string) error {
|
||||
objKey := types.NamespacedName{
|
||||
Namespace: namespace,
|
||||
Name: name,
|
||||
}
|
||||
// if we already sync this object, no need to writelock/add it
|
||||
if s.isWatching(objKey) {
|
||||
return nil
|
||||
}
|
||||
// lock in write mode since we are now adding the key
|
||||
s.mutex.Lock()
|
||||
if s.objs == nil {
|
||||
s.objs = sets.Set[types.NamespacedName]{}
|
||||
}
|
||||
s.objs = s.objs.Insert(objKey)
|
||||
s.mutex.Unlock()
|
||||
_, err := s.Reconcile(ctx, reconcile.Request{
|
||||
NamespacedName: objKey,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to reconcile new object %s/%s: %w", objKey.Namespace, objKey.Name, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveResource removes a given resource from the list of resources that will be synced. Safe to call for an already
|
||||
// removed resource.
|
||||
func (s *SecretSyncer) RemoveResource(ctx context.Context, namespace, name string) error {
|
||||
objKey := types.NamespacedName{
|
||||
Namespace: namespace,
|
||||
Name: name,
|
||||
}
|
||||
// if we don't sync this object, no need to writelock/add it
|
||||
if !s.isWatching(objKey) {
|
||||
return nil
|
||||
}
|
||||
// lock in write mode since we are now adding the key
|
||||
if err := retry.OnError(controller.Backoff, func(err error) bool {
|
||||
return err != nil
|
||||
}, func() error {
|
||||
return s.removeHostSecret(ctx, namespace, name)
|
||||
}); err != nil {
|
||||
return fmt.Errorf("unable to remove secret: %w", err)
|
||||
}
|
||||
|
||||
s.mutex.Lock()
|
||||
if s.objs == nil {
|
||||
s.objs = sets.Set[types.NamespacedName]{}
|
||||
}
|
||||
s.objs = s.objs.Delete(objKey)
|
||||
s.mutex.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SecretSyncer) removeHostSecret(ctx context.Context, virtualNamespace, virtualName string) error {
|
||||
var vSecret corev1.Secret
|
||||
err := s.VirtualClient.Get(ctx, types.NamespacedName{
|
||||
Namespace: virtualNamespace,
|
||||
Name: virtualName,
|
||||
}, &vSecret)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to get virtual secret %s/%s: %w", virtualNamespace, virtualName, err)
|
||||
}
|
||||
translated, err := s.TranslateFunc(&vSecret)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to translate virtual secret: %s/%s: %w", virtualNamespace, virtualName, err)
|
||||
}
|
||||
return s.HostClient.Delete(ctx, translated)
|
||||
}
|
||||
@@ -1,126 +0,0 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
"github.com/rancher/k3k/pkg/log"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
)
|
||||
|
||||
const (
|
||||
serviceSyncerController = "service-syncer-controller"
|
||||
maxConcurrentReconciles = 1
|
||||
serviceFinalizerName = "service.k3k.io/finalizer"
|
||||
)
|
||||
|
||||
type ServiceReconciler struct {
|
||||
virtualClient ctrlruntimeclient.Client
|
||||
hostClient ctrlruntimeclient.Client
|
||||
clusterName string
|
||||
clusterNamespace string
|
||||
Scheme *runtime.Scheme
|
||||
HostScheme *runtime.Scheme
|
||||
logger *log.Logger
|
||||
Translator translate.ToHostTranslator
|
||||
}
|
||||
|
||||
// AddServiceSyncer adds service syncer controller to the manager of the virtual cluster
|
||||
func AddServiceSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string, logger *log.Logger) error {
|
||||
translator := translate.ToHostTranslator{
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
}
|
||||
// initialize a new Reconciler
|
||||
reconciler := ServiceReconciler{
|
||||
virtualClient: virtMgr.GetClient(),
|
||||
hostClient: hostMgr.GetClient(),
|
||||
Scheme: virtMgr.GetScheme(),
|
||||
HostScheme: hostMgr.GetScheme(),
|
||||
logger: logger.Named(serviceSyncerController),
|
||||
Translator: translator,
|
||||
clusterName: clusterName,
|
||||
clusterNamespace: clusterNamespace,
|
||||
}
|
||||
return ctrl.NewControllerManagedBy(virtMgr).
|
||||
For(&v1.Service{}).
|
||||
WithOptions(controller.Options{
|
||||
MaxConcurrentReconciles: maxConcurrentReconciles,
|
||||
}).
|
||||
Complete(&reconciler)
|
||||
}
|
||||
|
||||
func (s *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
log := s.logger.With("Cluster", s.clusterName, "Service", req.NamespacedName)
|
||||
if req.Name == "kubernetes" || req.Name == "kube-dns" {
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
var (
|
||||
virtService v1.Service
|
||||
hostService v1.Service
|
||||
cluster v1alpha1.Cluster
|
||||
)
|
||||
// getting the cluster for setting the controller reference
|
||||
if err := s.hostClient.Get(ctx, types.NamespacedName{Name: s.clusterName, Namespace: s.clusterNamespace}, &cluster); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
if err := s.virtualClient.Get(ctx, req.NamespacedName, &virtService); err != nil {
|
||||
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
|
||||
}
|
||||
syncedService := s.service(&virtService)
|
||||
if err := controllerutil.SetControllerReference(&cluster, syncedService, s.HostScheme); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
// handle deletion
|
||||
if !virtService.DeletionTimestamp.IsZero() {
|
||||
// deleting the synced service if exists
|
||||
if err := s.hostClient.Delete(ctx, syncedService); err != nil {
|
||||
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
|
||||
}
|
||||
// remove the finalizer after cleaning up the synced service
|
||||
if controllerutil.ContainsFinalizer(&virtService, serviceFinalizerName) {
|
||||
controllerutil.RemoveFinalizer(&virtService, serviceFinalizerName)
|
||||
if err := s.virtualClient.Update(ctx, &virtService); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
// Add finalizer if it does not exist
|
||||
if !controllerutil.ContainsFinalizer(&virtService, serviceFinalizerName) {
|
||||
controllerutil.AddFinalizer(&virtService, serviceFinalizerName)
|
||||
if err := s.virtualClient.Update(ctx, &virtService); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
// create or update the service on host
|
||||
if err := s.hostClient.Get(ctx, types.NamespacedName{Name: syncedService.Name, Namespace: s.clusterNamespace}, &hostService); err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
log.Info("creating the service for the first time on the host cluster")
|
||||
return reconcile.Result{}, s.hostClient.Create(ctx, syncedService)
|
||||
}
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
log.Info("updating service on the host cluster")
|
||||
return reconcile.Result{}, s.hostClient.Update(ctx, syncedService)
|
||||
}
|
||||
|
||||
func (s *ServiceReconciler) service(obj *v1.Service) *v1.Service {
|
||||
hostService := obj.DeepCopy()
|
||||
s.Translator.TranslateTo(hostService)
|
||||
// don't sync finalizers to the host
|
||||
return hostService
|
||||
}
|
||||

k3k-kubelet/controller/syncer/configmap.go (new file, 150 lines)
@@ -0,0 +1,150 @@
|
||||
package syncer
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
)
|
||||
|
||||
const (
|
||||
configMapControllerName = "configmap-syncer"
|
||||
configMapFinalizerName = "configmap.k3k.io/finalizer"
|
||||
)
|
||||
|
||||
type ConfigMapSyncer struct {
|
||||
// SyncerContext contains all client information for host and virtual cluster
|
||||
*SyncerContext
|
||||
}
|
||||
|
||||
func (c *ConfigMapSyncer) Name() string {
|
||||
return configMapControllerName
|
||||
}
|
||||
|
||||
// AddConfigMapSyncer adds configmap syncer controller to the manager of the virtual cluster
|
||||
func AddConfigMapSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
|
||||
reconciler := ConfigMapSyncer{
|
||||
SyncerContext: &SyncerContext{
|
||||
VirtualClient: virtMgr.GetClient(),
|
||||
HostClient: hostMgr.GetClient(),
|
||||
Translator: translate.ToHostTranslator{
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
},
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
},
|
||||
}
|
||||
|
||||
name := reconciler.Translator.TranslateName(clusterNamespace, configMapControllerName)
|
||||
|
||||
return ctrl.NewControllerManagedBy(virtMgr).
|
||||
Named(name).
|
||||
For(&corev1.ConfigMap{}).WithEventFilter(predicate.NewPredicateFuncs(reconciler.filterResources)).
|
||||
Complete(&reconciler)
|
||||
}
|
||||
|
||||
func (c *ConfigMapSyncer) filterResources(object client.Object) bool {
|
||||
var cluster v1alpha1.Cluster
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
if err := c.HostClient.Get(ctx, types.NamespacedName{Name: c.ClusterName, Namespace: c.ClusterNamespace}, &cluster); err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// check for configMap Sync Config
|
||||
syncConfig := cluster.Spec.Sync.ConfigMaps
|
||||
|
||||
// If syncing is disabled, only process deletions to allow for cleanup.
|
||||
if !syncConfig.Enabled {
|
||||
return object.GetDeletionTimestamp() != nil
|
||||
}
|
||||
|
||||
labelSelector := labels.SelectorFromSet(syncConfig.Selector)
|
||||
if labelSelector.Empty() {
|
||||
return true
|
||||
}
|
||||
|
||||
return labelSelector.Matches(labels.Set(object.GetLabels()))
|
||||
}
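// Illustrative note (not part of this change): labels.SelectorFromSet with a nil or empty
// set yields an empty selector, so every object is synced; otherwise only objects whose
// labels match the set are synced. For example, with a hypothetical selector {"team": "a"}:
//
//	sel := labels.SelectorFromSet(labels.Set{"team": "a"})
//	sel.Matches(labels.Set{"team": "a", "extra": "x"}) // true
//	sel.Matches(labels.Set{"team": "b"})               // false
//	labels.SelectorFromSet(nil).Empty()                // true, so everything matches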
|
||||
|
||||
// Reconcile implements reconcile.Reconciler and synchronizes the virtual cluster ConfigMap to the host cluster
|
||||
func (c *ConfigMapSyncer) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
log := ctrl.LoggerFrom(ctx).WithValues("cluster", c.ClusterName, "clusterNamespace", c.ClusterName)
|
||||
ctx = ctrl.LoggerInto(ctx, log)
|
||||
|
||||
var cluster v1alpha1.Cluster
|
||||
|
||||
if err := c.HostClient.Get(ctx, types.NamespacedName{Name: c.ClusterName, Namespace: c.ClusterNamespace}, &cluster); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
var virtualConfigMap corev1.ConfigMap
|
||||
|
||||
if err := c.VirtualClient.Get(ctx, req.NamespacedName, &virtualConfigMap); err != nil {
|
||||
return reconcile.Result{}, client.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
syncedConfigMap := c.translateConfigMap(&virtualConfigMap)
|
||||
|
||||
// handle deletion
|
||||
if !virtualConfigMap.DeletionTimestamp.IsZero() {
|
||||
// delete the synced configMap if it exists
|
||||
if err := c.HostClient.Delete(ctx, syncedConfigMap); err != nil && !apierrors.IsNotFound(err) {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
// remove the finalizer after cleaning up the synced configMap
|
||||
if controllerutil.RemoveFinalizer(&virtualConfigMap, configMapFinalizerName) {
|
||||
if err := c.VirtualClient.Update(ctx, &virtualConfigMap); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
// Add finalizer if it does not exist
|
||||
if controllerutil.AddFinalizer(&virtualConfigMap, configMapFinalizerName) {
|
||||
if err := c.VirtualClient.Update(ctx, &virtualConfigMap); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
var hostConfigMap corev1.ConfigMap
|
||||
if err := c.HostClient.Get(ctx, types.NamespacedName{Name: syncedConfigMap.Name, Namespace: syncedConfigMap.Namespace}, &hostConfigMap); err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
log.Info("creating the ConfigMap for the first time on the host cluster")
|
||||
return reconcile.Result{}, c.HostClient.Create(ctx, syncedConfigMap)
|
||||
}
|
||||
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
// TODO: Add option to keep labels/annotation set by the host cluster
|
||||
log.Info("updating ConfigMap on the host cluster")
|
||||
|
||||
return reconcile.Result{}, c.HostClient.Update(ctx, syncedConfigMap)
|
||||
}
|
||||
|
||||
// translateConfigMap takes a configMap created in the virtual cluster and
// translates it into its host cluster counterpart
|
||||
func (c *ConfigMapSyncer) translateConfigMap(configMap *corev1.ConfigMap) *corev1.ConfigMap {
|
||||
hostConfigMap := configMap.DeepCopy()
|
||||
c.Translator.TranslateTo(hostConfigMap)
|
||||
|
||||
return hostConfigMap
|
||||
}
|
||||
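For orientation, a sketch of how the Add* helpers introduced in these files could be wired up by the kubelet; the registerSyncers wrapper is a hypothetical name used only for illustration, not part of this change:

package sketch

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/manager"

	"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
)

// registerSyncers is a hypothetical helper: every syncer watches the virtual cluster
// through virtMgr and reads/writes the host cluster through hostMgr.
func registerSyncers(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
	for _, add := range []func(context.Context, manager.Manager, manager.Manager, string, string) error{
		syncer.AddConfigMapSyncer,
		syncer.AddIngressSyncer,
		syncer.AddPVCSyncer,
		syncer.AddPodPVCController,
		syncer.AddPriorityClassSyncer,
	} {
		if err := add(ctx, virtMgr, hostMgr, clusterName, clusterNamespace); err != nil {
			return err
		}
	}

	return nil
}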
243
k3k-kubelet/controller/syncer/configmap_test.go
Normal file
@@ -0,0 +1,243 @@
|
||||
package syncer_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var ConfigMapTests = func() {
|
||||
var (
|
||||
namespace string
|
||||
cluster v1alpha1.Cluster
|
||||
)
|
||||
|
||||
BeforeEach(func() {
|
||||
ctx := context.Background()
|
||||
|
||||
ns := v1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-"},
|
||||
}
|
||||
err := hostTestEnv.k8sClient.Create(ctx, &ns)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
namespace = ns.Name
|
||||
|
||||
cluster = v1alpha1.Cluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "cluster-",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1alpha1.ClusterSpec{
|
||||
Sync: &v1alpha1.SyncConfig{
|
||||
ConfigMaps: v1alpha1.ConfigMapSyncConfig{
|
||||
Enabled: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
err = hostTestEnv.k8sClient.Create(ctx, &cluster)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
err = syncer.AddConfigMapSyncer(ctx, virtManager, hostManager, cluster.Name, cluster.Namespace)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
ns := v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
|
||||
err := hostTestEnv.k8sClient.Delete(context.Background(), &ns)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("creates a ConfigMap on the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
configMap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "cm-",
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Data: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
}
|
||||
|
||||
err := virtTestEnv.k8sClient.Create(ctx, configMap)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created configmap %s in virtual cluster", configMap.Name))
|
||||
|
||||
var hostConfigMap v1.ConfigMap
|
||||
hostConfigMapName := translateName(cluster, configMap.Namespace, configMap.Name)
|
||||
|
||||
Eventually(func() error {
|
||||
key := client.ObjectKey{Name: hostConfigMapName, Namespace: namespace}
|
||||
return hostTestEnv.k8sClient.Get(ctx, key, &hostConfigMap)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Created Configmap %s in host cluster", hostConfigMapName))
|
||||
|
||||
Expect(hostConfigMap.Data).To(Equal(configMap.Data))
|
||||
Expect(hostConfigMap.Labels).To(ContainElement("bar"))
|
||||
|
||||
GinkgoWriter.Printf("labels: %v\n", hostConfigMap.Labels)
|
||||
})
|
||||
|
||||
It("updates a ConfigMap on the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
configMap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "cm-",
|
||||
Namespace: "default",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
}
|
||||
|
||||
err := virtTestEnv.k8sClient.Create(ctx, configMap)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created configmap %s in virtual cluster", configMap.Name))
|
||||
|
||||
var hostConfigMap v1.ConfigMap
|
||||
hostConfigMapName := translateName(cluster, configMap.Namespace, configMap.Name)
|
||||
|
||||
Eventually(func() error {
|
||||
key := client.ObjectKey{Name: hostConfigMapName, Namespace: namespace}
|
||||
return hostTestEnv.k8sClient.Get(ctx, key, &hostConfigMap)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Created configmap %s in host cluster", hostConfigMapName))
|
||||
|
||||
Expect(hostConfigMap.Data).To(Equal(configMap.Data))
|
||||
Expect(hostConfigMap.Labels).NotTo(ContainElement("bar"))
|
||||
|
||||
key := client.ObjectKeyFromObject(configMap)
|
||||
err = virtTestEnv.k8sClient.Get(ctx, key, configMap)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
configMap.Labels = map[string]string{"foo": "bar"}
|
||||
|
||||
// update virtual configmap
|
||||
err = virtTestEnv.k8sClient.Update(ctx, configMap)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(configMap.Labels).To(ContainElement("bar"))
|
||||
|
||||
err = virtTestEnv.k8sClient.Get(ctx, key, configMap)
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// check hostConfigMap
|
||||
Eventually(func() map[string]string {
|
||||
key := client.ObjectKey{Name: hostConfigMapName, Namespace: namespace}
|
||||
err = hostTestEnv.k8sClient.Get(ctx, key, &hostConfigMap)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
return hostConfigMap.Labels
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(ContainElement("bar"))
|
||||
})
|
||||
|
||||
It("deletes a configMap on the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
configMap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "cm-",
|
||||
Namespace: "default",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
}
|
||||
|
||||
err := virtTestEnv.k8sClient.Create(ctx, configMap)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created configmap %s in virtual cluster", configMap.Name))
|
||||
|
||||
var hostConfigMap v1.ConfigMap
|
||||
hostConfigMapName := translateName(cluster, configMap.Namespace, configMap.Name)
|
||||
|
||||
Eventually(func() error {
|
||||
key := client.ObjectKey{Name: hostConfigMapName, Namespace: namespace}
|
||||
return hostTestEnv.k8sClient.Get(ctx, key, &hostConfigMap)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Created configmap %s in host cluster", hostConfigMapName))
|
||||
|
||||
Expect(hostConfigMap.Data).To(Equal(configMap.Data))
|
||||
|
||||
err = virtTestEnv.k8sClient.Delete(ctx, configMap)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
Eventually(func() bool {
|
||||
key := client.ObjectKey{Name: hostConfigMapName, Namespace: namespace}
|
||||
err := hostTestEnv.k8sClient.Get(ctx, key, &hostConfigMap)
|
||||
return apierrors.IsNotFound(err)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeTrue())
|
||||
})
|
||||
It("will not sync a configMap if disabled", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
cluster.Spec.Sync.ConfigMaps.Enabled = false
|
||||
err := hostTestEnv.k8sClient.Update(ctx, &cluster)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
configMap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "cm-",
|
||||
Namespace: "default",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
}
|
||||
|
||||
err = virtTestEnv.k8sClient.Create(ctx, configMap)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created configmap %s in virtual cluster", configMap.Name))
|
||||
|
||||
var hostConfigMap v1.ConfigMap
|
||||
hostConfigMapName := translateName(cluster, configMap.Namespace, configMap.Name)
|
||||
|
||||
Eventually(func() bool {
|
||||
key := client.ObjectKey{Name: hostConfigMapName, Namespace: namespace}
|
||||
err := hostTestEnv.k8sClient.Get(ctx, key, &hostConfigMap)
|
||||
GinkgoWriter.Printf("error: %v", err)
|
||||
return apierrors.IsNotFound(err)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeTrue())
|
||||
})
|
||||
}
|
||||
161
k3k-kubelet/controller/syncer/ingress.go
Normal file
@@ -0,0 +1,161 @@
|
||||
package syncer
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
networkingv1 "k8s.io/api/networking/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
)
|
||||
|
||||
const (
|
||||
ingressControllerName = "ingress-syncer-controller"
|
||||
ingressFinalizerName = "ingress.k3k.io/finalizer"
|
||||
)
|
||||
|
||||
type IngressReconciler struct {
|
||||
*SyncerContext
|
||||
}
|
||||
|
||||
// AddIngressSyncer adds ingress syncer controller to the manager of the virtual cluster
|
||||
func AddIngressSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
|
||||
reconciler := IngressReconciler{
|
||||
SyncerContext: &SyncerContext{
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
VirtualClient: virtMgr.GetClient(),
|
||||
HostClient: hostMgr.GetClient(),
|
||||
Translator: translate.ToHostTranslator{
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
name := reconciler.Translator.TranslateName(clusterNamespace, ingressControllerName)
|
||||
|
||||
return ctrl.NewControllerManagedBy(virtMgr).
|
||||
Named(name).
|
||||
For(&networkingv1.Ingress{}).
|
||||
WithEventFilter(predicate.NewPredicateFuncs(reconciler.filterResources)).
|
||||
Complete(&reconciler)
|
||||
}
|
||||
|
||||
func (r *IngressReconciler) filterResources(object ctrlruntimeclient.Object) bool {
|
||||
var cluster v1alpha1.Cluster
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// check for ingressConfig
|
||||
syncConfig := cluster.Spec.Sync.Ingresses
|
||||
|
||||
// If syncing is disabled, only process deletions to allow for cleanup.
|
||||
if !syncConfig.Enabled {
|
||||
return object.GetDeletionTimestamp() != nil
|
||||
}
|
||||
|
||||
labelSelector := labels.SelectorFromSet(syncConfig.Selector)
|
||||
if labelSelector.Empty() {
|
||||
return true
|
||||
}
|
||||
|
||||
return labelSelector.Matches(labels.Set(object.GetLabels()))
|
||||
}
|
||||
|
||||
func (r *IngressReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.ClusterName, "clusterNamespace", r.ClusterNamespace)
|
||||
ctx = ctrl.LoggerInto(ctx, log)
|
||||
|
||||
log.Info("reconciling ingress object")
|
||||
|
||||
var (
|
||||
virtIngress networkingv1.Ingress
|
||||
cluster v1alpha1.Cluster
|
||||
)
|
||||
|
||||
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
if err := r.VirtualClient.Get(ctx, req.NamespacedName, &virtIngress); err != nil {
|
||||
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
syncedIngress := r.ingress(&virtIngress)
|
||||
if err := controllerutil.SetControllerReference(&cluster, syncedIngress, r.HostClient.Scheme()); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
// handle deletion
|
||||
if !virtIngress.DeletionTimestamp.IsZero() {
|
||||
// delete the synced ingress if it exists
|
||||
if err := r.HostClient.Delete(ctx, syncedIngress); err != nil {
|
||||
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
// remove the finalizer after cleaning up the synced ingress
|
||||
if controllerutil.RemoveFinalizer(&virtIngress, ingressFinalizerName) {
|
||||
if err := r.VirtualClient.Update(ctx, &virtIngress); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
// Add finalizer if it does not exist
|
||||
|
||||
if controllerutil.AddFinalizer(&virtIngress, ingressFinalizerName) {
|
||||
if err := r.VirtualClient.Update(ctx, &virtIngress); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
// create or update the ingress on host
|
||||
var hostIngress networkingv1.Ingress
|
||||
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: syncedIngress.Name, Namespace: r.ClusterNamespace}, &hostIngress); err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
log.Info("creating the ingress for the first time on the host cluster")
|
||||
return reconcile.Result{}, r.HostClient.Create(ctx, syncedIngress)
|
||||
}
|
||||
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
log.Info("updating ingress on the host cluster")
|
||||
|
||||
return reconcile.Result{}, r.HostClient.Update(ctx, syncedIngress)
|
||||
}
|
||||
|
||||
func (s *IngressReconciler) ingress(obj *networkingv1.Ingress) *networkingv1.Ingress {
|
||||
hostIngress := obj.DeepCopy()
|
||||
s.Translator.TranslateTo(hostIngress)
|
||||
|
||||
for _, rule := range hostIngress.Spec.Rules {
|
||||
// modify services in rules to point to the synced services
|
||||
if rule.HTTP != nil {
|
||||
for _, path := range rule.HTTP.Paths {
|
||||
if path.Backend.Service != nil {
|
||||
path.Backend.Service.Name = s.Translator.TranslateName(obj.GetNamespace(), path.Backend.Service.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// don't sync finalizers to the host
|
||||
return hostIngress
|
||||
}
|
||||
349
k3k-kubelet/controller/syncer/ingress_test.go
Normal file
@@ -0,0 +1,349 @@
|
||||
package syncer_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"k8s.io/utils/ptr"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
networkingv1 "k8s.io/api/networking/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var IngressTests = func() {
|
||||
var (
|
||||
namespace string
|
||||
cluster v1alpha1.Cluster
|
||||
)
|
||||
|
||||
BeforeEach(func() {
|
||||
ctx := context.Background()
|
||||
|
||||
ns := v1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-"},
|
||||
}
|
||||
err := hostTestEnv.k8sClient.Create(ctx, &ns)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
namespace = ns.Name
|
||||
|
||||
cluster = v1alpha1.Cluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "cluster-",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1alpha1.ClusterSpec{
|
||||
Sync: &v1alpha1.SyncConfig{
|
||||
Ingresses: v1alpha1.IngressSyncConfig{
|
||||
Enabled: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
err = hostTestEnv.k8sClient.Create(ctx, &cluster)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
err = syncer.AddIngressSyncer(ctx, virtManager, hostManager, cluster.Name, cluster.Namespace)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
ns := v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
|
||||
err := hostTestEnv.k8sClient.Delete(context.Background(), &ns)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("creates a Ingress on the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
ingress := &networkingv1.Ingress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "ingress-",
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Spec: networkingv1.IngressSpec{
|
||||
Rules: []networkingv1.IngressRule{
|
||||
{
|
||||
Host: "test.com",
|
||||
IngressRuleValue: networkingv1.IngressRuleValue{
|
||||
HTTP: &networkingv1.HTTPIngressRuleValue{
|
||||
Paths: []networkingv1.HTTPIngressPath{
|
||||
{
|
||||
Path: "/",
|
||||
PathType: ptr.To(networkingv1.PathTypePrefix),
|
||||
Backend: networkingv1.IngressBackend{
|
||||
Service: &networkingv1.IngressServiceBackend{
|
||||
Name: "test-service",
|
||||
Port: networkingv1.ServiceBackendPort{
|
||||
Name: "test-port",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := virtTestEnv.k8sClient.Create(ctx, ingress)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created Ingress %s in virtual cluster", ingress.Name))
|
||||
|
||||
var hostIngress networkingv1.Ingress
|
||||
hostIngressName := translateName(cluster, ingress.Namespace, ingress.Name)
|
||||
|
||||
Eventually(func() error {
|
||||
key := client.ObjectKey{Name: hostIngressName, Namespace: namespace}
|
||||
return hostTestEnv.k8sClient.Get(ctx, key, &hostIngress)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Created Ingress %s in host cluster", hostIngressName))
|
||||
|
||||
Expect(len(hostIngress.Spec.Rules)).To(Equal(1))
|
||||
Expect(hostIngress.Spec.Rules[0].Host).To(Equal("test.com"))
|
||||
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Path).To(Equal("/"))
|
||||
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name).To(Equal(translateName(cluster, ingress.Namespace, "test-service")))
|
||||
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Port.Name).To(Equal("test-port"))
|
||||
|
||||
GinkgoWriter.Printf("labels: %v\n", hostIngress.Labels)
|
||||
})
|
||||
|
||||
It("updates a Ingress on the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
ingress := &networkingv1.Ingress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "ingress-",
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Spec: networkingv1.IngressSpec{
|
||||
Rules: []networkingv1.IngressRule{
|
||||
{
|
||||
Host: "test.com",
|
||||
IngressRuleValue: networkingv1.IngressRuleValue{
|
||||
HTTP: &networkingv1.HTTPIngressRuleValue{
|
||||
Paths: []networkingv1.HTTPIngressPath{
|
||||
{
|
||||
Path: "/",
|
||||
PathType: ptr.To(networkingv1.PathTypePrefix),
|
||||
Backend: networkingv1.IngressBackend{
|
||||
Service: &networkingv1.IngressServiceBackend{
|
||||
Name: "test-service",
|
||||
Port: networkingv1.ServiceBackendPort{
|
||||
Name: "test-port",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := virtTestEnv.k8sClient.Create(ctx, ingress)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created Ingress %s in virtual cluster", ingress.Name))
|
||||
|
||||
var hostIngress networkingv1.Ingress
|
||||
hostIngressName := translateName(cluster, ingress.Namespace, ingress.Name)
|
||||
|
||||
Eventually(func() error {
|
||||
key := client.ObjectKey{Name: hostIngressName, Namespace: namespace}
|
||||
return hostTestEnv.k8sClient.Get(ctx, key, &hostIngress)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Created Ingress %s in host cluster", hostIngressName))
|
||||
|
||||
Expect(len(hostIngress.Spec.Rules)).To(Equal(1))
|
||||
Expect(hostIngress.Spec.Rules[0].Host).To(Equal("test.com"))
|
||||
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Path).To(Equal("/"))
|
||||
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name).To(Equal(translateName(cluster, ingress.Namespace, "test-service")))
|
||||
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Port.Name).To(Equal("test-port"))
|
||||
|
||||
key := client.ObjectKeyFromObject(ingress)
|
||||
err = virtTestEnv.k8sClient.Get(ctx, key, ingress)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
ingress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name = "test-service-updated"
|
||||
|
||||
// update virtual ingress
|
||||
err = virtTestEnv.k8sClient.Update(ctx, ingress)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// check hostIngress
|
||||
Eventually(func() string {
|
||||
key := client.ObjectKey{Name: hostIngressName, Namespace: namespace}
|
||||
err = hostTestEnv.k8sClient.Get(ctx, key, &hostIngress)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
return hostIngress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(Equal(translateName(cluster, ingress.Namespace, "test-service-updated")))
|
||||
})
|
||||
|
||||
It("deletes a Ingress on the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
ingress := &networkingv1.Ingress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "ingress-",
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Spec: networkingv1.IngressSpec{
|
||||
Rules: []networkingv1.IngressRule{
|
||||
{
|
||||
Host: "test.com",
|
||||
IngressRuleValue: networkingv1.IngressRuleValue{
|
||||
HTTP: &networkingv1.HTTPIngressRuleValue{
|
||||
Paths: []networkingv1.HTTPIngressPath{
|
||||
{
|
||||
Path: "/",
|
||||
PathType: ptr.To(networkingv1.PathTypePrefix),
|
||||
Backend: networkingv1.IngressBackend{
|
||||
Service: &networkingv1.IngressServiceBackend{
|
||||
Name: "test-service",
|
||||
Port: networkingv1.ServiceBackendPort{
|
||||
Name: "test-port",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := virtTestEnv.k8sClient.Create(ctx, ingress)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created Ingress %s in virtual cluster", ingress.Name))
|
||||
|
||||
var hostIngress networkingv1.Ingress
|
||||
hostIngressName := translateName(cluster, ingress.Namespace, ingress.Name)
|
||||
|
||||
Eventually(func() error {
|
||||
key := client.ObjectKey{Name: hostIngressName, Namespace: namespace}
|
||||
return hostTestEnv.k8sClient.Get(ctx, key, &hostIngress)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Created Ingress %s in host cluster", hostIngressName))
|
||||
|
||||
Expect(len(hostIngress.Spec.Rules)).To(Equal(1))
|
||||
Expect(hostIngress.Spec.Rules[0].Host).To(Equal("test.com"))
|
||||
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Path).To(Equal("/"))
|
||||
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name).To(Equal(translateName(cluster, ingress.Namespace, "test-service")))
|
||||
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Port.Name).To(Equal("test-port"))
|
||||
|
||||
err = virtTestEnv.k8sClient.Delete(ctx, ingress)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
Eventually(func() bool {
|
||||
key := client.ObjectKey{Name: hostIngressName, Namespace: namespace}
|
||||
err := hostTestEnv.k8sClient.Get(ctx, key, &hostIngress)
|
||||
return apierrors.IsNotFound(err)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeTrue())
|
||||
})
|
||||
|
||||
It("will not sync an Ingress if disabled", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
cluster.Spec.Sync.Ingresses.Enabled = false
|
||||
err := hostTestEnv.k8sClient.Update(ctx, &cluster)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
ingress := &networkingv1.Ingress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "ingress-",
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Spec: networkingv1.IngressSpec{
|
||||
Rules: []networkingv1.IngressRule{
|
||||
{
|
||||
Host: "test.com",
|
||||
IngressRuleValue: networkingv1.IngressRuleValue{
|
||||
HTTP: &networkingv1.HTTPIngressRuleValue{
|
||||
Paths: []networkingv1.HTTPIngressPath{
|
||||
{
|
||||
Path: "/",
|
||||
PathType: ptr.To(networkingv1.PathTypePrefix),
|
||||
Backend: networkingv1.IngressBackend{
|
||||
Service: &networkingv1.IngressServiceBackend{
|
||||
Name: "test-service",
|
||||
Port: networkingv1.ServiceBackendPort{
|
||||
Name: "test-port",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err = virtTestEnv.k8sClient.Create(ctx, ingress)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created Ingress %s in virtual cluster", ingress.Name))
|
||||
|
||||
var hostIngress networkingv1.Ingress
|
||||
hostIngressName := translateName(cluster, ingress.Namespace, ingress.Name)
|
||||
|
||||
Eventually(func() bool {
|
||||
key := client.ObjectKey{Name: hostIngressName, Namespace: namespace}
|
||||
err := hostTestEnv.k8sClient.Get(ctx, key, &hostIngress)
|
||||
return apierrors.IsNotFound(err)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeTrue())
|
||||
})
|
||||
}
|
||||
138
k3k-kubelet/controller/syncer/persistentvolumeclaims.go
Normal file
@@ -0,0 +1,138 @@
|
||||
package syncer
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
)
|
||||
|
||||
const (
|
||||
pvcControllerName = "pvc-syncer-controller"
|
||||
pvcFinalizerName = "pvc.k3k.io/finalizer"
|
||||
)
|
||||
|
||||
type PVCReconciler struct {
|
||||
*SyncerContext
|
||||
}
|
||||
|
||||
// AddPVCSyncer adds persistentvolumeclaims syncer controller to k3k-kubelet
|
||||
func AddPVCSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
|
||||
reconciler := PVCReconciler{
|
||||
SyncerContext: &SyncerContext{
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
VirtualClient: virtMgr.GetClient(),
|
||||
HostClient: hostMgr.GetClient(),
|
||||
Translator: translate.ToHostTranslator{
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
name := reconciler.Translator.TranslateName(clusterNamespace, pvcControllerName)
|
||||
|
||||
return ctrl.NewControllerManagedBy(virtMgr).
|
||||
Named(name).
|
||||
For(&v1.PersistentVolumeClaim{}).
|
||||
WithEventFilter(predicate.NewPredicateFuncs(reconciler.filterResources)).
|
||||
Complete(&reconciler)
|
||||
}
|
||||
|
||||
func (r *PVCReconciler) filterResources(object ctrlruntimeclient.Object) bool {
|
||||
var cluster v1alpha1.Cluster
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// check for pvc config
|
||||
syncConfig := cluster.Spec.Sync.PersistentVolumeClaims
|
||||
|
||||
// If syncing is disabled, only process deletions to allow for cleanup.
|
||||
if !syncConfig.Enabled {
|
||||
return object.GetDeletionTimestamp() != nil
|
||||
}
|
||||
|
||||
labelSelector := labels.SelectorFromSet(syncConfig.Selector)
|
||||
if labelSelector.Empty() {
|
||||
return true
|
||||
}
|
||||
|
||||
return labelSelector.Matches(labels.Set(object.GetLabels()))
|
||||
}
|
||||
|
||||
func (r *PVCReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.ClusterName, "clusterNamespace", r.ClusterNamespace)
|
||||
ctx = ctrl.LoggerInto(ctx, log)
|
||||
|
||||
var (
|
||||
virtPVC v1.PersistentVolumeClaim
|
||||
cluster v1alpha1.Cluster
|
||||
)
|
||||
|
||||
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
if err := r.VirtualClient.Get(ctx, req.NamespacedName, &virtPVC); err != nil {
|
||||
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
syncedPVC := r.pvc(&virtPVC)
|
||||
if err := controllerutil.SetControllerReference(&cluster, syncedPVC, r.HostClient.Scheme()); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
// handle deletion
|
||||
if !virtPVC.DeletionTimestamp.IsZero() {
|
||||
// delete the synced PVC if it exists
|
||||
if err := r.HostClient.Delete(ctx, syncedPVC); err != nil && !apierrors.IsNotFound(err) {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
// remove the finalizer after cleaning up the synced pvc
|
||||
if controllerutil.RemoveFinalizer(&virtPVC, pvcFinalizerName) {
|
||||
if err := r.VirtualClient.Update(ctx, &virtPVC); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
// Add finalizer if it does not exist
|
||||
if controllerutil.AddFinalizer(&virtPVC, pvcFinalizerName) {
|
||||
if err := r.VirtualClient.Update(ctx, &virtPVC); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
// create the pvc on host
|
||||
log.Info("creating the persistent volume claim for the first time on the host cluster")
|
||||
|
||||
// note that we don't need to update the PVC on the host cluster; we only sync it so that
// it can be handled by the host cluster.
|
||||
return reconcile.Result{}, ctrlruntimeclient.IgnoreAlreadyExists(r.HostClient.Create(ctx, syncedPVC))
|
||||
}
|
||||
|
||||
func (r *PVCReconciler) pvc(obj *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim {
|
||||
hostPVC := obj.DeepCopy()
|
||||
r.Translator.TranslateTo(hostPVC)
|
||||
|
||||
return hostPVC
|
||||
}
|
||||
111
k3k-kubelet/controller/syncer/persistentvolumeclaims_test.go
Normal file
@@ -0,0 +1,111 @@
|
||||
package syncer_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/utils/ptr"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var PVCTests = func() {
|
||||
var (
|
||||
namespace string
|
||||
cluster v1alpha1.Cluster
|
||||
)
|
||||
|
||||
BeforeEach(func() {
|
||||
ctx := context.Background()
|
||||
|
||||
ns := v1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-"},
|
||||
}
|
||||
err := hostTestEnv.k8sClient.Create(ctx, &ns)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
namespace = ns.Name
|
||||
|
||||
cluster = v1alpha1.Cluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "cluster-",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1alpha1.ClusterSpec{
|
||||
Sync: &v1alpha1.SyncConfig{
|
||||
PersistentVolumeClaims: v1alpha1.PersistentVolumeClaimSyncConfig{
|
||||
Enabled: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
err = hostTestEnv.k8sClient.Create(ctx, &cluster)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
err = syncer.AddPVCSyncer(ctx, virtManager, hostManager, cluster.Name, cluster.Namespace)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
ns := v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
|
||||
err := hostTestEnv.k8sClient.Delete(context.Background(), &ns)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("creates a pvc on the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
pvc := &v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "pvc-",
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Spec: v1.PersistentVolumeClaimSpec{
|
||||
StorageClassName: ptr.To("test-sc"),
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{
|
||||
v1.ReadOnlyMany,
|
||||
},
|
||||
Resources: v1.VolumeResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
"storage": resource.MustParse("1G"),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := virtTestEnv.k8sClient.Create(ctx, pvc)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created PVC %s in virtual cluster", pvc.Name))
|
||||
|
||||
var hostPVC v1.PersistentVolumeClaim
|
||||
hostPVCName := translateName(cluster, pvc.Namespace, pvc.Name)
|
||||
|
||||
Eventually(func() error {
|
||||
key := client.ObjectKey{Name: hostPVCName, Namespace: namespace}
|
||||
return hostTestEnv.k8sClient.Get(ctx, key, &hostPVC)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Created PVC %s in host cluster", hostPVCName))
|
||||
|
||||
Expect(*hostPVC.Spec.StorageClassName).To(Equal("test-sc"))
|
||||
|
||||
GinkgoWriter.Printf("labels: %v\n", hostPVC.Labels)
|
||||
})
|
||||
}
|
||||
190
k3k-kubelet/controller/syncer/pod.go
Normal file
@@ -0,0 +1,190 @@
|
||||
package syncer
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/component-helpers/storage/volume"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
)
|
||||
|
||||
const (
|
||||
podControllerName = "pod-pvc-controller"
|
||||
pseudoPVLabel = "pod.k3k.io/pseudoPV"
|
||||
)
|
||||
|
||||
type PodReconciler struct {
|
||||
*SyncerContext
|
||||
}
|
||||
|
||||
// AddPodPVCController adds pod controller to k3k-kubelet
|
||||
func AddPodPVCController(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
|
||||
// initialize a new Reconciler
|
||||
reconciler := PodReconciler{
|
||||
SyncerContext: &SyncerContext{
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
VirtualClient: virtMgr.GetClient(),
|
||||
HostClient: hostMgr.GetClient(),
|
||||
Translator: translate.ToHostTranslator{},
|
||||
},
|
||||
}
|
||||
|
||||
name := reconciler.Translator.TranslateName(clusterNamespace, podControllerName)
|
||||
|
||||
return ctrl.NewControllerManagedBy(virtMgr).
|
||||
Named(name).
|
||||
For(&v1.Pod{}).
|
||||
WithEventFilter(predicate.NewPredicateFuncs(reconciler.filterResources)).
|
||||
Complete(&reconciler)
|
||||
}
|
||||
|
||||
func (r *PodReconciler) filterResources(object ctrlruntimeclient.Object) bool {
|
||||
var cluster v1alpha1.Cluster
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// check for pvc config
|
||||
syncConfig := cluster.Spec.Sync.PersistentVolumeClaims
|
||||
|
||||
// If PVC syncing is disabled, only process deletions to allow for cleanup.
|
||||
return syncConfig.Enabled || object.GetDeletionTimestamp() != nil
|
||||
}
|
||||
|
||||
func (r *PodReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.ClusterName, "clusterNamespace", r.ClusterNamespace)
|
||||
ctx = ctrl.LoggerInto(ctx, log)
|
||||
|
||||
var (
|
||||
virtPod v1.Pod
|
||||
cluster v1alpha1.Cluster
|
||||
)
|
||||
|
||||
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
if err := r.VirtualClient.Get(ctx, req.NamespacedName, &virtPod); err != nil {
|
||||
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
// reconcile pods with pvcs
|
||||
for _, vol := range virtPod.Spec.Volumes {
|
||||
if vol.PersistentVolumeClaim != nil {
|
||||
log.Info("Handling pod with pvc")
|
||||
|
||||
if err := r.reconcilePodWithPVC(ctx, &virtPod, vol.PersistentVolumeClaim); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
// reconcilePodWithPVC creates a pseudo PV for each PVC referenced by a pod so that the pod can be
// scheduled on the virtual kubelet and then created on the host; the PV itself is not synced to the host cluster.
|
||||
func (r *PodReconciler) reconcilePodWithPVC(ctx context.Context, pod *v1.Pod, pvcSource *v1.PersistentVolumeClaimVolumeSource) error {
|
||||
log := ctrl.LoggerFrom(ctx).WithValues("PersistentVolumeClaim", pvcSource.ClaimName)
|
||||
ctx = ctrl.LoggerInto(ctx, log)
|
||||
|
||||
var pvc v1.PersistentVolumeClaim
|
||||
|
||||
key := types.NamespacedName{
|
||||
Name: pvcSource.ClaimName,
|
||||
Namespace: pod.Namespace,
|
||||
}
|
||||
|
||||
if err := r.VirtualClient.Get(ctx, key, &pvc); err != nil {
|
||||
return ctrlruntimeclient.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
log.Info("Creating pseudo Persistent Volume")
|
||||
|
||||
pv := r.pseudoPV(&pvc)
|
||||
if err := r.VirtualClient.Create(ctx, pv); err != nil {
|
||||
return ctrlruntimeclient.IgnoreAlreadyExists(err)
|
||||
}
|
||||
|
||||
orig := pv.DeepCopy()
|
||||
pv.Status = v1.PersistentVolumeStatus{
|
||||
Phase: v1.VolumeBound,
|
||||
}
|
||||
|
||||
if err := r.VirtualClient.Status().Patch(ctx, pv, ctrlruntimeclient.MergeFrom(orig)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Info("Patch the status of PersistentVolumeClaim to Bound")
|
||||
|
||||
pvcPatch := pvc.DeepCopy()
|
||||
if pvcPatch.Annotations == nil {
|
||||
pvcPatch.Annotations = make(map[string]string)
|
||||
}
|
||||
|
||||
pvcPatch.Annotations[volume.AnnBoundByController] = "yes"
|
||||
pvcPatch.Annotations[volume.AnnBindCompleted] = "yes"
|
||||
pvcPatch.Status.Phase = v1.ClaimBound
|
||||
pvcPatch.Status.AccessModes = pvcPatch.Spec.AccessModes
|
||||
|
||||
return r.VirtualClient.Status().Update(ctx, pvcPatch)
|
||||
}
|
||||
|
||||
func (r *PodReconciler) pseudoPV(obj *v1.PersistentVolumeClaim) *v1.PersistentVolume {
|
||||
var storageClass string
|
||||
|
||||
if obj.Spec.StorageClassName != nil {
|
||||
storageClass = *obj.Spec.StorageClassName
|
||||
}
|
||||
|
||||
return &v1.PersistentVolume{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: obj.Name,
|
||||
Labels: map[string]string{
|
||||
pseudoPVLabel: "true",
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
volume.AnnBoundByController: "true",
|
||||
volume.AnnDynamicallyProvisioned: "k3k-kubelet",
|
||||
},
|
||||
},
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "PersistentVolume",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
FlexVolume: &v1.FlexPersistentVolumeSource{
|
||||
Driver: "pseudopv",
|
||||
},
|
||||
},
|
||||
StorageClassName: storageClass,
|
||||
VolumeMode: obj.Spec.VolumeMode,
|
||||
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
|
||||
AccessModes: obj.Spec.AccessModes,
|
||||
Capacity: obj.Spec.Resources.Requests,
|
||||
ClaimRef: &v1.ObjectReference{
|
||||
APIVersion: obj.APIVersion,
|
||||
UID: obj.UID,
|
||||
ResourceVersion: obj.ResourceVersion,
|
||||
Kind: obj.Kind,
|
||||
Namespace: obj.Namespace,
|
||||
Name: obj.Name,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
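A hedged, in-package test sketch of what pseudoPV above produces; the claim values and the test itself are illustrative assumptions, not part of this change:

package syncer

import (
	"testing"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/ptr"
)

// TestPseudoPVSketch is illustrative: the pseudo PV mirrors the claim's name, storage class,
// access modes and requested capacity, and is backed by the placeholder "pseudopv" flex
// driver so nothing is ever provisioned inside the virtual cluster.
func TestPseudoPVSketch(t *testing.T) {
	r := &PodReconciler{}

	pvc := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "data", Namespace: "default"},
		Spec: v1.PersistentVolumeClaimSpec{
			StorageClassName: ptr.To("test-sc"),
			AccessModes:      []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
			Resources: v1.VolumeResourceRequirements{
				Requests: v1.ResourceList{"storage": resource.MustParse("1G")},
			},
		},
	}

	pv := r.pseudoPV(pvc)

	if pv.Name != "data" || pv.Spec.StorageClassName != "test-sc" {
		t.Fatalf("unexpected pseudo PV metadata: %+v", pv.ObjectMeta)
	}

	if pv.Spec.PersistentVolumeSource.FlexVolume.Driver != "pseudopv" {
		t.Fatalf("expected the placeholder pseudopv flex driver")
	}
}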
256
k3k-kubelet/controller/syncer/priority_class_test.go
Normal file
@@ -0,0 +1,256 @@
|
||||
package syncer_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
schedulingv1 "k8s.io/api/scheduling/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var PriorityClassTests = func() {
|
||||
var (
|
||||
namespace string
|
||||
cluster v1alpha1.Cluster
|
||||
)
|
||||
|
||||
BeforeEach(func() {
|
||||
ctx := context.Background()
|
||||
|
||||
ns := v1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-"},
|
||||
}
|
||||
err := hostTestEnv.k8sClient.Create(ctx, &ns)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
namespace = ns.Name
|
||||
|
||||
cluster = v1alpha1.Cluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "cluster-",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1alpha1.ClusterSpec{
|
||||
Sync: &v1alpha1.SyncConfig{
|
||||
PriorityClasses: v1alpha1.PriorityClassSyncConfig{
|
||||
Enabled: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
err = hostTestEnv.k8sClient.Create(ctx, &cluster)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
err = syncer.AddPriorityClassSyncer(ctx, virtManager, hostManager, cluster.Name, cluster.Namespace)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
ns := v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
|
||||
err := hostTestEnv.k8sClient.Delete(context.Background(), &ns)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("creates a priorityClass on the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
priorityClass := &schedulingv1.PriorityClass{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "pc-",
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Value: 1001,
|
||||
}
|
||||
|
||||
err := virtTestEnv.k8sClient.Create(ctx, priorityClass)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created priorityClass %s in virtual cluster", priorityClass.Name))
|
||||
|
||||
var hostPriorityClass schedulingv1.PriorityClass
|
||||
hostPriorityClassName := translateName(cluster, priorityClass.Namespace, priorityClass.Name)
|
||||
|
||||
Eventually(func() error {
|
||||
key := client.ObjectKey{Name: hostPriorityClassName}
|
||||
return hostTestEnv.k8sClient.Get(ctx, key, &hostPriorityClass)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Created priorityClass %s in host cluster", hostPriorityClassName))
|
||||
|
||||
Expect(hostPriorityClass.Value).To(Equal(priorityClass.Value))
|
||||
Expect(hostPriorityClass.Labels).To(ContainElement("bar"))
|
||||
|
||||
GinkgoWriter.Printf("labels: %v\n", hostPriorityClass.Labels)
|
||||
})
|
||||
|
||||
It("updates a priorityClass on the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
priorityClass := &schedulingv1.PriorityClass{
|
||||
ObjectMeta: metav1.ObjectMeta{GenerateName: "pc-"},
|
||||
Value: 1001,
|
||||
}
|
||||
|
||||
err := virtTestEnv.k8sClient.Create(ctx, priorityClass)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created priorityClass %s in virtual cluster", priorityClass.Name))
|
||||
|
||||
var hostPriorityClass schedulingv1.PriorityClass
|
||||
hostPriorityClassName := translateName(cluster, priorityClass.Namespace, priorityClass.Name)
|
||||
|
||||
Eventually(func() error {
|
||||
key := client.ObjectKey{Name: hostPriorityClassName}
|
||||
return hostTestEnv.k8sClient.Get(ctx, key, &hostPriorityClass)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Created priorityClass %s in host cluster", hostPriorityClassName))
|
||||
|
||||
Expect(hostPriorityClass.Value).To(Equal(priorityClass.Value))
|
||||
Expect(hostPriorityClass.Labels).NotTo(ContainElement("bar"))
|
||||
|
||||
key := client.ObjectKeyFromObject(priorityClass)
|
||||
err = virtTestEnv.k8sClient.Get(ctx, key, priorityClass)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
priorityClass.Labels = map[string]string{"foo": "bar"}
|
||||
|
||||
// update virtual priorityClass
|
||||
err = virtTestEnv.k8sClient.Update(ctx, priorityClass)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(priorityClass.Labels).To(ContainElement("bar"))
|
||||
|
||||
// check hostPriorityClass
|
||||
Eventually(func() map[string]string {
|
||||
key := client.ObjectKey{Name: hostPriorityClassName}
|
||||
err = hostTestEnv.k8sClient.Get(ctx, key, &hostPriorityClass)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
return hostPriorityClass.Labels
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(ContainElement("bar"))
|
||||
})
|
||||
|
||||
It("deletes a priorityClass on the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
priorityClass := &schedulingv1.PriorityClass{
|
||||
ObjectMeta: metav1.ObjectMeta{GenerateName: "pc-"},
|
||||
Value: 1001,
|
||||
}
|
||||
|
||||
err := virtTestEnv.k8sClient.Create(ctx, priorityClass)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created priorityClass %s in virtual cluster", priorityClass.Name))
|
||||
|
||||
var hostPriorityClass schedulingv1.PriorityClass
|
||||
hostPriorityClassName := translateName(cluster, priorityClass.Namespace, priorityClass.Name)
|
||||
|
||||
Eventually(func() error {
|
||||
key := client.ObjectKey{Name: hostPriorityClassName}
|
||||
return hostTestEnv.k8sClient.Get(ctx, key, &hostPriorityClass)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Created priorityClass %s in host cluster", hostPriorityClassName))
|
||||
|
||||
Expect(hostPriorityClass.Value).To(Equal(priorityClass.Value))
|
||||
|
||||
err = virtTestEnv.k8sClient.Delete(ctx, priorityClass)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
Eventually(func() bool {
|
||||
key := client.ObjectKey{Name: hostPriorityClassName}
|
||||
err := hostTestEnv.k8sClient.Get(ctx, key, &hostPriorityClass)
|
||||
return apierrors.IsNotFound(err)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeTrue())
|
||||
})
|
||||
|
||||
It("creates a priorityClass on the host cluster with the globalDefault annotation", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
priorityClass := &schedulingv1.PriorityClass{
|
||||
ObjectMeta: metav1.ObjectMeta{GenerateName: "pc-"},
|
||||
Value: 1001,
|
||||
GlobalDefault: true,
|
||||
}
|
||||
|
||||
err := virtTestEnv.k8sClient.Create(ctx, priorityClass)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created priorityClass %s in virtual cluster", priorityClass.Name))
|
||||
|
||||
var hostPriorityClass schedulingv1.PriorityClass
|
||||
hostPriorityClassName := translateName(cluster, priorityClass.Namespace, priorityClass.Name)
|
||||
|
||||
Eventually(func() error {
|
||||
key := client.ObjectKey{Name: hostPriorityClassName}
|
||||
return hostTestEnv.k8sClient.Get(ctx, key, &hostPriorityClass)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Created priorityClass %s in host cluster without the GlobalDefault value", hostPriorityClassName))
|
||||
|
||||
Expect(hostPriorityClass.Value).To(Equal(priorityClass.Value))
|
||||
Expect(hostPriorityClass.GlobalDefault).To(BeFalse())
|
||||
Expect(hostPriorityClass.Annotations[syncer.PriorityClassGlobalDefaultAnnotation]).To(Equal("true"))
|
||||
})
|
||||
|
||||
It("will not create a priorityClass on the host cluster if disabled", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
cluster.Spec.Sync.PriorityClasses.Enabled = false
|
||||
err := hostTestEnv.k8sClient.Update(ctx, &cluster)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
priorityClass := &schedulingv1.PriorityClass{
|
||||
ObjectMeta: metav1.ObjectMeta{GenerateName: "pc-"},
|
||||
Value: 1001,
|
||||
}
|
||||
|
||||
err = virtTestEnv.k8sClient.Create(ctx, priorityClass)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created priorityClass %s in virtual cluster", priorityClass.Name))
|
||||
|
||||
var hostPriorityClass schedulingv1.PriorityClass
|
||||
hostPriorityClassName := translateName(cluster, priorityClass.Namespace, priorityClass.Name)
|
||||
|
||||
Eventually(func() bool {
|
||||
key := client.ObjectKey{Name: hostPriorityClassName}
|
||||
err = hostTestEnv.k8sClient.Get(ctx, key, &hostPriorityClass)
|
||||
return apierrors.IsNotFound(err)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeTrue())
|
||||
})
|
||||
}
|
||||
174
k3k-kubelet/controller/syncer/priorityclass.go
Normal file
@@ -0,0 +1,174 @@
package syncer

import (
	"context"
	"strings"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	schedulingv1 "k8s.io/api/scheduling/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	ctrl "sigs.k8s.io/controller-runtime"
	ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/rancher/k3k/k3k-kubelet/translate"
	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
)

const (
	PriorityClassGlobalDefaultAnnotation = "priorityclass.k3k.io/globalDefault"

	priorityClassControllerName = "priorityclass-syncer-controller"
	priorityClassFinalizerName  = "priorityclass.k3k.io/finalizer"
)

type PriorityClassSyncer struct {
	*SyncerContext
}

// AddPriorityClassSyncer adds a PriorityClass reconciler to k3k-kubelet
func AddPriorityClassSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
	// initialize a new Reconciler
	reconciler := PriorityClassSyncer{
		SyncerContext: &SyncerContext{
			ClusterName:      clusterName,
			ClusterNamespace: clusterNamespace,
			VirtualClient:    virtMgr.GetClient(),
			HostClient:       hostMgr.GetClient(),
			Translator: translate.ToHostTranslator{
				ClusterName:      clusterName,
				ClusterNamespace: clusterNamespace,
			},
		},
	}

	name := reconciler.Translator.TranslateName(clusterNamespace, priorityClassControllerName)

	return ctrl.NewControllerManagedBy(virtMgr).
		Named(name).
		For(&schedulingv1.PriorityClass{}).WithEventFilter(ignoreSystemPrefixPredicate).
		WithEventFilter(predicate.NewPredicateFuncs(reconciler.filterResources)).
		Complete(&reconciler)
}

// ignoreSystemPrefixPredicate filters out resources whose names start with "system-".
var ignoreSystemPrefixPredicate = predicate.Funcs{
	UpdateFunc: func(e event.UpdateEvent) bool {
		return !strings.HasPrefix(e.ObjectOld.GetName(), "system-")
	},
	CreateFunc: func(e event.CreateEvent) bool {
		return !strings.HasPrefix(e.Object.GetName(), "system-")
	},
	DeleteFunc: func(e event.DeleteEvent) bool {
		return !strings.HasPrefix(e.Object.GetName(), "system-")
	},
	GenericFunc: func(e event.GenericEvent) bool {
		return !strings.HasPrefix(e.Object.GetName(), "system-")
	},
}

func (r *PriorityClassSyncer) filterResources(object ctrlruntimeclient.Object) bool {
	var cluster v1alpha1.Cluster

	ctx := context.Background()

	if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
		return false
	}

	// check for priorityClassConfig
	syncConfig := cluster.Spec.Sync.PriorityClasses

	// If syncing is disabled, only process deletions to allow for cleanup.
	if !syncConfig.Enabled {
		return object.GetDeletionTimestamp() != nil
	}

	labelSelector := labels.SelectorFromSet(syncConfig.Selector)
	if labelSelector.Empty() {
		return true
	}

	return labelSelector.Matches(labels.Set(object.GetLabels()))
}

func (r *PriorityClassSyncer) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
	log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.ClusterName, "clusterNamespace", r.ClusterNamespace)
	ctx = ctrl.LoggerInto(ctx, log)

	var (
		priorityClass schedulingv1.PriorityClass
		cluster       v1alpha1.Cluster
	)

	if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
		return reconcile.Result{}, err
	}

	if err := r.VirtualClient.Get(ctx, req.NamespacedName, &priorityClass); err != nil {
		return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
	}

	hostPriorityClass := r.translatePriorityClass(priorityClass)

	// handle deletion
	if !priorityClass.DeletionTimestamp.IsZero() {
		// delete the synced priorityClass if it exists
		// TODO add test for previous implementation without err != nil check, and also check the other controllers
		if err := r.HostClient.Delete(ctx, hostPriorityClass); err != nil && !apierrors.IsNotFound(err) {
			return reconcile.Result{}, err
		}

		// remove the finalizer after cleaning up the synced priorityClass
		if controllerutil.RemoveFinalizer(&priorityClass, priorityClassFinalizerName) {
			if err := r.VirtualClient.Update(ctx, &priorityClass); err != nil {
				return reconcile.Result{}, err
			}
		}

		return reconcile.Result{}, nil
	}

	// Add finalizer if it does not exist
	if controllerutil.AddFinalizer(&priorityClass, priorityClassFinalizerName) {
		if err := r.VirtualClient.Update(ctx, &priorityClass); err != nil {
			return reconcile.Result{}, err
		}
	}

	// create the priorityClass on the host
	log.Info("creating the priorityClass for the first time on the host cluster")

	err := r.HostClient.Create(ctx, hostPriorityClass)
	if err != nil {
		if !apierrors.IsAlreadyExists(err) {
			return reconcile.Result{}, err
		}

		return reconcile.Result{}, r.HostClient.Update(ctx, hostPriorityClass)
	}

	return reconcile.Result{}, nil
}

func (r *PriorityClassSyncer) translatePriorityClass(priorityClass schedulingv1.PriorityClass) *schedulingv1.PriorityClass {
	hostPriorityClass := priorityClass.DeepCopy()
	r.Translator.TranslateTo(hostPriorityClass)

	if hostPriorityClass.Annotations == nil {
		hostPriorityClass.Annotations = make(map[string]string)
	}

	if hostPriorityClass.GlobalDefault {
		hostPriorityClass.GlobalDefault = false
		hostPriorityClass.Annotations[PriorityClassGlobalDefaultAnnotation] = "true"
	}

	return hostPriorityClass
}

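A note on the globalDefault handling above: the sketch below is a minimal, self-contained illustration of the same translation step, useful for seeing what ends up on the host. The object name is hypothetical, and the real syncer additionally rewrites names via translate.ToHostTranslator, which is omitted here.

package main

import (
	"fmt"

	schedulingv1 "k8s.io/api/scheduling/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Hypothetical PriorityClass as created inside the virtual cluster.
	virt := schedulingv1.PriorityClass{
		ObjectMeta:    metav1.ObjectMeta{Name: "tenant-critical"},
		Value:         1000,
		GlobalDefault: true,
	}

	// Same logic as translatePriorityClass: never let a tenant default become
	// the host default, but remember the intent in an annotation.
	host := virt.DeepCopy()
	if host.Annotations == nil {
		host.Annotations = map[string]string{}
	}
	if host.GlobalDefault {
		host.GlobalDefault = false
		host.Annotations["priorityclass.k3k.io/globalDefault"] = "true"
	}

	fmt.Println(host.GlobalDefault, host.Annotations)
}
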
k3k-kubelet/controller/syncer/secret.go (new file, 155 lines)
@@ -0,0 +1,155 @@
package syncer

import (
	"context"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	ctrl "sigs.k8s.io/controller-runtime"

	"github.com/rancher/k3k/k3k-kubelet/translate"
	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
)

const (
	secretControllerName = "secret-syncer"
	secretFinalizerName  = "secret.k3k.io/finalizer"
)

type SecretSyncer struct {
	// SyncerContext contains all client information for host and virtual cluster
	*SyncerContext
}

func (s *SecretSyncer) Name() string {
	return secretControllerName
}

// AddSecretSyncer adds secret syncer controller to the manager of the virtual cluster
func AddSecretSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
	reconciler := SecretSyncer{
		SyncerContext: &SyncerContext{
			VirtualClient: virtMgr.GetClient(),
			HostClient:    hostMgr.GetClient(),
			Translator: translate.ToHostTranslator{
				ClusterName:      clusterName,
				ClusterNamespace: clusterNamespace,
			},
			ClusterName:      clusterName,
			ClusterNamespace: clusterNamespace,
		},
	}

	name := reconciler.Translator.TranslateName(clusterNamespace, secretControllerName)

	return ctrl.NewControllerManagedBy(virtMgr).
		Named(name).
		For(&v1.Secret{}).WithEventFilter(predicate.NewPredicateFuncs(reconciler.filterResources)).
		Complete(&reconciler)
}

func (r *SecretSyncer) filterResources(object client.Object) bool {
	var cluster v1alpha1.Cluster

	ctx := context.Background()

	if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
		return false
	}

	// check for Secrets Sync Config
	syncConfig := cluster.Spec.Sync.Secrets

	// If syncing is disabled, only process deletions to allow for cleanup.
	if !syncConfig.Enabled {
		return object.GetDeletionTimestamp() != nil
	}

	labelSelector := labels.SelectorFromSet(syncConfig.Selector)
	if labelSelector.Empty() {
		return true
	}

	return labelSelector.Matches(labels.Set(object.GetLabels()))
}

// Reconcile implements reconcile.Reconciler and synchronizes the virtual cluster Secret to the host cluster
func (s *SecretSyncer) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
	log := ctrl.LoggerFrom(ctx).WithValues("cluster", s.ClusterName, "clusterNamespace", s.ClusterNamespace)
	ctx = ctrl.LoggerInto(ctx, log)

	var cluster v1alpha1.Cluster

	if err := s.HostClient.Get(ctx, types.NamespacedName{Name: s.ClusterName, Namespace: s.ClusterNamespace}, &cluster); err != nil {
		return reconcile.Result{}, err
	}

	var virtualSecret v1.Secret

	if err := s.VirtualClient.Get(ctx, req.NamespacedName, &virtualSecret); err != nil {
		return reconcile.Result{}, client.IgnoreNotFound(err)
	}

	syncedSecret := s.translateSecret(&virtualSecret)

	// handle deletion
	if !virtualSecret.DeletionTimestamp.IsZero() {
		// delete the synced secret if it exists
		if err := s.HostClient.Delete(ctx, syncedSecret); err != nil && !apierrors.IsNotFound(err) {
			return reconcile.Result{}, err
		}

		// remove the finalizer after cleaning up the synced secret
		if controllerutil.RemoveFinalizer(&virtualSecret, secretFinalizerName) {
			if err := s.VirtualClient.Update(ctx, &virtualSecret); err != nil {
				return reconcile.Result{}, err
			}
		}

		return reconcile.Result{}, nil
	}

	// Add finalizer if it does not exist
	if controllerutil.AddFinalizer(&virtualSecret, secretFinalizerName) {
		if err := s.VirtualClient.Update(ctx, &virtualSecret); err != nil {
			return reconcile.Result{}, err
		}
	}

	var hostSecret v1.Secret
	if err := s.HostClient.Get(ctx, types.NamespacedName{Name: syncedSecret.Name, Namespace: syncedSecret.Namespace}, &hostSecret); err != nil {
		if apierrors.IsNotFound(err) {
			log.Info("creating the Secret for the first time on the host cluster")
			return reconcile.Result{}, s.HostClient.Create(ctx, syncedSecret)
		}

		return reconcile.Result{}, err
	}

	// TODO: Add option to keep labels/annotation set by the host cluster
	log.Info("updating Secret on the host cluster")

	return reconcile.Result{}, s.HostClient.Update(ctx, syncedSecret)
}

// translateSecret translates a Secret created in the virtual cluster into its host cluster counterpart
func (s *SecretSyncer) translateSecret(secret *v1.Secret) *v1.Secret {
	hostSecret := secret.DeepCopy()

	if hostSecret.Type == v1.SecretTypeServiceAccountToken {
		hostSecret.Type = v1.SecretTypeOpaque
	}

	s.Translator.TranslateTo(hostSecret)

	return hostSecret
}

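For orientation, the secret syncer above only acts when secret syncing is enabled on the Cluster resource. Below is a hedged sketch of such a spec, using the same fields the tests below construct; the cluster name and namespace are placeholders, and the optional label Selector consulted by filterResources is omitted.

package example // sketch only

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
)

// exampleCluster sketches a Cluster with Secret syncing switched on,
// mirroring the spec built in secret_test.go below.
func exampleCluster() v1alpha1.Cluster {
	return v1alpha1.Cluster{
		ObjectMeta: metav1.ObjectMeta{Name: "tenant-a", Namespace: "k3k-tenant-a"}, // placeholder names
		Spec: v1alpha1.ClusterSpec{
			Sync: &v1alpha1.SyncConfig{
				Secrets: v1alpha1.SecretSyncConfig{Enabled: true},
			},
		},
	}
}
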
k3k-kubelet/controller/syncer/secret_test.go (new file, 240 lines)
@@ -0,0 +1,240 @@
|
||||
package syncer_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var SecretTests = func() {
|
||||
var (
|
||||
namespace string
|
||||
cluster v1alpha1.Cluster
|
||||
)
|
||||
|
||||
BeforeEach(func() {
|
||||
ctx := context.Background()
|
||||
|
||||
ns := v1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-"},
|
||||
}
|
||||
err := hostTestEnv.k8sClient.Create(ctx, &ns)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
namespace = ns.Name
|
||||
|
||||
cluster = v1alpha1.Cluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "cluster-",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1alpha1.ClusterSpec{
|
||||
Sync: &v1alpha1.SyncConfig{
|
||||
Secrets: v1alpha1.SecretSyncConfig{
|
||||
Enabled: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
err = hostTestEnv.k8sClient.Create(ctx, &cluster)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
err = syncer.AddSecretSyncer(ctx, virtManager, hostManager, cluster.Name, cluster.Namespace)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
ns := v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
|
||||
err := hostTestEnv.k8sClient.Delete(context.Background(), &ns)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("creates a Secret on the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
secret := &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "secret-",
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"foo": []byte("bar"),
|
||||
},
|
||||
}
|
||||
|
||||
err := virtTestEnv.k8sClient.Create(ctx, secret)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created Secret %s in virtual cluster", secret.Name))
|
||||
|
||||
var hostSecret v1.Secret
|
||||
hostSecretName := translateName(cluster, secret.Namespace, secret.Name)
|
||||
|
||||
Eventually(func() error {
|
||||
key := client.ObjectKey{Name: hostSecretName, Namespace: namespace}
|
||||
return hostTestEnv.k8sClient.Get(ctx, key, &hostSecret)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Created Secret %s in host cluster", hostSecretName))
|
||||
|
||||
Expect(hostSecret.Data).To(Equal(secret.Data))
|
||||
Expect(hostSecret.Labels).To(ContainElement("bar"))
|
||||
|
||||
GinkgoWriter.Printf("labels: %v\n", hostSecret.Labels)
|
||||
})
|
||||
|
||||
It("updates a Secret on the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
secret := &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "secret-",
|
||||
Namespace: "default",
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"foo": []byte("bar"),
|
||||
},
|
||||
}
|
||||
|
||||
err := virtTestEnv.k8sClient.Create(ctx, secret)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created secret %s in virtual cluster", secret.Name))
|
||||
|
||||
var hostSecret v1.Secret
|
||||
hostSecretName := translateName(cluster, secret.Namespace, secret.Name)
|
||||
|
||||
Eventually(func() error {
|
||||
key := client.ObjectKey{Name: hostSecretName, Namespace: namespace}
|
||||
return hostTestEnv.k8sClient.Get(ctx, key, &hostSecret)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Created secret %s in host cluster", hostSecretName))
|
||||
|
||||
Expect(hostSecret.Data).To(Equal(secret.Data))
|
||||
Expect(hostSecret.Labels).NotTo(ContainElement("bar"))
|
||||
|
||||
key := client.ObjectKeyFromObject(secret)
|
||||
err = virtTestEnv.k8sClient.Get(ctx, key, secret)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
secret.Labels = map[string]string{"foo": "bar"}
|
||||
|
||||
// update virtual secret
|
||||
err = virtTestEnv.k8sClient.Update(ctx, secret)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(secret.Labels).To(ContainElement("bar"))
|
||||
|
||||
// check hostSecret
|
||||
Eventually(func() map[string]string {
|
||||
key := client.ObjectKey{Name: hostSecretName, Namespace: namespace}
|
||||
err = hostTestEnv.k8sClient.Get(ctx, key, &hostSecret)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
return hostSecret.Labels
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(ContainElement("bar"))
|
||||
})
|
||||
|
||||
It("deletes a secret on the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
secret := &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "secret-",
|
||||
Namespace: "default",
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"foo": []byte("bar"),
|
||||
},
|
||||
}
|
||||
|
||||
err := virtTestEnv.k8sClient.Create(ctx, secret)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created secret %s in virtual cluster", secret.Name))
|
||||
|
||||
var hostSecret v1.Secret
|
||||
hostSecretName := translateName(cluster, secret.Namespace, secret.Name)
|
||||
|
||||
Eventually(func() error {
|
||||
key := client.ObjectKey{Name: hostSecretName, Namespace: namespace}
|
||||
return hostTestEnv.k8sClient.Get(ctx, key, &hostSecret)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Created secret %s in host cluster", hostSecretName))
|
||||
|
||||
Expect(hostSecret.Data).To(Equal(secret.Data))
|
||||
|
||||
err = virtTestEnv.k8sClient.Delete(ctx, secret)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
Eventually(func() bool {
|
||||
key := client.ObjectKey{Name: hostSecretName, Namespace: namespace}
|
||||
err := hostTestEnv.k8sClient.Get(ctx, key, &hostSecret)
|
||||
return apierrors.IsNotFound(err)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeTrue())
|
||||
})
|
||||
It("will not create a secret on the host cluster if disabled", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
cluster.Spec.Sync.Secrets.Enabled = false
|
||||
err := hostTestEnv.k8sClient.Update(ctx, &cluster)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
secret := &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "secret-",
|
||||
Namespace: "default",
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"foo": []byte("bar"),
|
||||
},
|
||||
}
|
||||
|
||||
err = virtTestEnv.k8sClient.Create(ctx, secret)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created secret %s in virtual cluster", secret.Name))
|
||||
|
||||
var hostSecret v1.Secret
|
||||
hostSecretName := translateName(cluster, secret.Namespace, secret.Name)
|
||||
|
||||
Eventually(func() bool {
|
||||
key := client.ObjectKey{Name: hostSecretName, Namespace: namespace}
|
||||
err = hostTestEnv.k8sClient.Get(ctx, key, &hostSecret)
|
||||
return apierrors.IsNotFound(err)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeTrue())
|
||||
})
|
||||
}
|
||||
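All of the filterResources methods in this package share the same selector logic (an empty selector means "sync everything"). The standalone sketch below isolates just that check, built only on upstream apimachinery calls; the function name is hypothetical.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

// shouldSync mirrors the selector check used by the syncers' filterResources:
// an empty selector matches every object, otherwise the object's labels must
// match the configured set.
func shouldSync(selector, objectLabels map[string]string) bool {
	sel := labels.SelectorFromSet(selector)
	if sel.Empty() {
		return true
	}

	return sel.Matches(labels.Set(objectLabels))
}

func main() {
	fmt.Println(shouldSync(nil, map[string]string{"foo": "bar"}))                                 // true
	fmt.Println(shouldSync(map[string]string{"sync": "true"}, nil))                               // false
	fmt.Println(shouldSync(map[string]string{"sync": "true"}, map[string]string{"sync": "true"})) // true
}
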
k3k-kubelet/controller/syncer/service.go (new file, 152 lines)
@@ -0,0 +1,152 @@
package syncer

import (
	"context"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	ctrl "sigs.k8s.io/controller-runtime"
	ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/rancher/k3k/k3k-kubelet/translate"
	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
)

const (
	serviceControllerName = "service-syncer-controller"
	serviceFinalizerName  = "service.k3k.io/finalizer"
)

type ServiceReconciler struct {
	*SyncerContext
}

// AddServiceSyncer adds service syncer controller to the manager of the virtual cluster
func AddServiceSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
	translator := translate.ToHostTranslator{
		ClusterName:      clusterName,
		ClusterNamespace: clusterNamespace,
	}

	reconciler := ServiceReconciler{
		SyncerContext: &SyncerContext{
			ClusterName:      clusterName,
			ClusterNamespace: clusterNamespace,
			VirtualClient:    virtMgr.GetClient(),
			HostClient:       hostMgr.GetClient(),
			Translator:       translator,
		},
	}

	name := reconciler.Translator.TranslateName(clusterNamespace, serviceControllerName)

	return ctrl.NewControllerManagedBy(virtMgr).
		Named(name).
		For(&v1.Service{}).WithEventFilter(predicate.NewPredicateFuncs(reconciler.filterResources)).
		Complete(&reconciler)
}

func (r *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
	log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.ClusterName, "clusterNamespace", r.ClusterNamespace)
	ctx = ctrl.LoggerInto(ctx, log)

	if req.Name == "kubernetes" || req.Name == "kube-dns" {
		return reconcile.Result{}, nil
	}

	var (
		virtService v1.Service
		cluster     v1alpha1.Cluster
	)

	if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
		return reconcile.Result{}, err
	}

	if err := r.VirtualClient.Get(ctx, req.NamespacedName, &virtService); err != nil {
		return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
	}

	syncedService := r.service(&virtService)
	if err := controllerutil.SetControllerReference(&cluster, syncedService, r.HostClient.Scheme()); err != nil {
		return reconcile.Result{}, err
	}

	// handle deletion
	if !virtService.DeletionTimestamp.IsZero() {
		// delete the synced service if it exists
		if err := r.HostClient.Delete(ctx, syncedService); err != nil {
			return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
		}

		// remove the finalizer after cleaning up the synced service
		if controllerutil.RemoveFinalizer(&virtService, serviceFinalizerName) {
			if err := r.VirtualClient.Update(ctx, &virtService); err != nil {
				return reconcile.Result{}, err
			}
		}

		return reconcile.Result{}, nil
	}

	// Add finalizer if it does not exist
	if controllerutil.AddFinalizer(&virtService, serviceFinalizerName) {
		if err := r.VirtualClient.Update(ctx, &virtService); err != nil {
			return reconcile.Result{}, err
		}
	}

	// create or update the service on host
	var hostService v1.Service
	if err := r.HostClient.Get(ctx, types.NamespacedName{Name: syncedService.Name, Namespace: r.ClusterNamespace}, &hostService); err != nil {
		if apierrors.IsNotFound(err) {
			log.Info("creating the service for the first time on the host cluster")
			return reconcile.Result{}, r.HostClient.Create(ctx, syncedService)
		}

		return reconcile.Result{}, err
	}

	log.Info("updating service on the host cluster")

	return reconcile.Result{}, r.HostClient.Update(ctx, syncedService)
}

func (r *ServiceReconciler) filterResources(object ctrlruntimeclient.Object) bool {
	var cluster v1alpha1.Cluster

	ctx := context.Background()

	if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
		return false
	}

	// check for serviceSyncConfig
	syncConfig := cluster.Spec.Sync.Services

	// If syncing is disabled, only process deletions to allow for cleanup.
	if !syncConfig.Enabled {
		return object.GetDeletionTimestamp() != nil
	}

	labelSelector := labels.SelectorFromSet(syncConfig.Selector)
	if labelSelector.Empty() {
		return true
	}

	return labelSelector.Matches(labels.Set(object.GetLabels()))
}

func (s *ServiceReconciler) service(obj *v1.Service) *v1.Service {
	hostService := obj.DeepCopy()
	s.Translator.TranslateTo(hostService)
	// don't sync finalizers to the host
	return hostService
}

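Unlike the Secret syncer, the Service syncer above sets the Cluster as the controller owner of the synced object, so host-side Services are garbage-collected when the Cluster is deleted. A reduced sketch of that wiring follows; the scheme setup mirrors what the test suite below does, whereas the controller itself reuses the host manager's scheme, and the package and function names here are hypothetical.

package example // sketch only

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
)

// ownHostService marks the host copy of a Service as controlled by the Cluster,
// the same call the ServiceReconciler makes before creating or updating it.
func ownHostService(cluster *v1alpha1.Cluster, hostSvc *v1.Service) error {
	scheme := runtime.NewScheme()
	if err := clientgoscheme.AddToScheme(scheme); err != nil {
		return err
	}

	if err := v1alpha1.AddToScheme(scheme); err != nil {
		return err
	}

	return controllerutil.SetControllerReference(cluster, hostSvc, scheme)
}
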
k3k-kubelet/controller/syncer/service_test.go (new file, 276 lines)
@@ -0,0 +1,276 @@
|
||||
package syncer_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var ServiceTests = func() {
|
||||
var (
|
||||
namespace string
|
||||
cluster v1alpha1.Cluster
|
||||
)
|
||||
|
||||
BeforeEach(func() {
|
||||
ctx := context.Background()
|
||||
|
||||
ns := v1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-"},
|
||||
}
|
||||
err := hostTestEnv.k8sClient.Create(ctx, &ns)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
namespace = ns.Name
|
||||
|
||||
cluster = v1alpha1.Cluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "cluster-",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1alpha1.ClusterSpec{
|
||||
Sync: &v1alpha1.SyncConfig{
|
||||
Services: v1alpha1.ServiceSyncConfig{
|
||||
Enabled: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
err = hostTestEnv.k8sClient.Create(ctx, &cluster)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
err = syncer.AddServiceSyncer(ctx, virtManager, hostManager, cluster.Name, cluster.Namespace)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
ns := v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
|
||||
err := hostTestEnv.k8sClient.Delete(context.Background(), &ns)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("creates a service on the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
service := &v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "service-",
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Type: v1.ServiceTypeNodePort,
|
||||
Ports: []v1.ServicePort{
|
||||
{
|
||||
Name: "test-port",
|
||||
Port: 8888,
|
||||
TargetPort: intstr.FromInt32(8888),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := virtTestEnv.k8sClient.Create(ctx, service)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created service %s in virtual cluster", service.Name))
|
||||
|
||||
var hostService v1.Service
|
||||
hostServiceName := translateName(cluster, service.Namespace, service.Name)
|
||||
|
||||
Eventually(func() error {
|
||||
key := client.ObjectKey{Name: hostServiceName, Namespace: namespace}
|
||||
return hostTestEnv.k8sClient.Get(ctx, key, &hostService)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Created Service %s in host cluster", hostServiceName))
|
||||
|
||||
Expect(hostService.Spec.Type).To(Equal(v1.ServiceTypeNodePort))
|
||||
Expect(hostService.Spec.Ports[0].Name).To(Equal("test-port"))
|
||||
Expect(hostService.Spec.Ports[0].Port).To(Equal(int32(8888)))
|
||||
|
||||
GinkgoWriter.Printf("labels: %v\n", hostService.Labels)
|
||||
})
|
||||
|
||||
It("updates a service on the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
service := &v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "service-",
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Type: v1.ServiceTypeNodePort,
|
||||
Ports: []v1.ServicePort{
|
||||
{
|
||||
Name: "test-port",
|
||||
Port: 8888,
|
||||
TargetPort: intstr.FromInt32(8888),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := virtTestEnv.k8sClient.Create(ctx, service)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created service %s in virtual cluster", service.Name))
|
||||
|
||||
var hostService v1.Service
|
||||
hostServiceName := translateName(cluster, service.Namespace, service.Name)
|
||||
|
||||
Eventually(func() error {
|
||||
key := client.ObjectKey{Name: hostServiceName, Namespace: namespace}
|
||||
return hostTestEnv.k8sClient.Get(ctx, key, &hostService)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Created Service %s in host cluster", hostServiceName))
|
||||
|
||||
Expect(hostService.Spec.Type).To(Equal(v1.ServiceTypeNodePort))
|
||||
Expect(hostService.Spec.Ports[0].Name).To(Equal("test-port"))
|
||||
Expect(hostService.Spec.Ports[0].Port).To(Equal(int32(8888)))
|
||||
|
||||
key := client.ObjectKeyFromObject(service)
|
||||
err = virtTestEnv.k8sClient.Get(ctx, key, service)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
service.Spec.Ports[0].Name = "test-port-updated"
|
||||
|
||||
// update virtual service
|
||||
err = virtTestEnv.k8sClient.Update(ctx, service)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// check hostService
|
||||
Eventually(func() string {
|
||||
key := client.ObjectKey{Name: hostServiceName, Namespace: namespace}
|
||||
err = hostTestEnv.k8sClient.Get(ctx, key, &hostService)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
return hostService.Spec.Ports[0].Name
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(Equal("test-port-updated"))
|
||||
})
|
||||
|
||||
It("deletes a service on the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
service := &v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "service-",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Type: v1.ServiceTypeNodePort,
|
||||
Ports: []v1.ServicePort{
|
||||
{
|
||||
Name: "test-port",
|
||||
Port: 8888,
|
||||
TargetPort: intstr.FromInt32(8888),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := virtTestEnv.k8sClient.Create(ctx, service)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created service %s in virtual cluster", service.Name))
|
||||
|
||||
var hostService v1.Service
|
||||
hostServiceName := translateName(cluster, service.Namespace, service.Name)
|
||||
|
||||
Eventually(func() error {
|
||||
key := client.ObjectKey{Name: hostServiceName, Namespace: namespace}
|
||||
return hostTestEnv.k8sClient.Get(ctx, key, &hostService)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Created service %s in host cluster", hostServiceName))
|
||||
|
||||
Expect(hostService.Spec.Type).To(Equal(v1.ServiceTypeNodePort))
|
||||
Expect(hostService.Spec.Ports[0].Name).To(Equal("test-port"))
|
||||
Expect(hostService.Spec.Ports[0].Port).To(Equal(int32(8888)))
|
||||
|
||||
err = virtTestEnv.k8sClient.Delete(ctx, service)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
Eventually(func() bool {
|
||||
key := client.ObjectKey{Name: hostServiceName, Namespace: namespace}
|
||||
err := hostTestEnv.k8sClient.Get(ctx, key, &hostService)
|
||||
return apierrors.IsNotFound(err)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeTrue())
|
||||
})
|
||||
|
||||
It("will not create a service on the host cluster if disabled", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
cluster.Spec.Sync.Services.Enabled = false
|
||||
err := hostTestEnv.k8sClient.Update(ctx, &cluster)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
service := &v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "service-",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Type: v1.ServiceTypeNodePort,
|
||||
Ports: []v1.ServicePort{
|
||||
{
|
||||
Name: "test-port",
|
||||
Port: 8888,
|
||||
TargetPort: intstr.FromInt32(8888),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err = virtTestEnv.k8sClient.Create(ctx, service)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created service %s in virtual cluster", service.Name))
|
||||
|
||||
var hostService v1.Service
|
||||
hostServiceName := translateName(cluster, service.Namespace, service.Name)
|
||||
|
||||
Eventually(func() bool {
|
||||
key := client.ObjectKey{Name: hostServiceName, Namespace: namespace}
|
||||
err = hostTestEnv.k8sClient.Get(ctx, key, &hostService)
|
||||
return apierrors.IsNotFound(err)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeTrue())
|
||||
})
|
||||
}
|
||||
k3k-kubelet/controller/syncer/syncer.go (new file, 15 lines)
@@ -0,0 +1,15 @@
package syncer

import (
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/rancher/k3k/k3k-kubelet/translate"
)

type SyncerContext struct {
	ClusterName      string
	ClusterNamespace string
	VirtualClient    client.Client
	HostClient       client.Client
	Translator       translate.ToHostTranslator
}

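SyncerContext is the piece every syncer above embeds. The sketch below shows how the Add*Syncer constructors wire one up, written as a free function so the shape is visible in one place; the package and function names are hypothetical.

package example // sketch only

import (
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
	"github.com/rancher/k3k/k3k-kubelet/translate"
)

// newSyncerContext mirrors what AddSecretSyncer and AddServiceSyncer build:
// the same cluster name/namespace drives both the Cluster lookups on the host
// and the name translation of synced objects.
func newSyncerContext(virt, host client.Client, name, namespace string) *syncer.SyncerContext {
	return &syncer.SyncerContext{
		ClusterName:      name,
		ClusterNamespace: namespace,
		VirtualClient:    virt,
		HostClient:       host,
		Translator: translate.ToHostTranslator{
			ClusterName:      name,
			ClusterNamespace: namespace,
		},
	}
}
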
k3k-kubelet/controller/syncer/syncer_suite_test.go (new file, 184 lines)
@@ -0,0 +1,184 @@
|
||||
package syncer_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/go-logr/zapr"
|
||||
"go.uber.org/zap"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/envtest"
|
||||
|
||||
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
func TestController(t *testing.T) {
|
||||
RegisterFailHandler(Fail)
|
||||
RunSpecs(t, "Cluster Controller Suite")
|
||||
}
|
||||
|
||||
type TestEnv struct {
|
||||
*envtest.Environment
|
||||
k8s *kubernetes.Clientset
|
||||
k8sClient client.Client
|
||||
}
|
||||
|
||||
var (
|
||||
hostTestEnv *TestEnv
|
||||
hostManager ctrl.Manager
|
||||
virtTestEnv *TestEnv
|
||||
virtManager ctrl.Manager
|
||||
)
|
||||
|
||||
var _ = BeforeSuite(func() {
|
||||
hostTestEnv = NewTestEnv()
|
||||
By("HOST testEnv running at :" + hostTestEnv.ControlPlane.APIServer.Port)
|
||||
|
||||
virtTestEnv = NewTestEnv()
|
||||
By("VIRT testEnv running at :" + virtTestEnv.ControlPlane.APIServer.Port)
|
||||
|
||||
ctrl.SetLogger(zapr.NewLogger(zap.NewNop()))
|
||||
ctrl.SetupSignalHandler()
|
||||
})
|
||||
|
||||
var _ = AfterSuite(func() {
|
||||
By("tearing down the test environment")
|
||||
|
||||
err := hostTestEnv.Stop()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
err = virtTestEnv.Stop()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
tmpKubebuilderDir := path.Join(os.TempDir(), "kubebuilder")
|
||||
err = os.RemoveAll(tmpKubebuilderDir)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
func NewTestEnv() *TestEnv {
|
||||
GinkgoHelper()
|
||||
|
||||
binaryAssetsDirectory := os.Getenv("KUBEBUILDER_ASSETS")
|
||||
if binaryAssetsDirectory == "" {
|
||||
binaryAssetsDirectory = "/usr/local/kubebuilder/bin"
|
||||
}
|
||||
|
||||
tmpKubebuilderDir := path.Join(os.TempDir(), "kubebuilder")
|
||||
|
||||
if err := os.Mkdir(tmpKubebuilderDir, 0o755); !errors.Is(err, os.ErrExist) {
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
|
||||
tempDir, err := os.MkdirTemp(tmpKubebuilderDir, "envtest-*")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
err = os.CopyFS(tempDir, os.DirFS(binaryAssetsDirectory))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("bootstrapping test environment")
|
||||
|
||||
testEnv := &envtest.Environment{
|
||||
CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "charts", "k3k", "crds")},
|
||||
ErrorIfCRDPathMissing: true,
|
||||
BinaryAssetsDirectory: tempDir,
|
||||
Scheme: buildScheme(),
|
||||
}
|
||||
|
||||
cfg, err := testEnv.Start()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
k8s, err := kubernetes.NewForConfig(cfg)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
k8sClient, err := client.New(cfg, client.Options{Scheme: testEnv.Scheme})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
return &TestEnv{
|
||||
Environment: testEnv,
|
||||
k8s: k8s,
|
||||
k8sClient: k8sClient,
|
||||
}
|
||||
}
|
||||
|
||||
func buildScheme() *runtime.Scheme {
|
||||
scheme := runtime.NewScheme()
|
||||
|
||||
err := clientgoscheme.AddToScheme(scheme)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
err = v1alpha1.AddToScheme(scheme)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
return scheme
|
||||
}
|
||||
|
||||
var _ = Describe("Kubelet Controller", func() {
|
||||
var (
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
)
|
||||
|
||||
BeforeEach(func() {
|
||||
var err error
|
||||
ctx, cancel = context.WithCancel(context.Background())
|
||||
|
||||
hostManager, err = ctrl.NewManager(hostTestEnv.Config, ctrl.Options{
|
||||
// disable the metrics server
|
||||
Metrics: metricsserver.Options{BindAddress: "0"},
|
||||
Scheme: hostTestEnv.Scheme,
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
virtManager, err = ctrl.NewManager(virtTestEnv.Config, ctrl.Options{
|
||||
// disable the metrics server
|
||||
Metrics: metricsserver.Options{BindAddress: "0"},
|
||||
Scheme: virtTestEnv.Scheme,
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
go func() {
|
||||
defer GinkgoRecover()
|
||||
err := hostManager.Start(ctx)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to run host manager")
|
||||
}()
|
||||
|
||||
go func() {
|
||||
defer GinkgoRecover()
|
||||
err := virtManager.Start(ctx)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to run virt manager")
|
||||
}()
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
cancel()
|
||||
})
|
||||
|
||||
Describe("PriorityClass Syncer", PriorityClassTests)
|
||||
Describe("ConfigMap Syncer", ConfigMapTests)
|
||||
Describe("Secret Syncer", SecretTests)
|
||||
Describe("Service Syncer", ServiceTests)
|
||||
Describe("Ingress Syncer", IngressTests)
|
||||
Describe("PersistentVolumeClaim Syncer", PVCTests)
|
||||
})
|
||||
|
||||
func translateName(cluster v1alpha1.Cluster, namespace, name string) string {
|
||||
translator := translate.ToHostTranslator{
|
||||
ClusterName: cluster.Name,
|
||||
ClusterNamespace: cluster.Namespace,
|
||||
}
|
||||
|
||||
return translator.TranslateName(namespace, name)
|
||||
}
|
||||
@@ -7,24 +7,25 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/rancher/k3k/pkg/controller/cluster/agent"
|
||||
"github.com/rancher/k3k/pkg/log"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/utils/ptr"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
|
||||
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/utils/ptr"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
|
||||
"github.com/rancher/k3k/pkg/controller/cluster/agent"
|
||||
"github.com/rancher/k3k/pkg/log"
|
||||
)
|
||||
|
||||
const (
|
||||
webhookName = "podmutator.k3k.io"
|
||||
webhookTimeout = int32(10)
|
||||
webhookPort = "9443"
|
||||
webhookPath = "/mutate--v1-pod"
|
||||
FieldpathField = "k3k.io/fieldpath"
|
||||
)
|
||||
@@ -32,17 +33,17 @@ const (
|
||||
type webhookHandler struct {
|
||||
client ctrlruntimeclient.Client
|
||||
scheme *runtime.Scheme
|
||||
nodeName string
|
||||
serviceName string
|
||||
clusterName string
|
||||
clusterNamespace string
|
||||
logger *log.Logger
|
||||
webhookPort int
|
||||
}
|
||||
|
||||
// AddPodMutatorWebhook will add a mutator webhook to the virtual cluster to
|
||||
// modify the nodeName of the created pods with the name of the virtual kubelet node name
|
||||
// as well as remove any status fields of the downward apis env fields
|
||||
func AddPodMutatorWebhook(ctx context.Context, mgr manager.Manager, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, nodeName, serviceName string, logger *log.Logger) error {
|
||||
func AddPodMutatorWebhook(ctx context.Context, mgr manager.Manager, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, serviceName string, logger *log.Logger, webhookPort int) error {
|
||||
handler := webhookHandler{
|
||||
client: mgr.GetClient(),
|
||||
scheme: mgr.GetScheme(),
|
||||
@@ -50,7 +51,7 @@ func AddPodMutatorWebhook(ctx context.Context, mgr manager.Manager, hostClient c
|
||||
serviceName: serviceName,
|
||||
clusterName: clusterName,
|
||||
clusterNamespace: clusterNamespace,
|
||||
nodeName: nodeName,
|
||||
webhookPort: webhookPort,
|
||||
}
|
||||
|
||||
// create mutator webhook configuration to the cluster
|
||||
@@ -58,6 +59,7 @@ func AddPodMutatorWebhook(ctx context.Context, mgr manager.Manager, hostClient c
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := handler.client.Create(ctx, config); err != nil {
|
||||
if !apierrors.IsAlreadyExists(err) {
|
||||
return err
|
||||
@@ -72,14 +74,13 @@ func (w *webhookHandler) Default(ctx context.Context, obj runtime.Object) error
|
||||
if !ok {
|
||||
return fmt.Errorf("invalid request: object was type %t not cluster", obj)
|
||||
}
|
||||
|
||||
w.logger.Infow("mutator webhook request", "Pod", pod.Name, "Namespace", pod.Namespace)
|
||||
if pod.Spec.NodeName == "" {
|
||||
pod.Spec.NodeName = w.nodeName
|
||||
}
|
||||
// look for status.* fields in the env
|
||||
if pod.Annotations == nil {
|
||||
pod.Annotations = make(map[string]string)
|
||||
}
|
||||
|
||||
for i, container := range pod.Spec.Containers {
|
||||
for j, env := range container.Env {
|
||||
if env.ValueFrom == nil || env.ValueFrom.FieldRef == nil {
|
||||
@@ -94,22 +95,26 @@ func (w *webhookHandler) Default(ctx context.Context, obj runtime.Object) error
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *webhookHandler) configuration(ctx context.Context, hostClient ctrlruntimeclient.Client) (*admissionregistrationv1.MutatingWebhookConfiguration, error) {
|
||||
w.logger.Infow("extracting webhook tls from host cluster")
|
||||
var (
|
||||
webhookTLSSecret v1.Secret
|
||||
)
|
||||
|
||||
var webhookTLSSecret v1.Secret
|
||||
|
||||
if err := hostClient.Get(ctx, types.NamespacedName{Name: agent.WebhookSecretName(w.clusterName), Namespace: w.clusterNamespace}, &webhookTLSSecret); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
caBundle, ok := webhookTLSSecret.Data["ca.crt"]
|
||||
if !ok {
|
||||
return nil, errors.New("webhook CABundle does not exist in secret")
|
||||
}
|
||||
webhookURL := "https://" + w.serviceName + ":" + webhookPort + webhookPath
|
||||
|
||||
webhookURL := fmt.Sprintf("https://%s:%d%s", w.serviceName, w.webhookPort, webhookPath)
|
||||
|
||||
return &admissionregistrationv1.MutatingWebhookConfiguration{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: "admissionregistration.k8s.io/v1",
|
||||
@@ -156,10 +161,12 @@ func ParseFieldPathAnnotationKey(annotationKey string) (int, string, error) {
|
||||
if len(s) != 3 {
|
||||
return -1, "", errors.New("fieldpath annotation is not set correctly")
|
||||
}
|
||||
|
||||
containerIndex, err := strconv.Atoi(s[1])
|
||||
if err != nil {
|
||||
return -1, "", err
|
||||
}
|
||||
|
||||
envName := s[2]
|
||||
|
||||
return containerIndex, envName, nil
|
||||
|
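The hunk above switches the mutating-webhook client config from a hard-coded port constant to the kubelet's configurable webhookPort, built with fmt.Sprintf. The tiny sketch below reproduces only the resulting URL construction, with placeholder values.

package main

import "fmt"

func main() {
	// placeholder values; the real ones come from the kubelet config and the constants above
	serviceName, webhookPort, webhookPath := "k3k-tenant-a-webhook", 9443, "/mutate--v1-pod"

	webhookURL := fmt.Sprintf("https://%s:%d%s", serviceName, webhookPort, webhookPath)
	fmt.Println(webhookURL) // https://k3k-tenant-a-webhook:9443/mutate--v1-pod
}
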
||||
@@ -8,10 +8,34 @@ import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/go-logr/zapr"
|
||||
"github.com/virtual-kubelet/virtual-kubelet/log"
|
||||
"github.com/virtual-kubelet/virtual-kubelet/node"
|
||||
"github.com/virtual-kubelet/virtual-kubelet/node/nodeutil"
|
||||
"go.uber.org/zap"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apiserver/pkg/authentication/user"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"sigs.k8s.io/controller-runtime/pkg/cache"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/webhook"
|
||||
|
||||
certutil "github.com/rancher/dynamiclistener/cert"
|
||||
k3kkubeletcontroller "github.com/rancher/k3k/k3k-kubelet/controller"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
||||
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
ctrlserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
|
||||
k3kwebhook "github.com/rancher/k3k/k3k-kubelet/controller/webhook"
|
||||
"github.com/rancher/k3k/k3k-kubelet/provider"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
@@ -20,26 +44,6 @@ import (
|
||||
"github.com/rancher/k3k/pkg/controller/cluster/server"
|
||||
"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
|
||||
k3klog "github.com/rancher/k3k/pkg/log"
|
||||
"github.com/virtual-kubelet/virtual-kubelet/log"
|
||||
"github.com/virtual-kubelet/virtual-kubelet/node"
|
||||
"github.com/virtual-kubelet/virtual-kubelet/node/nodeutil"
|
||||
"go.uber.org/zap"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apiserver/pkg/authentication/user"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
||||
"k8s.io/client-go/util/retry"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/cache"
|
||||
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
ctrlserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
|
||||
"sigs.k8s.io/controller-runtime/pkg/webhook"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -71,7 +75,7 @@ type kubelet struct {
|
||||
}
|
||||
|
||||
func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet, error) {
|
||||
hostConfig, err := clientcmd.BuildConfigFromFlags("", c.HostConfigPath)
|
||||
hostConfig, err := clientcmd.BuildConfigFromFlags("", c.HostKubeconfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -82,7 +86,8 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
virtConfig, err := virtRestConfig(ctx, c.VirtualConfigPath, hostClient, c.ClusterName, c.ClusterNamespace, c.Token, logger)
|
||||
|
||||
virtConfig, err := virtRestConfig(ctx, c.VirtKubeconfig, hostClient, c.ClusterName, c.ClusterNamespace, c.Token, logger)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -92,10 +97,23 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ctrl.SetLogger(zapr.NewLogger(logger.Desugar().WithOptions(zap.AddCallerSkip(1))))
|
||||
|
||||
hostMetricsBindAddress := ":8083"
|
||||
virtualMetricsBindAddress := ":8084"
|
||||
|
||||
if c.MirrorHostNodes {
|
||||
hostMetricsBindAddress = "0"
|
||||
virtualMetricsBindAddress = "0"
|
||||
}
|
||||
|
||||
hostMgr, err := ctrl.NewManager(hostConfig, manager.Options{
|
||||
Scheme: baseScheme,
|
||||
Scheme: baseScheme,
|
||||
LeaderElection: true,
|
||||
LeaderElectionNamespace: c.ClusterNamespace,
|
||||
LeaderElectionID: c.ClusterName,
|
||||
Metrics: ctrlserver.Options{
|
||||
BindAddress: ":8083",
|
||||
BindAddress: hostMetricsBindAddress,
|
||||
},
|
||||
Cache: cache.Options{
|
||||
DefaultNamespaces: map[string]cache.Config{
|
||||
@@ -107,38 +125,39 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
|
||||
return nil, errors.New("unable to create controller-runtime mgr for host cluster: " + err.Error())
|
||||
}
|
||||
|
||||
virtualScheme := runtime.NewScheme()
|
||||
// virtual client will only use core types (for now), no need to add anything other than the basics
|
||||
err = clientgoscheme.AddToScheme(virtualScheme)
|
||||
if err != nil {
|
||||
virtualScheme := runtime.NewScheme()
|
||||
if err := clientgoscheme.AddToScheme(virtualScheme); err != nil {
|
||||
return nil, errors.New("unable to add client go types to virtual cluster scheme: " + err.Error())
|
||||
}
|
||||
|
||||
webhookServer := webhook.NewServer(webhook.Options{
|
||||
CertDir: "/opt/rancher/k3k-webhook",
|
||||
Port: c.WebhookPort,
|
||||
})
|
||||
|
||||
virtualMgr, err := ctrl.NewManager(virtConfig, manager.Options{
|
||||
Scheme: virtualScheme,
|
||||
WebhookServer: webhookServer,
|
||||
Scheme: virtualScheme,
|
||||
WebhookServer: webhookServer,
|
||||
LeaderElection: true,
|
||||
LeaderElectionNamespace: "kube-system",
|
||||
LeaderElectionID: c.ClusterName,
|
||||
Metrics: ctrlserver.Options{
|
||||
BindAddress: ":8084",
|
||||
BindAddress: virtualMetricsBindAddress,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.New("unable to create controller-runtime mgr for virtual cluster: " + err.Error())
|
||||
}
|
||||
|
||||
logger.Info("adding pod mutator webhook")
|
||||
if err := k3kwebhook.AddPodMutatorWebhook(ctx, virtualMgr, hostClient, c.ClusterName, c.ClusterNamespace, c.AgentHostname, c.ServiceName, logger); err != nil {
|
||||
|
||||
if err := k3kwebhook.AddPodMutatorWebhook(ctx, virtualMgr, hostClient, c.ClusterName, c.ClusterNamespace, c.ServiceName, logger, c.WebhookPort); err != nil {
|
||||
return nil, errors.New("unable to add pod mutator webhook for virtual cluster: " + err.Error())
|
||||
}
|
||||
|
||||
logger.Info("adding service syncer controller")
|
||||
if err := k3kkubeletcontroller.AddServiceSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace, k3klog.New(false)); err != nil {
|
||||
return nil, errors.New("failed to add service syncer controller: " + err.Error())
|
||||
}
|
||||
|
||||
logger.Info("adding pvc syncer controller")
|
||||
if err := k3kkubeletcontroller.AddPVCSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace, k3klog.New(false)); err != nil {
|
||||
return nil, errors.New("failed to add pvc syncer controller: " + err.Error())
|
||||
if err := addControllers(ctx, hostMgr, virtualMgr, c, hostClient); err != nil {
|
||||
return nil, errors.New("failed to add controller: " + err.Error())
|
||||
}
|
||||
|
||||
clusterIP, err := clusterIP(ctx, c.ServiceName, c.ClusterNamespace, hostClient)
|
||||
@@ -148,6 +167,7 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
|
||||
|
||||
// get the cluster's DNS IP to be injected to pods
|
||||
var dnsService v1.Service
|
||||
|
||||
dnsName := controller.SafeConcatNameWithPrefix(c.ClusterName, "kube-dns")
|
||||
if err := hostClient.Get(ctx, types.NamespacedName{Name: dnsName, Namespace: c.ClusterNamespace}, &dnsService); err != nil {
|
||||
return nil, errors.New("failed to get the DNS service for the cluster: " + err.Error())
|
||||
@@ -172,27 +192,36 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
|
||||
logger: logger.Named(k3kKubeletName),
|
||||
token: c.Token,
|
||||
dnsIP: dnsService.Spec.ClusterIP,
|
||||
port: c.KubeletPort,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func clusterIP(ctx context.Context, serviceName, clusterNamespace string, hostClient ctrlruntimeclient.Client) (string, error) {
|
||||
var service v1.Service
|
||||
serviceKey := types.NamespacedName{Namespace: clusterNamespace, Name: serviceName}
|
||||
|
||||
serviceKey := types.NamespacedName{
|
||||
Namespace: clusterNamespace,
|
||||
Name: serviceName,
|
||||
}
|
||||
|
||||
if err := hostClient.Get(ctx, serviceKey, &service); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return service.Spec.ClusterIP, nil
|
||||
}
|
||||
|
||||
func (k *kubelet) registerNode(ctx context.Context, agentIP, srvPort, namespace, name, hostname, serverIP, dnsIP, version string) error {
|
||||
providerFunc := k.newProviderFunc(namespace, name, hostname, agentIP, serverIP, dnsIP, version)
|
||||
nodeOpts := k.nodeOpts(ctx, srvPort, namespace, name, hostname, agentIP)
|
||||
func (k *kubelet) registerNode(ctx context.Context, agentIP string, cfg config) error {
|
||||
providerFunc := k.newProviderFunc(cfg)
|
||||
nodeOpts := k.nodeOpts(ctx, cfg.KubeletPort, cfg.ClusterNamespace, cfg.ClusterName, cfg.AgentHostname, agentIP)
|
||||
|
||||
var err error
|
||||
|
||||
k.node, err = nodeutil.NewNode(k.name, providerFunc, nodeutil.WithClient(k.virtClient), nodeOpts)
|
||||
if err != nil {
|
||||
return errors.New("unable to start kubelet: " + err.Error())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -225,41 +254,47 @@ func (k *kubelet) start(ctx context.Context) {
|
||||
if err := k.node.WaitReady(context.Background(), time.Minute*1); err != nil {
|
||||
k.logger.Fatalw("node was not ready within timeout of 1 minute", zap.Error(err))
|
||||
}
|
||||
|
||||
<-k.node.Done()
|
||||
|
||||
if err := k.node.Err(); err != nil {
|
||||
k.logger.Fatalw("node stopped with an error", zap.Error(err))
|
||||
}
|
||||
|
||||
k.logger.Info("node exited successfully")
|
||||
}
|
||||
|
||||
func (k *kubelet) newProviderFunc(namespace, name, hostname, agentIP, serverIP, dnsIP, version string) nodeutil.NewProviderFunc {
|
||||
func (k *kubelet) newProviderFunc(cfg config) nodeutil.NewProviderFunc {
|
||||
return func(pc nodeutil.ProviderConfig) (nodeutil.Provider, node.NodeProvider, error) {
|
||||
utilProvider, err := provider.New(*k.hostConfig, k.hostMgr, k.virtualMgr, k.logger, namespace, name, serverIP, dnsIP)
|
||||
utilProvider, err := provider.New(*k.hostConfig, k.hostMgr, k.virtualMgr, k.logger, cfg.ClusterNamespace, cfg.ClusterName, cfg.ServerIP, k.dnsIP)
|
||||
if err != nil {
|
||||
return nil, nil, errors.New("unable to make nodeutil provider: " + err.Error())
|
||||
}
|
||||
|
||||
provider.ConfigureNode(k.logger, pc.Node, hostname, k.port, agentIP, utilProvider.CoreClient, utilProvider.VirtualClient, k.virtualCluster, version)
|
||||
provider.ConfigureNode(k.logger, pc.Node, cfg.AgentHostname, k.port, k.agentIP, utilProvider.CoreClient, utilProvider.VirtualClient, k.virtualCluster, cfg.Version, cfg.MirrorHostNodes)
|
||||
|
||||
return utilProvider, &provider.Node{}, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (k *kubelet) nodeOpts(ctx context.Context, srvPort, namespace, name, hostname, agentIP string) nodeutil.NodeOpt {
|
||||
func (k *kubelet) nodeOpts(ctx context.Context, srvPort int, namespace, name, hostname, agentIP string) nodeutil.NodeOpt {
|
||||
return func(c *nodeutil.NodeConfig) error {
|
||||
c.HTTPListenAddr = fmt.Sprintf(":%s", srvPort)
|
||||
c.HTTPListenAddr = fmt.Sprintf(":%d", srvPort)
|
||||
// set up the routes
|
||||
mux := http.NewServeMux()
|
||||
if err := nodeutil.AttachProviderRoutes(mux)(c); err != nil {
|
||||
return errors.New("unable to attach routes: " + err.Error())
|
||||
}
|
||||
|
||||
c.Handler = mux
|
||||
|
||||
tlsConfig, err := loadTLSConfig(ctx, k.hostClient, name, namespace, k.name, hostname, k.token, agentIP)
|
||||
if err != nil {
|
||||
return errors.New("unable to get tls config: " + err.Error())
|
||||
}
|
||||
|
||||
c.TLSConfig = tlsConfig
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
@@ -273,8 +308,11 @@ func virtRestConfig(ctx context.Context, virtualConfigPath string, hostClient ct
|
||||
if err := hostClient.Get(ctx, types.NamespacedName{Namespace: clusterNamespace, Name: clusterName}, &cluster); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
endpoint := server.ServiceName(cluster.Name) + "." + cluster.Namespace
|
||||
|
||||
var b *bootstrap.ControlRuntimeBootstrap
|
||||
|
||||
if err := retry.OnError(controller.Backoff, func(err error) bool {
|
||||
return err != nil
|
||||
}, func() error {
|
||||
@@ -285,20 +323,26 @@ func virtRestConfig(ctx context.Context, virtualConfigPath string, hostClient ct
|
||||
}); err != nil {
|
||||
return nil, errors.New("unable to decode bootstrap: " + err.Error())
|
||||
}
|
||||
|
||||
adminCert, adminKey, err := certs.CreateClientCertKey(
|
||||
controller.AdminCommonName, []string{user.SystemPrivilegedGroup},
|
||||
nil, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, time.Hour*24*time.Duration(356),
|
||||
controller.AdminCommonName,
|
||||
[]string{user.SystemPrivilegedGroup},
|
||||
nil, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
|
||||
time.Hour*24*time.Duration(356),
|
||||
b.ClientCA.Content,
|
||||
b.ClientCAKey.Content)
|
||||
b.ClientCAKey.Content,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
url := fmt.Sprintf("https://%s:%d", server.ServiceName(cluster.Name), server.ServerPort)
|
||||
url := "https://" + server.ServiceName(cluster.Name)
|
||||
|
||||
kubeconfigData, err := kubeconfigBytes(url, []byte(b.ServerCA.Content), adminCert, adminKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return clientcmd.RESTConfigFromKubeConfig(kubeconfigData)
|
||||
}
|
||||
|
||||
@@ -330,10 +374,13 @@ func loadTLSConfig(ctx context.Context, hostClient ctrlruntimeclient.Client, clu
|
||||
cluster v1alpha1.Cluster
|
||||
b *bootstrap.ControlRuntimeBootstrap
|
||||
)
|
||||
|
||||
if err := hostClient.Get(ctx, types.NamespacedName{Name: clusterName, Namespace: clusterNamespace}, &cluster); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
endpoint := fmt.Sprintf("%s.%s", server.ServiceName(cluster.Name), cluster.Namespace)
|
||||
|
||||
if err := retry.OnError(controller.Backoff, func(err error) bool {
|
||||
return err != nil
|
||||
}, func() error {
|
||||
@@ -343,27 +390,35 @@ func loadTLSConfig(ctx context.Context, hostClient ctrlruntimeclient.Client, clu
|
||||
}); err != nil {
|
||||
return nil, errors.New("unable to decode bootstrap: " + err.Error())
|
||||
}
|
||||
// POD IP
|
||||
podIP := net.ParseIP(os.Getenv("POD_IP"))
|
||||
ip := net.ParseIP(agentIP)
|
||||
|
||||
altNames := certutil.AltNames{
|
||||
DNSNames: []string{hostname},
|
||||
IPs: []net.IP{ip},
|
||||
IPs: []net.IP{ip, podIP},
|
||||
}
|
||||
|
||||
cert, key, err := certs.CreateClientCertKey(nodeName, nil, &altNames, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, 0, b.ServerCA.Content, b.ServerCAKey.Content)
|
||||
if err != nil {
|
||||
return nil, errors.New("unable to get cert and key: " + err.Error())
|
||||
}
|
||||
|
||||
clientCert, err := tls.X509KeyPair(cert, key)
|
||||
if err != nil {
|
||||
return nil, errors.New("unable to get key pair: " + err.Error())
|
||||
}
|
||||
|
||||
// create rootCA CertPool
|
||||
certs, err := certutil.ParseCertsPEM([]byte(b.ServerCA.Content))
|
||||
if err != nil {
|
||||
return nil, errors.New("unable to create ca certs: " + err.Error())
|
||||
}
|
||||
|
||||
if len(certs) < 1 {
|
||||
return nil, errors.New("ca cert is not parsed correctly")
|
||||
}
|
||||
|
||||
pool := x509.NewCertPool()
|
||||
pool.AddCert(certs[0])
|
||||
|
||||
@@ -372,3 +427,56 @@ func loadTLSConfig(ctx context.Context, hostClient ctrlruntimeclient.Client, clu
|
||||
Certificates: []tls.Certificate{clientCert},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func addControllers(ctx context.Context, hostMgr, virtualMgr manager.Manager, c *config, hostClient ctrlruntimeclient.Client) error {
|
||||
var cluster v1alpha1.Cluster
|
||||
|
||||
objKey := types.NamespacedName{
|
||||
Namespace: c.ClusterNamespace,
|
||||
Name: c.ClusterName,
|
||||
}
|
||||
|
||||
if err := hostClient.Get(ctx, objKey, &cluster); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := syncer.AddConfigMapSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
|
||||
return errors.New("failed to add configmap global syncer: " + err.Error())
|
||||
}
|
||||
|
||||
if err := syncer.AddSecretSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
|
||||
return errors.New("failed to add secret global syncer: " + err.Error())
|
||||
}
|
||||
|
||||
logger.Info("adding service syncer controller")
|
||||
|
||||
if err := syncer.AddServiceSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
|
||||
return errors.New("failed to add service syncer controller: " + err.Error())
|
||||
}
|
||||
|
||||
logger.Info("adding ingress syncer controller")
|
||||
|
||||
if err := syncer.AddIngressSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
|
||||
return errors.New("failed to add ingress syncer controller: " + err.Error())
|
||||
}
|
||||
|
||||
logger.Info("adding pvc syncer controller")
|
||||
|
||||
if err := syncer.AddPVCSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
|
||||
return errors.New("failed to add pvc syncer controller: " + err.Error())
|
||||
}
|
||||
|
||||
logger.Info("adding pod pvc controller")
|
||||
|
||||
if err := syncer.AddPodPVCController(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
|
||||
return errors.New("failed to add pod pvc controller: " + err.Error())
|
||||
}
|
||||
|
||||
logger.Info("adding priorityclass controller")
|
||||
|
||||
if err := syncer.AddPriorityClassSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
|
||||
return errors.New("failed to add priorityclass controller: " + err.Error())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -2,14 +2,21 @@ package main

import (
"context"
"errors"
"fmt"
"os"
"strings"

"github.com/go-logr/zapr"
"github.com/rancher/k3k/pkg/log"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
"go.uber.org/zap"

ctrlruntimelog "sigs.k8s.io/controller-runtime/pkg/log"

"github.com/rancher/k3k/pkg/log"
)

var (
@@ -20,115 +27,101 @@ var (
)

func main() {
|
||||
app := cli.NewApp()
|
||||
app.Name = "k3k-kubelet"
|
||||
app.Usage = "virtual kubelet implementation k3k"
|
||||
app.Flags = []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "cluster-name",
|
||||
Usage: "Name of the k3k cluster",
|
||||
Destination: &cfg.ClusterName,
|
||||
EnvVars: []string{"CLUSTER_NAME"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "cluster-namespace",
|
||||
Usage: "Namespace of the k3k cluster",
|
||||
Destination: &cfg.ClusterNamespace,
|
||||
EnvVars: []string{"CLUSTER_NAMESPACE"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "cluster-token",
|
||||
Usage: "K3S token of the k3k cluster",
|
||||
Destination: &cfg.Token,
|
||||
EnvVars: []string{"CLUSTER_TOKEN"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "host-config-path",
|
||||
Usage: "Path to the host kubeconfig, if empty then virtual-kubelet will use incluster config",
|
||||
Destination: &cfg.HostConfigPath,
|
||||
EnvVars: []string{"HOST_KUBECONFIG"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "virtual-config-path",
|
||||
Usage: "Path to the k3k cluster kubeconfig, if empty then virtual-kubelet will create its own config from k3k cluster",
|
||||
Destination: &cfg.VirtualConfigPath,
|
||||
EnvVars: []string{"CLUSTER_NAME"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "kubelet-port",
|
||||
Usage: "kubelet API port number",
|
||||
Destination: &cfg.KubeletPort,
|
||||
EnvVars: []string{"SERVER_PORT"},
|
||||
Value: "10250",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "service-name",
|
||||
Usage: "The service name deployed by the k3k controller",
|
||||
Destination: &cfg.ServiceName,
|
||||
EnvVars: []string{"SERVICE_NAME"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "agent-hostname",
|
||||
Usage: "Agent Hostname used for TLS SAN for the kubelet server",
|
||||
Destination: &cfg.AgentHostname,
|
||||
EnvVars: []string{"AGENT_HOSTNAME"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "server-ip",
|
||||
Usage: "Server IP used for registering the virtual kubelet to the cluster",
|
||||
Destination: &cfg.ServerIP,
|
||||
EnvVars: []string{"SERVER_IP"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "version",
|
||||
Usage: "Version of kubernetes server",
|
||||
Destination: &cfg.Version,
|
||||
EnvVars: []string{"VERSION"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "config",
|
||||
Usage: "Path to k3k-kubelet config file",
|
||||
Destination: &configFile,
|
||||
EnvVars: []string{"CONFIG_FILE"},
|
||||
Value: "/etc/rancher/k3k/config.yaml",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "debug",
|
||||
Usage: "Enable debug logging",
|
||||
Destination: &debug,
|
||||
EnvVars: []string{"DEBUG"},
|
||||
rootCmd := &cobra.Command{
|
||||
Use: "k3k-kubelet",
|
||||
Short: "virtual kubelet implementation k3k",
|
||||
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
if err := InitializeConfig(cmd); err != nil {
|
||||
return err
|
||||
}
|
||||
logger = log.New(debug)
|
||||
ctrlruntimelog.SetLogger(zapr.NewLogger(logger.Desugar().WithOptions(zap.AddCallerSkip(1))))
|
||||
return nil
|
||||
},
|
||||
RunE: run,
|
||||
}
|
||||
app.Before = func(clx *cli.Context) error {
|
||||
logger = log.New(debug)
|
||||
ctrlruntimelog.SetLogger(zapr.NewLogger(logger.Desugar().WithOptions(zap.AddCallerSkip(1))))
|
||||
return nil
|
||||
}
|
||||
app.Action = run
|
||||
if err := app.Run(os.Args); err != nil {
|
||||
|
||||
rootCmd.PersistentFlags().StringVar(&cfg.ClusterName, "cluster-name", "", "Name of the k3k cluster")
|
||||
rootCmd.PersistentFlags().StringVar(&cfg.ClusterNamespace, "cluster-namespace", "", "Namespace of the k3k cluster")
|
||||
rootCmd.PersistentFlags().StringVar(&cfg.Token, "token", "", "K3S token of the k3k cluster")
|
||||
rootCmd.PersistentFlags().StringVar(&cfg.HostKubeconfig, "host-kubeconfig", "", "Path to the host kubeconfig, if empty then virtual-kubelet will use incluster config")
|
||||
rootCmd.PersistentFlags().StringVar(&cfg.VirtKubeconfig, "virt-kubeconfig", "", "Path to the k3k cluster kubeconfig, if empty then virtual-kubelet will create its own config from k3k cluster")
|
||||
rootCmd.PersistentFlags().IntVar(&cfg.KubeletPort, "kubelet-port", 0, "kubelet API port number")
|
||||
rootCmd.PersistentFlags().IntVar(&cfg.WebhookPort, "webhook-port", 0, "Webhook port number")
|
||||
rootCmd.PersistentFlags().StringVar(&cfg.ServiceName, "service-name", "", "The service name deployed by the k3k controller")
|
||||
rootCmd.PersistentFlags().StringVar(&cfg.AgentHostname, "agent-hostname", "", "Agent Hostname used for TLS SAN for the kubelet server")
|
||||
rootCmd.PersistentFlags().StringVar(&cfg.ServerIP, "server-ip", "", "Server IP used for registering the virtual kubelet to the cluster")
|
||||
rootCmd.PersistentFlags().StringVar(&cfg.Version, "version", "", "Version of kubernetes server")
|
||||
rootCmd.PersistentFlags().StringVar(&configFile, "config", "/opt/rancher/k3k/config.yaml", "Path to k3k-kubelet config file")
|
||||
rootCmd.PersistentFlags().BoolVar(&debug, "debug", false, "Enable debug logging")
|
||||
rootCmd.PersistentFlags().BoolVar(&cfg.MirrorHostNodes, "mirror-host-nodes", false, "Mirror real node objects from host cluster")
|
||||
|
||||
if err := rootCmd.Execute(); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func run(clx *cli.Context) error {
func run(cmd *cobra.Command, args []string) error {
ctx := context.Background()
if err := cfg.parse(configFile); err != nil {
logger.Fatalw("failed to parse config file", "path", configFile, zap.Error(err))
}

if err := cfg.validate(); err != nil {
logger.Fatalw("failed to validate config", zap.Error(err))
}
k, err := newKubelet(ctx, &cfg, logger)
if err != nil {
logger.Fatalw("failed to create new virtual kubelet instance", zap.Error(err))
return fmt.Errorf("failed to validate config: %w", err)
}

if err := k.registerNode(ctx, k.agentIP, cfg.KubeletPort, cfg.ClusterNamespace, cfg.ClusterName, cfg.AgentHostname, cfg.ServerIP, k.dnsIP, cfg.Version); err != nil {
logger.Fatalw("failed to register new node", zap.Error(err))
k, err := newKubelet(ctx, &cfg, logger)
if err != nil {
return fmt.Errorf("failed to create new virtual kubelet instance: %w", err)
}

if err := k.registerNode(ctx, k.agentIP, cfg); err != nil {
return fmt.Errorf("failed to register new node: %w", err)
}

k.start(ctx)

return nil
}

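// Note that with the cobra wiring above, configuration is resolved in PersistentPreRunE
// (InitializeConfig plus logger setup) before RunE dispatches to run, so by the time run
// executes, cfg has already been populated from flags, environment variables and the config file.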
// InitializeConfig sets up viper to read from config file, environment variables, and flags.
// It uses a `flatcase` convention for viper keys to match the (lowercased) config file keys,
// while flags remain in kebab-case.
func InitializeConfig(cmd *cobra.Command) error {
var err error

// Bind every cobra flag to a viper key.
// The viper key will be the flag name with dashes removed (flatcase).
// e.g. "cluster-name" becomes "clustername"
cmd.Flags().VisitAll(func(f *pflag.Flag) {
configName := strings.ReplaceAll(f.Name, "-", "")
envName := strings.ToUpper(strings.ReplaceAll(f.Name, "-", "_"))

err = errors.Join(err, viper.BindPFlag(configName, f))
err = errors.Join(err, viper.BindEnv(configName, envName))
})

if err != nil {
return err
}

configFile = viper.GetString("config")
viper.SetConfigFile(configFile)

if err := viper.ReadInConfig(); err != nil {
var notFoundErr viper.ConfigFileNotFoundError
if errors.As(err, &notFoundErr) || errors.Is(err, os.ErrNotExist) {
return fmt.Errorf("no config file found: %w", err)
} else {
return fmt.Errorf("failed to read config file: %w", err)
}
}

// Unmarshal all configuration into the global cfg struct.
// Viper correctly handles the precedence of flags > env > config.
if err := viper.Unmarshal(&cfg); err != nil {
return fmt.Errorf("failed to unmarshal config: %w", err)
}
// Separately get the debug flag, as it's not part of the main config struct.
debug = viper.GetBool("debug")

return nil
}

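// As an illustration of the flatcase mapping above (a sketch only; the exact keys depend on the
// fields of the config struct, which is not part of this hunk), a YAML file passed via --config
// could look like:
//
//	clustername: mycluster
//	clusternamespace: default
//	kubeletport: 10250
//
// where the values are placeholders. A --cluster-name flag or a CLUSTER_NAME environment
// variable would still override the file, since viper resolves flags > env > config as noted
// in the comment above.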
@@ -10,8 +10,8 @@ package collectors
|
||||
import (
|
||||
"time"
|
||||
|
||||
stats "github.com/virtual-kubelet/virtual-kubelet/node/api/statsv1alpha1"
|
||||
compbasemetrics "k8s.io/component-base/metrics"
|
||||
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
|
||||
)
|
||||
|
||||
// defining metrics
|
||||
@@ -91,14 +91,20 @@ var _ compbasemetrics.StableCollector = &resourceMetricsCollector{}
|
||||
|
||||
// DescribeWithStability implements compbasemetrics.StableCollector
|
||||
func (rc *resourceMetricsCollector) DescribeWithStability(ch chan<- *compbasemetrics.Desc) {
|
||||
ch <- nodeCPUUsageDesc
|
||||
ch <- nodeMemoryUsageDesc
|
||||
ch <- containerStartTimeDesc
|
||||
ch <- containerCPUUsageDesc
|
||||
ch <- containerMemoryUsageDesc
|
||||
ch <- podCPUUsageDesc
|
||||
ch <- podMemoryUsageDesc
|
||||
ch <- resourceScrapeResultDesc
|
||||
descs := []*compbasemetrics.Desc{
|
||||
nodeCPUUsageDesc,
|
||||
nodeMemoryUsageDesc,
|
||||
containerStartTimeDesc,
|
||||
containerCPUUsageDesc,
|
||||
containerMemoryUsageDesc,
|
||||
podCPUUsageDesc,
|
||||
podMemoryUsageDesc,
|
||||
resourceScrapeResultDesc,
|
||||
}
|
||||
|
||||
for _, desc := range descs {
|
||||
ch <- desc
|
||||
}
|
||||
}
|
||||
|
||||
// CollectWithStability implements compbasemetrics.StableCollector
|
||||
@@ -107,6 +113,7 @@ func (rc *resourceMetricsCollector) DescribeWithStability(ch chan<- *compbasemet
|
||||
// custom collector in a way that only collects metrics for active containers.
|
||||
func (rc *resourceMetricsCollector) CollectWithStability(ch chan<- compbasemetrics.Metric) {
|
||||
var errorCount float64
|
||||
|
||||
defer func() {
|
||||
ch <- compbasemetrics.NewLazyConstMetric(resourceScrapeResultDesc, compbasemetrics.GaugeValue, errorCount)
|
||||
}()
|
||||
@@ -121,6 +128,7 @@ func (rc *resourceMetricsCollector) CollectWithStability(ch chan<- compbasemetri
|
||||
rc.collectContainerCPUMetrics(ch, pod, container)
|
||||
rc.collectContainerMemoryMetrics(ch, pod, container)
|
||||
}
|
||||
|
||||
rc.collectPodCPUMetrics(ch, pod)
|
||||
rc.collectPodMemoryMetrics(ch, pod)
|
||||
}
|
||||
|
||||
@@ -4,56 +4,71 @@ import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
k3klog "github.com/rancher/k3k/pkg/log"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
typedv1 "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
typedv1 "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
k3klog "github.com/rancher/k3k/pkg/log"
|
||||
)
|
||||
|
||||
func ConfigureNode(logger *k3klog.Logger, node *v1.Node, hostname string, servicePort int, ip string, coreClient typedv1.CoreV1Interface, virtualClient client.Client, virtualCluster v1alpha1.Cluster, version string) {
|
||||
node.Status.Conditions = nodeConditions()
|
||||
node.Status.DaemonEndpoints.KubeletEndpoint.Port = int32(servicePort)
|
||||
node.Status.Addresses = []v1.NodeAddress{
|
||||
{
|
||||
Type: v1.NodeHostName,
|
||||
Address: hostname,
|
||||
},
|
||||
{
|
||||
Type: v1.NodeInternalIP,
|
||||
Address: ip,
|
||||
},
|
||||
}
|
||||
|
||||
node.Labels["node.kubernetes.io/exclude-from-external-load-balancers"] = "true"
|
||||
node.Labels["kubernetes.io/os"] = "linux"
|
||||
|
||||
// configure versions
|
||||
node.Status.NodeInfo.KubeletVersion = version
|
||||
node.Status.NodeInfo.KubeProxyVersion = version
|
||||
|
||||
updateNodeCapacityInterval := 10 * time.Second
|
||||
ticker := time.NewTicker(updateNodeCapacityInterval)
|
||||
|
||||
go func() {
|
||||
for range ticker.C {
|
||||
if err := updateNodeCapacity(coreClient, virtualClient, node.Name, virtualCluster.Spec.NodeSelector); err != nil {
|
||||
logger.Error("error updating node capacity", err)
|
||||
}
|
||||
func ConfigureNode(logger *k3klog.Logger, node *corev1.Node, hostname string, servicePort int, ip string, coreClient typedv1.CoreV1Interface, virtualClient client.Client, virtualCluster v1alpha1.Cluster, version string, mirrorHostNodes bool) {
ctx := context.Background()
if mirrorHostNodes {
hostNode, err := coreClient.Nodes().Get(ctx, node.Name, metav1.GetOptions{})
if err != nil {
logger.Fatal("error getting host node for mirroring", err)
}
}()

node.Spec = *hostNode.Spec.DeepCopy()
node.Status = *hostNode.Status.DeepCopy()
node.Labels = hostNode.GetLabels()
node.Annotations = hostNode.GetAnnotations()
node.Finalizers = hostNode.GetFinalizers()
node.Status.DaemonEndpoints.KubeletEndpoint.Port = int32(servicePort)
} else {
node.Status.Conditions = nodeConditions()
node.Status.DaemonEndpoints.KubeletEndpoint.Port = int32(servicePort)
node.Status.Addresses = []corev1.NodeAddress{
{
Type: corev1.NodeHostName,
Address: hostname,
},
{
Type: corev1.NodeInternalIP,
Address: ip,
},
}

node.Labels["node.kubernetes.io/exclude-from-external-load-balancers"] = "true"
node.Labels["kubernetes.io/os"] = "linux"

// configure versions
node.Status.NodeInfo.KubeletVersion = version

updateNodeCapacityInterval := 10 * time.Second
ticker := time.NewTicker(updateNodeCapacityInterval)

go func() {
for range ticker.C {
if err := updateNodeCapacity(ctx, coreClient, virtualClient, node.Name, virtualCluster.Spec.NodeSelector); err != nil {
logger.Error("error updating node capacity", err)
}
}
}()
}
}

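// In short: with --mirror-host-nodes the virtual node mirrors the host node of the same name
// (spec, status, labels, annotations and finalizers are copied, and only the kubelet endpoint
// port is overridden), while in the default mode a synthetic node is built whose capacity and
// allocatable values are recomputed every 10 seconds from the host nodes matched by the
// cluster's nodeSelector (see updateNodeCapacity below).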
// nodeConditions returns the basic conditions which mark the node as ready
|
||||
func nodeConditions() []v1.NodeCondition {
|
||||
return []v1.NodeCondition{
|
||||
func nodeConditions() []corev1.NodeCondition {
|
||||
return []corev1.NodeCondition{
|
||||
{
|
||||
Type: "Ready",
|
||||
Status: v1.ConditionTrue,
|
||||
Status: corev1.ConditionTrue,
|
||||
LastHeartbeatTime: metav1.Now(),
|
||||
LastTransitionTime: metav1.Now(),
|
||||
Reason: "KubeletReady",
|
||||
@@ -61,7 +76,7 @@ func nodeConditions() []v1.NodeCondition {
|
||||
},
|
||||
{
|
||||
Type: "OutOfDisk",
|
||||
Status: v1.ConditionFalse,
|
||||
Status: corev1.ConditionFalse,
|
||||
LastHeartbeatTime: metav1.Now(),
|
||||
LastTransitionTime: metav1.Now(),
|
||||
Reason: "KubeletHasSufficientDisk",
|
||||
@@ -69,7 +84,7 @@ func nodeConditions() []v1.NodeCondition {
|
||||
},
|
||||
{
|
||||
Type: "MemoryPressure",
|
||||
Status: v1.ConditionFalse,
|
||||
Status: corev1.ConditionFalse,
|
||||
LastHeartbeatTime: metav1.Now(),
|
||||
LastTransitionTime: metav1.Now(),
|
||||
Reason: "KubeletHasSufficientMemory",
|
||||
@@ -77,7 +92,7 @@ func nodeConditions() []v1.NodeCondition {
|
||||
},
|
||||
{
|
||||
Type: "DiskPressure",
|
||||
Status: v1.ConditionFalse,
|
||||
Status: corev1.ConditionFalse,
|
||||
LastHeartbeatTime: metav1.Now(),
|
||||
LastTransitionTime: metav1.Now(),
|
||||
Reason: "KubeletHasNoDiskPressure",
|
||||
@@ -85,7 +100,7 @@ func nodeConditions() []v1.NodeCondition {
|
||||
},
|
||||
{
|
||||
Type: "NetworkUnavailable",
|
||||
Status: v1.ConditionFalse,
|
||||
Status: corev1.ConditionFalse,
|
||||
LastHeartbeatTime: metav1.Now(),
|
||||
LastTransitionTime: metav1.Now(),
|
||||
Reason: "RouteCreated",
|
||||
@@ -96,9 +111,7 @@ func nodeConditions() []v1.NodeCondition {
|
||||
|
||||
// updateNodeCapacity will update the virtual node capacity (and the allocatable field) with the sum of all the resource in the host nodes.
|
||||
// If the nodeLabels are specified only the matching nodes will be considered.
|
||||
func updateNodeCapacity(coreClient typedv1.CoreV1Interface, virtualClient client.Client, virtualNodeName string, nodeLabels map[string]string) error {
|
||||
ctx := context.Background()
|
||||
|
||||
func updateNodeCapacity(ctx context.Context, coreClient typedv1.CoreV1Interface, virtualClient client.Client, virtualNodeName string, nodeLabels map[string]string) error {
|
||||
capacity, allocatable, err := getResourcesFromNodes(ctx, coreClient, nodeLabels)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -117,8 +130,9 @@ func updateNodeCapacity(coreClient typedv1.CoreV1Interface, virtualClient client
|
||||
|
||||
// getResourcesFromNodes will return a sum of all the resource capacity of the host nodes, and the allocatable resources.
|
||||
// If some node labels are specified only the matching nodes will be considered.
|
||||
func getResourcesFromNodes(ctx context.Context, coreClient typedv1.CoreV1Interface, nodeLabels map[string]string) (v1.ResourceList, v1.ResourceList, error) {
|
||||
func getResourcesFromNodes(ctx context.Context, coreClient typedv1.CoreV1Interface, nodeLabels map[string]string) (corev1.ResourceList, corev1.ResourceList, error) {
|
||||
listOpts := metav1.ListOptions{}
|
||||
|
||||
if nodeLabels != nil {
|
||||
labelSelector := metav1.LabelSelector{MatchLabels: nodeLabels}
|
||||
listOpts.LabelSelector = labels.Set(labelSelector.MatchLabels).String()
|
||||
@@ -134,7 +148,6 @@ func getResourcesFromNodes(ctx context.Context, coreClient typedv1.CoreV1Interfa
|
||||
virtualAvailableResources := corev1.ResourceList{}
|
||||
|
||||
for _, node := range nodeList.Items {
|
||||
|
||||
// check if the node is Ready
|
||||
for _, condition := range node.Status.Conditions {
|
||||
if condition.Type != corev1.NodeReady {
|
||||
|
||||
@@ -3,44 +3,45 @@ package provider
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"maps"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
"github.com/rancher/k3k/k3k-kubelet/controller"
|
||||
"github.com/rancher/k3k/k3k-kubelet/controller/webhook"
|
||||
"github.com/rancher/k3k/k3k-kubelet/provider/collectors"
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
k3klog "github.com/rancher/k3k/pkg/log"
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/virtual-kubelet/virtual-kubelet/node/api"
|
||||
"github.com/virtual-kubelet/virtual-kubelet/node/api/statsv1alpha1"
|
||||
"github.com/virtual-kubelet/virtual-kubelet/node/nodeutil"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/selection"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
cv1 "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
|
||||
"errors"
|
||||
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/portforward"
|
||||
"k8s.io/client-go/tools/remotecommand"
|
||||
"k8s.io/client-go/transport/spdy"
|
||||
compbasemetrics "k8s.io/component-base/metrics"
|
||||
"k8s.io/utils/ptr"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
cv1 "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
compbasemetrics "k8s.io/component-base/metrics"
|
||||
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/controller/webhook"
|
||||
"github.com/rancher/k3k/k3k-kubelet/provider/collectors"
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
k3kcontroller "github.com/rancher/k3k/pkg/controller"
|
||||
k3klog "github.com/rancher/k3k/pkg/log"
|
||||
)
|
||||
|
||||
// check at compile time if the Provider implements the nodeutil.Provider interface
|
||||
@@ -49,10 +50,10 @@ var _ nodeutil.Provider = (*Provider)(nil)
|
||||
// Provider implements nodetuil.Provider from virtual Kubelet.
|
||||
// TODO: Implement NotifyPods and the required usage so that this can be an async provider
|
||||
type Provider struct {
|
||||
Handler controller.ControllerHandler
|
||||
Translator translate.ToHostTranslator
|
||||
HostClient client.Client
|
||||
VirtualClient client.Client
|
||||
VirtualManager manager.Manager
|
||||
ClientConfig rest.Config
|
||||
CoreClient cv1.CoreV1Interface
|
||||
ClusterNamespace string
|
||||
@@ -62,9 +63,7 @@ type Provider struct {
|
||||
logger *k3klog.Logger
|
||||
}
|
||||
|
||||
var (
|
||||
ErrRetryTimeout = errors.New("provider timed out")
|
||||
)
|
||||
var ErrRetryTimeout = errors.New("provider timed out")
|
||||
|
||||
func New(hostConfig rest.Config, hostMgr, virtualMgr manager.Manager, logger *k3klog.Logger, namespace, name, serverIP, dnsIP string) (*Provider, error) {
|
||||
coreClient, err := cv1.NewForConfig(&hostConfig)
|
||||
@@ -78,16 +77,9 @@ func New(hostConfig rest.Config, hostMgr, virtualMgr manager.Manager, logger *k3
|
||||
}
|
||||
|
||||
p := Provider{
|
||||
Handler: controller.ControllerHandler{
|
||||
Mgr: virtualMgr,
|
||||
Scheme: *virtualMgr.GetScheme(),
|
||||
HostClient: hostMgr.GetClient(),
|
||||
VirtualClient: virtualMgr.GetClient(),
|
||||
Translator: translator,
|
||||
Logger: logger,
|
||||
},
|
||||
HostClient: hostMgr.GetClient(),
|
||||
VirtualClient: virtualMgr.GetClient(),
|
||||
VirtualManager: virtualMgr,
|
||||
Translator: translator,
|
||||
ClientConfig: hostConfig,
|
||||
CoreClient: coreClient,
|
||||
@@ -110,24 +102,30 @@ func (p *Provider) GetContainerLogs(ctx context.Context, namespace, podName, con
|
||||
Follow: opts.Follow,
|
||||
Previous: opts.Previous,
|
||||
}
|
||||
|
||||
if opts.Tail != 0 {
|
||||
tailLines := int64(opts.Tail)
|
||||
options.TailLines = &tailLines
|
||||
}
|
||||
|
||||
if opts.LimitBytes != 0 {
|
||||
limitBytes := int64(opts.LimitBytes)
|
||||
options.LimitBytes = &limitBytes
|
||||
}
|
||||
|
||||
if opts.SinceSeconds != 0 {
|
||||
sinceSeconds := int64(opts.SinceSeconds)
|
||||
options.SinceSeconds = &sinceSeconds
|
||||
}
|
||||
|
||||
if !opts.SinceTime.IsZero() {
|
||||
sinceTime := metav1.NewTime(opts.SinceTime)
|
||||
options.SinceTime = &sinceTime
|
||||
}
|
||||
|
||||
closer, err := p.CoreClient.Pods(p.ClusterNamespace).GetLogs(hostPodName, &options).Stream(ctx)
|
||||
p.logger.Infof("got error %s when getting logs for %s in %s", err, hostPodName, p.ClusterNamespace)
|
||||
|
||||
return closer, err
|
||||
}
|
||||
|
||||
@@ -148,10 +146,12 @@ func (p *Provider) RunInContainer(ctx context.Context, namespace, podName, conta
|
||||
Stdout: attach.Stdout() != nil,
|
||||
Stderr: attach.Stderr() != nil,
|
||||
}, scheme.ParameterCodec)
|
||||
|
||||
exec, err := remotecommand.NewSPDYExecutor(&p.ClientConfig, http.MethodPost, req.URL())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return exec.StreamWithContext(ctx, remotecommand.StreamOptions{
|
||||
Stdin: attach.Stdin(),
|
||||
Stdout: attach.Stdout(),
|
||||
@@ -179,10 +179,12 @@ func (p *Provider) AttachToContainer(ctx context.Context, namespace, podName, co
|
||||
Stdout: attach.Stdout() != nil,
|
||||
Stderr: attach.Stderr() != nil,
|
||||
}, scheme.ParameterCodec)
|
||||
|
||||
exec, err := remotecommand.NewSPDYExecutor(&p.ClientConfig, http.MethodPost, req.URL())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return exec.StreamWithContext(ctx, remotecommand.StreamOptions{
|
||||
Stdin: attach.Stdin(),
|
||||
Stdout: attach.Stdout(),
|
||||
@@ -195,17 +197,19 @@ func (p *Provider) AttachToContainer(ctx context.Context, namespace, podName, co
|
||||
}
|
||||
|
||||
// GetStatsSummary gets the stats for the node, including running pods
|
||||
func (p *Provider) GetStatsSummary(ctx context.Context) (*statsv1alpha1.Summary, error) {
|
||||
func (p *Provider) GetStatsSummary(ctx context.Context) (*stats.Summary, error) {
|
||||
p.logger.Debug("GetStatsSummary")
|
||||
|
||||
nodeList := &v1.NodeList{}
|
||||
nodeList := &corev1.NodeList{}
|
||||
if err := p.CoreClient.RESTClient().Get().Resource("nodes").Do(ctx).Into(nodeList); err != nil {
|
||||
return nil, fmt.Errorf("unable to get nodes of cluster %s in namespace %s: %w", p.ClusterName, p.ClusterNamespace, err)
|
||||
}
|
||||
|
||||
// fetch the stats from all the nodes
|
||||
var nodeStats statsv1alpha1.NodeStats
|
||||
var allPodsStats []statsv1alpha1.PodStats
|
||||
var (
|
||||
nodeStats stats.NodeStats
|
||||
allPodsStats []stats.PodStats
|
||||
)
|
||||
|
||||
for _, n := range nodeList.Items {
|
||||
res, err := p.CoreClient.RESTClient().
|
||||
@@ -222,7 +226,7 @@ func (p *Provider) GetStatsSummary(ctx context.Context) (*statsv1alpha1.Summary,
|
||||
)
|
||||
}
|
||||
|
||||
stats := &statsv1alpha1.Summary{}
|
||||
stats := &stats.Summary{}
|
||||
if err := json.Unmarshal(res, stats); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -239,15 +243,16 @@ func (p *Provider) GetStatsSummary(ctx context.Context) (*statsv1alpha1.Summary,
|
||||
return nil, err
|
||||
}
|
||||
|
||||
podsNameMap := make(map[string]*v1.Pod)
|
||||
podsNameMap := make(map[string]*corev1.Pod)
|
||||
|
||||
for _, pod := range pods {
|
||||
hostPodName := p.Translator.TranslateName(pod.Namespace, pod.Name)
|
||||
podsNameMap[hostPodName] = pod
|
||||
}
|
||||
|
||||
filteredStats := &statsv1alpha1.Summary{
|
||||
filteredStats := &stats.Summary{
|
||||
Node: nodeStats,
|
||||
Pods: make([]statsv1alpha1.PodStats, 0),
|
||||
Pods: make([]stats.PodStats, 0),
|
||||
}
|
||||
|
||||
for _, podStat := range allPodsStats {
|
||||
@@ -258,7 +263,7 @@ func (p *Provider) GetStatsSummary(ctx context.Context) (*statsv1alpha1.Summary,
|
||||
|
||||
// rewrite the PodReference to match the data of the virtual cluster
|
||||
if pod, found := podsNameMap[podStat.PodRef.Name]; found {
|
||||
podStat.PodRef = statsv1alpha1.PodReference{
|
||||
podStat.PodRef = stats.PodReference{
|
||||
Name: pod.Name,
|
||||
Namespace: pod.Namespace,
|
||||
UID: string(pod.UID),
|
||||
@@ -284,6 +289,7 @@ func (p *Provider) GetMetricsResource(ctx context.Context) ([]*dto.MetricFamily,
|
||||
if err != nil {
|
||||
return nil, errors.Join(err, errors.New("error gathering metrics from collector"))
|
||||
}
|
||||
|
||||
return metricFamily, nil
|
||||
}
|
||||
|
||||
@@ -300,9 +306,9 @@ func (p *Provider) PortForward(ctx context.Context, namespace, pod string, port
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, http.MethodPost, req.URL())
|
||||
portAsString := strconv.Itoa(int(port))
|
||||
|
||||
readyChannel := make(chan struct{})
|
||||
stopChannel := make(chan struct{}, 1)
|
||||
|
||||
@@ -310,7 +316,6 @@ func (p *Provider) PortForward(ctx context.Context, namespace, pod string, port
|
||||
// should send a value on stopChannel so that the PortForward is stopped. However, we only have a ReadWriteCloser
|
||||
// so more work is needed to detect a close and handle that appropriately.
|
||||
fw, err := portforward.New(dialer, []string{portAsString}, stopChannel, readyChannel, stream, stream)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -325,7 +330,14 @@ func (p *Provider) CreatePod(ctx context.Context, pod *corev1.Pod) error {
|
||||
|
||||
// createPod takes a Kubernetes Pod and deploys it within the provider.
func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
tPod := pod.DeepCopy()
// fieldPath envs are not being translated correctly using the virtual kubelet pod controller
// as a workaround we will try to fetch the pod from the virtual cluster and copy over the envSource
var sourcePod corev1.Pod
if err := p.VirtualClient.Get(ctx, types.NamespacedName{Name: pod.Name, Namespace: pod.Namespace}, &sourcePod); err != nil {
return err
}

tPod := sourcePod.DeepCopy()
p.Translator.TranslateTo(tPod)

// get Cluster definition
@@ -333,7 +345,9 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
|
||||
Namespace: p.ClusterNamespace,
|
||||
Name: p.ClusterName,
|
||||
}
|
||||
|
||||
var cluster v1alpha1.Cluster
|
||||
|
||||
if err := p.HostClient.Get(ctx, clusterKey, &cluster); err != nil {
|
||||
return fmt.Errorf("unable to get cluster %s in namespace %s: %w", p.ClusterName, p.ClusterNamespace, err)
|
||||
}
|
||||
@@ -349,45 +363,67 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
|
||||
|
||||
// setting the hostname for the pod if it's not set
|
||||
if pod.Spec.Hostname == "" {
|
||||
tPod.Spec.Hostname = pod.Name
|
||||
tPod.Spec.Hostname = k3kcontroller.SafeConcatName(pod.Name)
|
||||
}
|
||||
|
||||
// if the priorityCluss for the virtual cluster is set then override the provided value
|
||||
// if the priorityClass for the virtual cluster is set then override the provided value
|
||||
// Note: the core-dns and local-path-provisioner pod are scheduled by k3s with the
|
||||
// 'system-cluster-critical' and 'system-node-critical' default priority classes.
|
||||
if cluster.Spec.PriorityClass != "" {
|
||||
tPod.Spec.PriorityClassName = cluster.Spec.PriorityClass
|
||||
tPod.Spec.Priority = nil
|
||||
if !strings.HasPrefix(tPod.Spec.PriorityClassName, "system-") {
|
||||
if tPod.Spec.PriorityClassName != "" {
|
||||
tPriorityClassName := p.Translator.TranslateName("", tPod.Spec.PriorityClassName)
|
||||
tPod.Spec.PriorityClassName = tPriorityClassName
|
||||
}
|
||||
|
||||
if cluster.Spec.PriorityClass != "" {
|
||||
tPod.Spec.PriorityClassName = cluster.Spec.PriorityClass
|
||||
tPod.Spec.Priority = nil
|
||||
}
|
||||
}
|
||||
|
||||
// fieldpath annotations
|
||||
if err := p.configureFieldPathEnv(pod, tPod); err != nil {
|
||||
if err := p.configureFieldPathEnv(&sourcePod, tPod); err != nil {
|
||||
return fmt.Errorf("unable to fetch fieldpath annotations for pod %s/%s: %w", pod.Namespace, pod.Name, err)
|
||||
}
|
||||
// volumes will often refer to resources in the virtual cluster, but instead need to refer to the sync'd
|
||||
// host cluster version
|
||||
if err := p.transformVolumes(ctx, pod.Namespace, tPod.Spec.Volumes); err != nil {
|
||||
if err := p.transformVolumes(pod.Namespace, tPod.Spec.Volumes); err != nil {
|
||||
return fmt.Errorf("unable to sync volumes for pod %s/%s: %w", pod.Namespace, pod.Name, err)
|
||||
}
|
||||
// sync serviceaccount token to the host cluster
|
||||
if err := p.transformTokens(ctx, pod, tPod); err != nil {
|
||||
return fmt.Errorf("unable to transform tokens for pod %s/%s: %w", pod.Namespace, pod.Name, err)
|
||||
}
|
||||
// inject networking information to the pod including the virtual cluster controlplane endpoint
|
||||
p.configureNetworking(pod.Name, pod.Namespace, tPod, p.serverIP)
|
||||
|
||||
p.logger.Infow("Creating pod", "Host Namespace", tPod.Namespace, "Host Name", tPod.Name,
|
||||
"Virtual Namespace", pod.Namespace, "Virtual Name", "env", pod.Name, pod.Spec.Containers[0].Env)
|
||||
for i, imagePullSecret := range tPod.Spec.ImagePullSecrets {
|
||||
tPod.Spec.ImagePullSecrets[i].Name = p.Translator.TranslateName(pod.Namespace, imagePullSecret.Name)
|
||||
}
|
||||
|
||||
// inject networking information to the pod including the virtual cluster controlplane endpoint
|
||||
configureNetworking(tPod, pod.Name, pod.Namespace, p.serverIP, p.dnsIP)
|
||||
|
||||
p.logger.Infow("creating pod",
|
||||
"host_namespace", tPod.Namespace, "host_name", tPod.Name,
|
||||
"virtual_namespace", pod.Namespace, "virtual_name", pod.Name,
|
||||
)
|
||||
|
||||
// set ownerReference to the cluster object
|
||||
if err := controllerutil.SetControllerReference(&cluster, tPod, p.HostClient.Scheme()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return p.HostClient.Create(ctx, tPod)
|
||||
}
|
||||
|
||||
// withRetry retries the passed function with an interval and timeout
func (p *Provider) withRetry(ctx context.Context, f func(context.Context, *v1.Pod) error, pod *v1.Pod) error {
func (p *Provider) withRetry(ctx context.Context, f func(context.Context, *corev1.Pod) error, pod *corev1.Pod) error {
const (
interval = 2 * time.Second
timeout = 10 * time.Second
)

var allErrors error

// retryFn will retry until the operation succeeds, or the timeout occurs
retryFn := func(ctx context.Context) (bool, error) {
if lastErr := f(ctx, pod); lastErr != nil {
@@ -395,117 +431,54 @@ func (p *Provider) withRetry(ctx context.Context, f func(context.Context, *v1.Po
allErrors = errors.Join(allErrors, lastErr)
return false, nil
}

return true, nil
}

if err := wait.PollUntilContextTimeout(ctx, interval, timeout, true, retryFn); err != nil {
return errors.Join(allErrors, ErrRetryTimeout)
}

return nil
}

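// For reference, the exported pod operations in this file wrap their internal counterparts with
// this retry helper; UpdatePod, for example, appears further down as:
//
//	func (p *Provider) UpdatePod(ctx context.Context, pod *corev1.Pod) error {
//		return p.withRetry(ctx, p.updatePod, pod)
//	}
//
// so a transient failure is retried every 2 seconds for up to 10 seconds before the call gives
// up with ErrRetryTimeout.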
// transformVolumes changes the volumes to the representation in the host cluster. Will return an error
|
||||
// if one/more volumes couldn't be transformed
|
||||
func (p *Provider) transformVolumes(ctx context.Context, podNamespace string, volumes []corev1.Volume) error {
|
||||
func (p *Provider) transformVolumes(podNamespace string, volumes []corev1.Volume) error {
|
||||
for _, volume := range volumes {
|
||||
var optional bool
|
||||
if strings.HasPrefix(volume.Name, kubeAPIAccessPrefix) {
|
||||
continue
|
||||
}
|
||||
// note: this needs to handle downward api volumes as well, but more thought is needed on how to do that
|
||||
if volume.ConfigMap != nil {
|
||||
if volume.ConfigMap.Optional != nil {
|
||||
optional = *volume.ConfigMap.Optional
|
||||
}
|
||||
if err := p.syncConfigmap(ctx, podNamespace, volume.ConfigMap.Name, optional); err != nil {
|
||||
return fmt.Errorf("unable to sync configmap volume %s: %w", volume.Name, err)
|
||||
}
|
||||
volume.ConfigMap.Name = p.Translator.TranslateName(podNamespace, volume.ConfigMap.Name)
|
||||
} else if volume.Secret != nil {
|
||||
if volume.Secret.Optional != nil {
|
||||
optional = *volume.Secret.Optional
|
||||
}
|
||||
if err := p.syncSecret(ctx, podNamespace, volume.Secret.SecretName, optional); err != nil {
|
||||
return fmt.Errorf("unable to sync secret volume %s: %w", volume.Name, err)
|
||||
}
|
||||
volume.Secret.SecretName = p.Translator.TranslateName(podNamespace, volume.Secret.SecretName)
|
||||
} else if volume.Projected != nil {
|
||||
for _, source := range volume.Projected.Sources {
|
||||
if source.ConfigMap != nil {
|
||||
if source.ConfigMap.Optional != nil {
|
||||
optional = *source.ConfigMap.Optional
|
||||
}
|
||||
configMapName := source.ConfigMap.Name
|
||||
if err := p.syncConfigmap(ctx, podNamespace, configMapName, optional); err != nil {
|
||||
return fmt.Errorf("unable to sync projected configmap %s: %w", configMapName, err)
|
||||
}
|
||||
source.ConfigMap.Name = p.Translator.TranslateName(podNamespace, configMapName)
|
||||
source.ConfigMap.Name = p.Translator.TranslateName(podNamespace, source.ConfigMap.Name)
|
||||
} else if source.Secret != nil {
|
||||
if source.Secret.Optional != nil {
|
||||
optional = *source.Secret.Optional
|
||||
}
|
||||
secretName := source.Secret.Name
|
||||
if err := p.syncSecret(ctx, podNamespace, secretName, optional); err != nil {
|
||||
return fmt.Errorf("unable to sync projected secret %s: %w", secretName, err)
|
||||
}
|
||||
source.Secret.Name = p.Translator.TranslateName(podNamespace, source.Secret.Name)
|
||||
}
|
||||
}
|
||||
} else if volume.PersistentVolumeClaim != nil {
|
||||
volume.PersistentVolumeClaim.ClaimName = p.Translator.TranslateName(podNamespace, volume.PersistentVolumeClaim.ClaimName)
|
||||
} else if volume.DownwardAPI != nil {
|
||||
for _, downwardAPI := range volume.DownwardAPI.Items {
|
||||
if downwardAPI.FieldRef.FieldPath == translate.MetadataNameField {
|
||||
downwardAPI.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNameAnnotation)
|
||||
}
|
||||
if downwardAPI.FieldRef.FieldPath == translate.MetadataNamespaceField {
|
||||
downwardAPI.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNamespaceAnnotation)
|
||||
if downwardAPI.FieldRef != nil {
|
||||
if downwardAPI.FieldRef.FieldPath == translate.MetadataNameField {
|
||||
downwardAPI.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNameAnnotation)
|
||||
}
|
||||
|
||||
if downwardAPI.FieldRef.FieldPath == translate.MetadataNamespaceField {
|
||||
downwardAPI.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNamespaceAnnotation)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// syncConfigmap will add the configmap object to the queue of the syncer controller to be synced to the host cluster
|
||||
func (p *Provider) syncConfigmap(ctx context.Context, podNamespace string, configMapName string, optional bool) error {
|
||||
var configMap corev1.ConfigMap
|
||||
nsName := types.NamespacedName{
|
||||
Namespace: podNamespace,
|
||||
Name: configMapName,
|
||||
}
|
||||
err := p.VirtualClient.Get(ctx, nsName, &configMap)
|
||||
if err != nil {
|
||||
// check if its optional configmap
|
||||
if apierrors.IsNotFound(err) && optional {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("unable to get configmap to sync %s/%s: %w", nsName.Namespace, nsName.Name, err)
|
||||
}
|
||||
err = p.Handler.AddResource(ctx, &configMap)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to add configmap to sync %s/%s: %w", nsName.Namespace, nsName.Name, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// syncSecret will add the secret object to the queue of the syncer controller to be synced to the host cluster
|
||||
func (p *Provider) syncSecret(ctx context.Context, podNamespace string, secretName string, optional bool) error {
|
||||
p.logger.Infow("Syncing secret", "Name", secretName, "Namespace", podNamespace, "optional", optional)
|
||||
var secret corev1.Secret
|
||||
nsName := types.NamespacedName{
|
||||
Namespace: podNamespace,
|
||||
Name: secretName,
|
||||
}
|
||||
err := p.VirtualClient.Get(ctx, nsName, &secret)
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) && optional {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("unable to get secret to sync %s/%s: %w", nsName.Namespace, nsName.Name, err)
|
||||
}
|
||||
err = p.Handler.AddResource(ctx, &secret)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to add secret to sync %s/%s: %w", nsName.Namespace, nsName.Name, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -514,7 +487,7 @@ func (p *Provider) UpdatePod(ctx context.Context, pod *corev1.Pod) error {
|
||||
return p.withRetry(ctx, p.updatePod, pod)
|
||||
}
|
||||
|
||||
func (p *Provider) updatePod(ctx context.Context, pod *v1.Pod) error {
|
||||
func (p *Provider) updatePod(ctx context.Context, pod *corev1.Pod) error {
|
||||
p.logger.Debugw("got a request for update pod")
|
||||
|
||||
// Once scheduled a Pod cannot update other fields than the image of the containers, initcontainers and a few others
|
||||
@@ -522,11 +495,41 @@ func (p *Provider) updatePod(ctx context.Context, pod *v1.Pod) error {
|
||||
|
||||
// Update Pod in the virtual cluster
|
||||
|
||||
var currentVirtualPod v1.Pod
|
||||
var currentVirtualPod corev1.Pod
|
||||
if err := p.VirtualClient.Get(ctx, client.ObjectKeyFromObject(pod), &currentVirtualPod); err != nil {
|
||||
return fmt.Errorf("unable to get pod to update from virtual cluster: %w", err)
|
||||
}
|
||||
|
||||
hostNamespaceName := types.NamespacedName{
|
||||
Namespace: p.ClusterNamespace,
|
||||
Name: p.Translator.TranslateName(pod.Namespace, pod.Name),
|
||||
}
|
||||
|
||||
var currentHostPod corev1.Pod
|
||||
|
||||
if err := p.HostClient.Get(ctx, hostNamespaceName, &currentHostPod); err != nil {
|
||||
return fmt.Errorf("unable to get pod to update from host cluster: %w", err)
|
||||
}
|
||||
|
||||
// Handle ephemeral containers
|
||||
if !cmp.Equal(currentHostPod.Spec.EphemeralContainers, pod.Spec.EphemeralContainers) {
|
||||
p.logger.Info("Updating ephemeral containers")
|
||||
|
||||
currentHostPod.Spec.EphemeralContainers = pod.Spec.EphemeralContainers
|
||||
|
||||
if _, err := p.CoreClient.Pods(p.ClusterNamespace).UpdateEphemeralContainers(ctx, currentHostPod.Name, &currentHostPod, metav1.UpdateOptions{}); err != nil {
|
||||
p.logger.Errorf("error when updating ephemeral containers: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// fieldpath annotations
|
||||
if err := p.configureFieldPathEnv(&currentVirtualPod, &currentHostPod); err != nil {
|
||||
return fmt.Errorf("unable to fetch fieldpath annotations for pod %s/%s: %w", pod.Namespace, pod.Name, err)
|
||||
}
|
||||
|
||||
currentVirtualPod.Spec.Containers = updateContainerImages(currentVirtualPod.Spec.Containers, pod.Spec.Containers)
|
||||
currentVirtualPod.Spec.InitContainers = updateContainerImages(currentVirtualPod.Spec.InitContainers, pod.Spec.InitContainers)
|
||||
|
||||
@@ -542,17 +545,6 @@ func (p *Provider) updatePod(ctx context.Context, pod *v1.Pod) error {
|
||||
}
|
||||
|
||||
// Update Pod in the host cluster
|
||||
|
||||
hostNamespaceName := types.NamespacedName{
|
||||
Namespace: p.ClusterNamespace,
|
||||
Name: p.Translator.TranslateName(pod.Namespace, pod.Name),
|
||||
}
|
||||
|
||||
var currentHostPod corev1.Pod
|
||||
if err := p.HostClient.Get(ctx, hostNamespaceName, &currentHostPod); err != nil {
|
||||
return fmt.Errorf("unable to get pod to update from host cluster: %w", err)
|
||||
}
|
||||
|
||||
currentHostPod.Spec.Containers = updateContainerImages(currentHostPod.Spec.Containers, pod.Spec.Containers)
|
||||
currentHostPod.Spec.InitContainers = updateContainerImages(currentHostPod.Spec.InitContainers, pod.Spec.InitContainers)
|
||||
|
||||
@@ -560,6 +552,10 @@ func (p *Provider) updatePod(ctx context.Context, pod *v1.Pod) error {
|
||||
currentHostPod.Spec.ActiveDeadlineSeconds = pod.Spec.ActiveDeadlineSeconds
|
||||
currentHostPod.Spec.Tolerations = pod.Spec.Tolerations
|
||||
|
||||
// in the virtual cluster we can update also the labels and annotations
|
||||
maps.Copy(currentHostPod.Annotations, pod.Annotations)
|
||||
maps.Copy(currentHostPod.Labels, pod.Labels)
|
||||
|
||||
if err := p.HostClient.Update(ctx, &currentHostPod); err != nil {
|
||||
return fmt.Errorf("unable to update pod in the host cluster: %w", err)
|
||||
}
|
||||
@@ -568,7 +564,7 @@ func (p *Provider) updatePod(ctx context.Context, pod *v1.Pod) error {
|
||||
}
|
||||
|
||||
// updateContainerImages will update the images of the original container images with the same name
|
||||
func updateContainerImages(original, updated []v1.Container) []v1.Container {
|
||||
func updateContainerImages(original, updated []corev1.Container) []corev1.Container {
|
||||
newImages := make(map[string]string)
|
||||
|
||||
for _, c := range updated {
|
||||
@@ -595,70 +591,14 @@ func (p *Provider) DeletePod(ctx context.Context, pod *corev1.Pod) error {
|
||||
func (p *Provider) deletePod(ctx context.Context, pod *corev1.Pod) error {
|
||||
p.logger.Infof("Got request to delete pod %s", pod.Name)
|
||||
hostName := p.Translator.TranslateName(pod.Namespace, pod.Name)
|
||||
|
||||
err := p.CoreClient.Pods(p.ClusterNamespace).Delete(ctx, hostName, metav1.DeleteOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to delete pod %s/%s: %w", pod.Namespace, pod.Name, err)
|
||||
}
|
||||
if err = p.pruneUnusedVolumes(ctx, pod); err != nil {
|
||||
// note that we don't return an error here. The pod was successfully deleted, another process
|
||||
// should clean this without affecting the user
|
||||
p.logger.Errorf("failed to prune leftover volumes for %s/%s: %w, resources may be left", pod.Namespace, pod.Name, err)
|
||||
}
|
||||
|
||||
p.logger.Infof("Deleted pod %s", pod.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
// pruneUnusedVolumes removes volumes in use by pod that aren't used by any other pods
|
||||
func (p *Provider) pruneUnusedVolumes(ctx context.Context, pod *corev1.Pod) error {
|
||||
rawSecrets, rawConfigMaps := getSecretsAndConfigmaps(pod)
|
||||
// since this pod was removed, originally mark all of the secrets/configmaps it uses as eligible
|
||||
// for pruning
|
||||
pruneSecrets := sets.Set[string]{}.Insert(rawSecrets...)
|
||||
pruneConfigMap := sets.Set[string]{}.Insert(rawConfigMaps...)
|
||||
var pods corev1.PodList
|
||||
// only pods in the same namespace could be using secrets/configmaps that this pod is using
|
||||
err := p.VirtualClient.List(ctx, &pods, &client.ListOptions{
|
||||
Namespace: pod.Namespace,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to list pods: %w", err)
|
||||
}
|
||||
for _, vPod := range pods.Items {
|
||||
if vPod.Name == pod.Name {
|
||||
continue
|
||||
}
|
||||
secrets, configMaps := getSecretsAndConfigmaps(&vPod)
|
||||
pruneSecrets.Delete(secrets...)
|
||||
pruneConfigMap.Delete(configMaps...)
|
||||
}
|
||||
for _, secretName := range pruneSecrets.UnsortedList() {
|
||||
var secret corev1.Secret
|
||||
err := p.VirtualClient.Get(ctx, types.NamespacedName{
|
||||
Name: secretName,
|
||||
Namespace: pod.Namespace,
|
||||
}, &secret)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to get secret %s/%s for pod volume: %w", pod.Namespace, secretName, err)
|
||||
}
|
||||
err = p.Handler.RemoveResource(ctx, &secret)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to remove secret %s/%s for pod volume: %w", pod.Namespace, secretName, err)
|
||||
}
|
||||
}
|
||||
for _, configMapName := range pruneConfigMap.UnsortedList() {
|
||||
var configMap corev1.ConfigMap
|
||||
err := p.VirtualClient.Get(ctx, types.NamespacedName{
|
||||
Name: configMapName,
|
||||
Namespace: pod.Namespace,
|
||||
}, &configMap)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to get configMap %s/%s for pod volume: %w", pod.Namespace, configMapName, err)
|
||||
}
|
||||
|
||||
if err = p.Handler.RemoveResource(ctx, &configMap); err != nil {
|
||||
return fmt.Errorf("unable to remove configMap %s/%s for pod volume: %w", pod.Namespace, configMapName, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -672,12 +612,15 @@ func (p *Provider) GetPod(ctx context.Context, namespace, name string) (*corev1.
|
||||
Namespace: p.ClusterNamespace,
|
||||
Name: p.Translator.TranslateName(namespace, name),
|
||||
}
|
||||
|
||||
var pod corev1.Pod
|
||||
err := p.HostClient.Get(ctx, hostNamespaceName, &pod)
|
||||
if err != nil {
|
||||
|
||||
if err := p.HostClient.Get(ctx, hostNamespaceName, &pod); err != nil {
|
||||
return nil, fmt.Errorf("error when retrieving pod: %w", err)
|
||||
}
|
||||
|
||||
p.Translator.TranslateFrom(&pod)
|
||||
|
||||
return &pod, nil
|
||||
}
|
||||
|
||||
@@ -687,11 +630,14 @@ func (p *Provider) GetPod(ctx context.Context, namespace, name string) (*corev1.
|
||||
// to return a version after DeepCopy.
|
||||
func (p *Provider) GetPodStatus(ctx context.Context, namespace, name string) (*corev1.PodStatus, error) {
|
||||
p.logger.Debugw("got a request for pod status", "Namespace", namespace, "Name", name)
|
||||
|
||||
pod, err := p.GetPod(ctx, namespace, name)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to get pod for status: %w", err)
|
||||
}
|
||||
|
||||
p.logger.Debugw("got pod status", "Namespace", namespace, "Name", name, "Status", pod.Status)
|
||||
|
||||
return pod.Status.DeepCopy(), nil
|
||||
}
|
||||
|
||||
@@ -701,160 +647,135 @@ func (p *Provider) GetPodStatus(ctx context.Context, namespace, name string) (*c
|
||||
// to return a version after DeepCopy.
|
||||
func (p *Provider) GetPods(ctx context.Context) ([]*corev1.Pod, error) {
|
||||
selector := labels.NewSelector()
|
||||
|
||||
requirement, err := labels.NewRequirement(translate.ClusterNameLabel, selection.Equals, []string{p.ClusterName})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to create label selector: %w", err)
|
||||
}
|
||||
|
||||
selector = selector.Add(*requirement)
|
||||
|
||||
var podList corev1.PodList
|
||||
|
||||
err = p.HostClient.List(ctx, &podList, &client.ListOptions{LabelSelector: selector})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to list pods: %w", err)
|
||||
}
|
||||
|
||||
retPods := []*corev1.Pod{}
|
||||
|
||||
for _, pod := range podList.DeepCopy().Items {
|
||||
p.Translator.TranslateFrom(&pod)
|
||||
retPods = append(retPods, &pod)
|
||||
}
|
||||
|
||||
return retPods, nil
|
||||
}
|
||||
|
||||
// configureNetworking will inject network information to each pod to connect them to the
// virtual cluster api server, as well as configure DNS information to connect them to the
// synced coredns on the host cluster.
func (p *Provider) configureNetworking(podName, podNamespace string, pod *corev1.Pod, serverIP string) {
func configureNetworking(pod *corev1.Pod, podName, podNamespace, serverIP, dnsIP string) {
// inject serverIP to hostalias for the pod
|
||||
KubernetesHostAlias := corev1.HostAlias{
|
||||
IP: serverIP,
|
||||
Hostnames: []string{"kubernetes", "kubernetes.default", "kubernetes.default.svc", "kubernetes.default.svc.cluster", "kubernetes.default.svc.cluster.local"},
|
||||
}
|
||||
pod.Spec.HostAliases = append(pod.Spec.HostAliases, KubernetesHostAlias)
|
||||
// inject networking information to the pod's environment variables
|
||||
for i := range pod.Spec.Containers {
|
||||
pod.Spec.Containers[i].Env = append(pod.Spec.Containers[i].Env,
|
||||
corev1.EnvVar{
|
||||
Name: "KUBERNETES_PORT_443_TCP",
|
||||
Value: "tcp://" + p.serverIP + ":6443",
|
||||
},
|
||||
corev1.EnvVar{
|
||||
Name: "KUBERNETES_PORT",
|
||||
Value: "tcp://" + p.serverIP + ":6443",
|
||||
},
|
||||
corev1.EnvVar{
|
||||
Name: "KUBERNETES_PORT_443_TCP_ADDR",
|
||||
Value: p.serverIP,
|
||||
},
|
||||
corev1.EnvVar{
|
||||
Name: "KUBERNETES_SERVICE_HOST",
|
||||
Value: p.serverIP,
|
||||
},
|
||||
corev1.EnvVar{
|
||||
Name: "KUBERNETES_SERVICE_PORT",
|
||||
Value: "6443",
|
||||
},
|
||||
)
|
||||
}
|
||||
// handle init containers as well
|
||||
for i := range pod.Spec.InitContainers {
|
||||
pod.Spec.InitContainers[i].Env = append(pod.Spec.InitContainers[i].Env,
|
||||
corev1.EnvVar{
|
||||
Name: "KUBERNETES_PORT_443_TCP",
|
||||
Value: "tcp://" + p.serverIP + ":6443",
|
||||
},
|
||||
corev1.EnvVar{
|
||||
Name: "KUBERNETES_PORT",
|
||||
Value: "tcp://" + p.serverIP + ":6443",
|
||||
},
|
||||
corev1.EnvVar{
|
||||
Name: "KUBERNETES_PORT_443_TCP_ADDR",
|
||||
Value: p.serverIP,
|
||||
},
|
||||
corev1.EnvVar{
|
||||
Name: "KUBERNETES_SERVICE_HOST",
|
||||
Value: p.serverIP,
|
||||
},
|
||||
corev1.EnvVar{
|
||||
Name: "KUBERNETES_SERVICE_PORT",
|
||||
Value: "6443",
|
||||
},
|
||||
)
|
||||
}
|
||||
pod.Spec.HostAliases = append(pod.Spec.HostAliases, corev1.HostAlias{
|
||||
IP: serverIP,
|
||||
Hostnames: []string{
|
||||
"kubernetes",
|
||||
"kubernetes.default",
|
||||
"kubernetes.default.svc",
|
||||
"kubernetes.default.svc.cluster",
|
||||
"kubernetes.default.svc.cluster.local",
|
||||
},
|
||||
})
|
||||
|
||||
// injecting cluster DNS IP to the pods except for coredns pod
|
||||
if !strings.HasPrefix(podName, "coredns") {
|
||||
if !strings.HasPrefix(podName, "coredns") && pod.Spec.DNSConfig == nil {
|
||||
pod.Spec.DNSPolicy = corev1.DNSNone
|
||||
pod.Spec.DNSConfig = &corev1.PodDNSConfig{
|
||||
Nameservers: []string{
|
||||
p.dnsIP,
|
||||
dnsIP,
|
||||
},
|
||||
Searches: []string{
|
||||
podNamespace + ".svc.cluster.local", "svc.cluster.local", "cluster.local",
|
||||
podNamespace + ".svc.cluster.local",
|
||||
"svc.cluster.local",
|
||||
"cluster.local",
|
||||
},
|
||||
Options: []corev1.PodDNSConfigOption{
|
||||
{
|
||||
Name: "ndots",
|
||||
Value: ptr.To("5"),
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
updatedEnvVars := []corev1.EnvVar{
|
||||
{Name: "KUBERNETES_SERVICE_HOST", Value: serverIP},
|
||||
{Name: "KUBERNETES_PORT", Value: "tcp://" + serverIP + ":443"},
|
||||
{Name: "KUBERNETES_PORT_443_TCP", Value: "tcp://" + serverIP + ":443"},
|
||||
{Name: "KUBERNETES_PORT_443_TCP_ADDR", Value: serverIP},
|
||||
}
|
||||
|
||||
// inject networking information to the pod's environment variables
|
||||
for i := range pod.Spec.Containers {
|
||||
pod.Spec.Containers[i].Env = mergeEnvVars(pod.Spec.Containers[i].Env, updatedEnvVars)
|
||||
}
|
||||
|
||||
// handle init containers as well
|
||||
for i := range pod.Spec.InitContainers {
|
||||
pod.Spec.InitContainers[i].Env = mergeEnvVars(pod.Spec.InitContainers[i].Env, updatedEnvVars)
|
||||
}
|
||||
|
||||
// handle ephemeral containers as well
|
||||
for i := range pod.Spec.EphemeralContainers {
|
||||
pod.Spec.EphemeralContainers[i].Env = mergeEnvVars(pod.Spec.EphemeralContainers[i].Env, updatedEnvVars)
|
||||
}
|
||||
}
|
||||
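
A minimal usage sketch (not taken from the diff) of how the refactored helper would be exercised from within the provider package; the pod, IPs, and names are illustrative only, and the metav1 import is assumed:

pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"},
Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "web", Image: "nginx"}}},
}
configureNetworking(pod, "web", "default", "10.0.0.10", "10.43.0.10")
// After the call: pod.Spec.HostAliases maps 10.0.0.10 to the kubernetes.* names,
// pod.Spec.DNSConfig points at 10.43.0.10 (the synced CoreDNS on the host), and
// every container, init container, and ephemeral container has its
// KUBERNETES_SERVICE_HOST / KUBERNETES_PORT* env vars rewritten to 10.0.0.10:443.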

// getSecretsAndConfigmaps retrieves a list of all secrets/configmaps that are in use by a given pod. Useful
// for removing/seeing which virtual cluster resources need to be in the host cluster.
func getSecretsAndConfigmaps(pod *corev1.Pod) ([]string, []string) {
var secrets []string
var configMaps []string
for _, volume := range pod.Spec.Volumes {
if volume.Secret != nil {
secrets = append(secrets, volume.Secret.SecretName)
} else if volume.ConfigMap != nil {
configMaps = append(configMaps, volume.ConfigMap.Name)
} else if volume.Projected != nil {
for _, source := range volume.Projected.Sources {
if source.ConfigMap != nil {
configMaps = append(configMaps, source.ConfigMap.Name)
} else if source.Secret != nil {
secrets = append(secrets, source.Secret.Name)
}
}
// mergeEnvVars will override the orig environment variables if found in the updated list and will add them to the list if not found
func mergeEnvVars(orig, updated []corev1.EnvVar) []corev1.EnvVar {
if len(updated) == 0 {
return orig
}

// create map for single lookup
updatedEnvVarMap := make(map[string]corev1.EnvVar)
for _, updatedEnvVar := range updated {
updatedEnvVarMap[updatedEnvVar.Name] = updatedEnvVar
}

for i, env := range orig {
if updatedEnv, ok := updatedEnvVarMap[env.Name]; ok {
orig[i] = updatedEnv
// Remove the updated variable from the map
delete(updatedEnvVarMap, env.Name)
}
}
return secrets, configMaps

// Any variables remaining in the map are new and should be appended to the original slice.
for _, env := range updatedEnvVarMap {
orig = append(orig, env)
}

return orig
}
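
A quick sketch of the merge semantics (not from the diff; values are illustrative): entries whose names match are overridden in place, and the remaining updated entries are appended.

orig := []corev1.EnvVar{{Name: "A", Value: "1"}, {Name: "B", Value: "2"}}
updated := []corev1.EnvVar{{Name: "B", Value: "3"}, {Name: "C", Value: "4"}}
merged := mergeEnvVars(orig, updated)
// merged contains A=1, B=3, C=4; the order of the appended extras is not
// guaranteed, since leftovers are drained from a map.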

// configureFieldPathEnv will retrieve all annotations created by the pod mutator webhook
// to assign env fieldpaths to pods, it will also make sure to change the metadata.name and metadata.namespace to the
// assigned annotations
func (p *Provider) configureFieldPathEnv(pod, tPod *v1.Pod) error {
func (p *Provider) configureFieldPathEnv(pod, tPod *corev1.Pod) error {
for _, container := range pod.Spec.EphemeralContainers {
addFieldPathAnnotationToEnv(container.Env)
}
// override metadata.name and metadata.namespace with pod annotations
for i, container := range pod.Spec.InitContainers {
for j, envVar := range container.Env {
if envVar.ValueFrom == nil || envVar.ValueFrom.FieldRef == nil {
continue
}

fieldPath := envVar.ValueFrom.FieldRef.FieldPath

if fieldPath == translate.MetadataNameField {
envVar.ValueFrom.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNameAnnotation)
pod.Spec.InitContainers[i].Env[j] = envVar
}
if fieldPath == translate.MetadataNamespaceField {
envVar.ValueFrom.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.MetadataNamespaceField)
pod.Spec.InitContainers[i].Env[j] = envVar
}
}
for _, container := range pod.Spec.InitContainers {
addFieldPathAnnotationToEnv(container.Env)
}
for i, container := range pod.Spec.Containers {
for j, envVar := range container.Env {
if envVar.ValueFrom == nil || envVar.ValueFrom.FieldRef == nil {
continue
}
fieldPath := envVar.ValueFrom.FieldRef.FieldPath
if fieldPath == translate.MetadataNameField {
envVar.ValueFrom.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNameAnnotation)
pod.Spec.Containers[i].Env[j] = envVar
}
if fieldPath == translate.MetadataNamespaceField {
envVar.ValueFrom.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNameAnnotation)
pod.Spec.Containers[i].Env[j] = envVar
}
}

for _, container := range pod.Spec.Containers {
addFieldPathAnnotationToEnv(container.Env)
}

for name, value := range pod.Annotations {
if strings.Contains(name, webhook.FieldpathField) {
containerIndex, envName, err := webhook.ParseFieldPathAnnotationKey(name)
@@ -862,10 +783,10 @@ func (p *Provider) configureFieldPathEnv(pod, tPod *v1.Pod) error {
return err
}
// re-adding these envs to the pod
tPod.Spec.Containers[containerIndex].Env = append(tPod.Spec.Containers[containerIndex].Env, v1.EnvVar{
tPod.Spec.Containers[containerIndex].Env = append(tPod.Spec.Containers[containerIndex].Env, corev1.EnvVar{
Name: envName,
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: value,
},
},
@@ -874,5 +795,25 @@ func (p *Provider) configureFieldPathEnv(pod, tPod *v1.Pod) error {
delete(tPod.Annotations, name)
}
}

return nil
}

func addFieldPathAnnotationToEnv(envVars []corev1.EnvVar) {
for j, envVar := range envVars {
if envVar.ValueFrom == nil || envVar.ValueFrom.FieldRef == nil {
continue
}

fieldPath := envVar.ValueFrom.FieldRef.FieldPath
if fieldPath == translate.MetadataNameField {
envVar.ValueFrom.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNameAnnotation)
envVars[j] = envVar
}

if fieldPath == translate.MetadataNamespaceField {
envVar.ValueFrom.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNamespaceAnnotation)
envVars[j] = envVar
}
}
}

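The net effect, as a hedged before/after sketch (not part of the diff, and assuming translate.MetadataNameField is "metadata.name"): a downward-API env var that referenced the pod's virtual-cluster name ends up reading the translator's annotation on the host pod instead.

envs := []corev1.EnvVar{{
Name: "POD_NAME",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.name"},
},
}}
addFieldPathAnnotationToEnv(envs)
// envs[0] now resolves through the annotation instead:
// FieldPath == "metadata.annotations['<translate.ResourceNameAnnotation>']"
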
70 k3k-kubelet/provider/provider_test.go Normal file
@@ -0,0 +1,70 @@
package provider

import (
"reflect"
"testing"

corev1 "k8s.io/api/core/v1"
)

func Test_mergeEnvVars(t *testing.T) {
type args struct {
orig []corev1.EnvVar
new []corev1.EnvVar
}

tests := []struct {
name string
args args
want []corev1.EnvVar
}{
{
name: "orig and new are empty",
args: args{
orig: []corev1.EnvVar{},
new: []corev1.EnvVar{},
},
want: []corev1.EnvVar{},
},
{
name: "only orig is empty",
args: args{
orig: []corev1.EnvVar{},
new: []corev1.EnvVar{{Name: "FOO", Value: "new_val"}},
},
want: []corev1.EnvVar{{Name: "FOO", Value: "new_val"}},
},
{
name: "orig has a matching element",
args: args{
orig: []corev1.EnvVar{{Name: "FOO", Value: "old_val"}},
new: []corev1.EnvVar{{Name: "FOO", Value: "new_val"}},
},
want: []corev1.EnvVar{{Name: "FOO", Value: "new_val"}},
},
{
name: "orig have multiple elements",
args: args{
orig: []corev1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "old_val_1"}},
new: []corev1.EnvVar{{Name: "FOO_1", Value: "new_val_1"}},
},
want: []corev1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "new_val_1"}},
},
{
name: "orig and new have multiple elements and some not matching",
args: args{
orig: []corev1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "old_val_1"}},
new: []corev1.EnvVar{{Name: "FOO_1", Value: "new_val_1"}, {Name: "FOO_2", Value: "val_1"}},
},
want: []corev1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "new_val_1"}, {Name: "FOO_2", Value: "val_1"}},
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := mergeEnvVars(tt.args.orig, tt.args.new); !reflect.DeepEqual(got, tt.want) {
t.Errorf("mergeEnvVars() = %v, want %v", got, tt.want)
}
})
}
}
@@ -5,12 +5,14 @@ import (
"fmt"
"strings"

k3kcontroller "github.com/rancher/k3k/pkg/controller"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/ptr"

corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/ptr"

k3kcontroller "github.com/rancher/k3k/pkg/controller"
)

const (
@@ -30,12 +32,14 @@ func (p *Provider) transformTokens(ctx context.Context, pod, tPod *corev1.Pod) e
}

virtualSecretName := k3kcontroller.SafeConcatNameWithPrefix(pod.Spec.ServiceAccountName, "token")

virtualSecret := virtualSecret(virtualSecretName, pod.Namespace, pod.Spec.ServiceAccountName)
if err := p.VirtualClient.Create(ctx, virtualSecret); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
}

// extracting the tokens data from the secret we just created
virtualSecretKey := types.NamespacedName{
Name: virtualSecret.Name,
@@ -49,9 +53,11 @@ func (p *Provider) transformTokens(ctx context.Context, pod, tPod *corev1.Pod) e
if len(virtualSecret.Data) < 3 {
return fmt.Errorf("token secret %s/%s data is empty", virtualSecret.Namespace, virtualSecret.Name)
}

hostSecret := virtualSecret.DeepCopy()
hostSecret.Type = ""
hostSecret.Annotations = make(map[string]string)

p.Translator.TranslateTo(hostSecret)

if err := p.HostClient.Create(ctx, hostSecret); err != nil {
@@ -59,7 +65,9 @@ func (p *Provider) transformTokens(ctx context.Context, pod, tPod *corev1.Pod) e
return err
}
}

p.translateToken(tPod, hostSecret.Name)

return nil
}

@@ -96,6 +104,7 @@ func isKubeAccessVolumeFound(pod *corev1.Pod) bool {
return true
}
}

return false
}

@@ -103,6 +112,7 @@ func removeKubeAccessVolume(pod *corev1.Pod) {
for i, volume := range pod.Spec.Volumes {
if strings.HasPrefix(volume.Name, kubeAPIAccessPrefix) {
pod.Spec.Volumes = append(pod.Spec.Volumes[:i], pod.Spec.Volumes[i+1:]...)
break
}
}
// init containers
@@ -110,6 +120,17 @@ func removeKubeAccessVolume(pod *corev1.Pod) {
for j, mountPath := range container.VolumeMounts {
if strings.HasPrefix(mountPath.Name, kubeAPIAccessPrefix) {
pod.Spec.InitContainers[i].VolumeMounts = append(pod.Spec.InitContainers[i].VolumeMounts[:j], pod.Spec.InitContainers[i].VolumeMounts[j+1:]...)
break
}
}
}

// ephemeral containers
for i, container := range pod.Spec.EphemeralContainers {
for j, mountPath := range container.VolumeMounts {
if strings.HasPrefix(mountPath.Name, kubeAPIAccessPrefix) {
pod.Spec.EphemeralContainers[i].VolumeMounts = append(pod.Spec.EphemeralContainers[i].VolumeMounts[:j], pod.Spec.EphemeralContainers[i].VolumeMounts[j+1:]...)
break
}
}
}
@@ -118,13 +139,15 @@ func removeKubeAccessVolume(pod *corev1.Pod) {
for j, mountPath := range container.VolumeMounts {
if strings.HasPrefix(mountPath.Name, kubeAPIAccessPrefix) {
pod.Spec.Containers[i].VolumeMounts = append(pod.Spec.Containers[i].VolumeMounts[:j], pod.Spec.Containers[i].VolumeMounts[j+1:]...)
break
}
}
}
}

func addKubeAccessVolume(pod *corev1.Pod, hostSecretName string) {
var tokenVolumeName = k3kcontroller.SafeConcatNameWithPrefix(kubeAPIAccessPrefix)
tokenVolumeName := k3kcontroller.SafeConcatNameWithPrefix(kubeAPIAccessPrefix)

pod.Spec.Volumes = append(pod.Spec.Volumes, corev1.Volume{
Name: tokenVolumeName,
VolumeSource: corev1.VolumeSource{

@@ -17,9 +17,9 @@ func (t *translatorSizeQueue) Next() *remotecommand.TerminalSize {
if !ok {
return nil
}
newSize := remotecommand.TerminalSize{

return &remotecommand.TerminalSize{
Width: size.Width,
Height: size.Height,
}
return &newSize
}

@@ -2,10 +2,11 @@ package translate

import (
"encoding/hex"
"fmt"
"strings"

"sigs.k8s.io/controller-runtime/pkg/client"

"github.com/rancher/k3k/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/client"
)

const (
@@ -45,14 +46,17 @@ func (t *ToHostTranslator) TranslateTo(obj client.Object) {
if annotations == nil {
annotations = map[string]string{}
}

annotations[ResourceNameAnnotation] = obj.GetName()
annotations[ResourceNamespaceAnnotation] = obj.GetNamespace()
obj.SetAnnotations(annotations)

// add a label to quickly identify objects owned by a given virtual cluster
labels := obj.GetLabels()
if labels == nil {
labels = map[string]string{}
}

labels[ClusterNameLabel] = t.ClusterName
obj.SetLabels(labels)

@@ -77,6 +81,7 @@ func (t *ToHostTranslator) TranslateFrom(obj client.Object) {
// In this case, we need to have some sort of fallback or error return
name := annotations[ResourceNameAnnotation]
namespace := annotations[ResourceNamespaceAnnotation]

obj.SetName(name)
obj.SetNamespace(namespace)
delete(annotations, ResourceNameAnnotation)
@@ -91,20 +96,32 @@ func (t *ToHostTranslator) TranslateFrom(obj client.Object) {
// resource version/UID won't match what's in the virtual cluster.
obj.SetResourceVersion("")
obj.SetUID("")

}

// TranslateName returns the name of the resource in the host cluster. Will not update the object with this name.
func (t *ToHostTranslator) TranslateName(namespace string, name string) string {
var names []string

// some resources are not namespaced (i.e. priorityclasses)
// for these resources we skip the namespace to avoid having a name like: priorityclass--cluster-123
if namespace == "" {
names = []string{name, t.ClusterName}
} else {
names = []string{name, namespace, t.ClusterName}
}

// we need to come up with a name which is:
// - somewhat connectable to the original resource
// - a valid k8s name
// - idempotently calculatable
// - unique for this combination of name/namespace/cluster
namePrefix := fmt.Sprintf("%s-%s-%s", name, namespace, t.ClusterName)

namePrefix := strings.Join(names, "-")

// use + as a separator since it can't be in an object name
nameKey := fmt.Sprintf("%s+%s+%s", name, namespace, t.ClusterName)
nameKey := strings.Join(names, "+")
// it's possible that the suffix will be in the name, so we use hex to make it valid for k8s
nameSuffix := hex.EncodeToString([]byte(nameKey))

return controller.SafeConcatName(namePrefix, nameSuffix)
}

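To make the naming scheme concrete, a hedged sketch (not from the diff) of what TranslateName produces, assuming a translator constructed with only ClusterName set; the exact final string depends on whatever truncation controller.SafeConcatName applies, which is not shown in this hunk:

t := &ToHostTranslator{ClusterName: "mycluster"}
hostName := t.TranslateName("default", "web")
// prefix: "web-default-mycluster"
// suffix: hex of "web+default+mycluster"
// hostName is roughly "web-default-mycluster-7765622b64656661756c742b6d79636c7573746572",
// possibly shortened by controller.SafeConcatName.
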
160 main.go
@@ -1,4 +1,4 @@
//go:generate ./hack/update-codegen.sh
//go:generate ./scripts/generate
package main

import (
@@ -6,66 +6,39 @@ import (
"errors"
"fmt"
"os"
"os/signal"
"syscall"

"github.com/go-logr/zapr"
"github.com/spf13/cobra"
"go.uber.org/zap"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/controller-runtime/pkg/manager"

v1 "k8s.io/api/core/v1"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimelog "sigs.k8s.io/controller-runtime/pkg/log"

"github.com/rancher/k3k/cli/cmds"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/buildinfo"
"github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/controller/clusterset"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
"github.com/rancher/k3k/pkg/controller/policy"
"github.com/rancher/k3k/pkg/log"
"github.com/urfave/cli/v2"
"go.uber.org/zap"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/clientcmd"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimelog "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/manager"
)

var (
scheme = runtime.NewScheme()
clusterCIDR string
sharedAgentImage string
sharedAgentImagePullPolicy string
kubeconfig string
debug bool
logger *log.Logger
flags = []cli.Flag{
&cli.StringFlag{
Name: "kubeconfig",
EnvVars: []string{"KUBECONFIG"},
Usage: "Kubeconfig path",
Destination: &kubeconfig,
},
&cli.StringFlag{
Name: "cluster-cidr",
EnvVars: []string{"CLUSTER_CIDR"},
Usage: "Cluster CIDR to be added to the networkpolicy of the clustersets",
Destination: &clusterCIDR,
},
&cli.StringFlag{
Name: "shared-agent-image",
EnvVars: []string{"SHARED_AGENT_IMAGE"},
Usage: "K3K Virtual Kubelet image",
Value: "rancher/k3k:latest",
Destination: &sharedAgentImage,
},
&cli.StringFlag{
Name: "shared-agent-pull-policy",
EnvVars: []string{"SHARED_AGENT_PULL_POLICY"},
Usage: "K3K Virtual Kubelet image pull policy must be one of Always, IfNotPresent or Never",
Destination: &sharedAgentImagePullPolicy,
},
&cli.BoolFlag{
Name: "debug",
EnvVars: []string{"DEBUG"},
Usage: "Debug level logging",
Destination: &debug,
},
}
scheme = runtime.NewScheme()
config cluster.Config
kubeconfig string
kubeletPortRange string
webhookPortRange string
maxConcurrentReconciles int
debug bool
logger *log.Logger
)

func init() {
@@ -74,24 +47,43 @@ func init() {
}

func main() {
app := cmds.NewApp()
app.Flags = flags
app.Action = run
app.Version = buildinfo.Version
app.Before = func(clx *cli.Context) error {
if err := validate(); err != nil {
return err
}
logger = log.New(debug)
return nil
rootCmd := &cobra.Command{
Use: "k3k",
Short: "k3k controller",
Version: buildinfo.Version,
PreRunE: func(cmd *cobra.Command, args []string) error {
return validate()
},
PersistentPreRun: func(cmd *cobra.Command, args []string) {
cmds.InitializeConfig(cmd)
logger = log.New(debug)
},
RunE: run,
}
if err := app.Run(os.Args); err != nil {

rootCmd.PersistentFlags().BoolVar(&debug, "debug", false, "Debug level logging")
rootCmd.PersistentFlags().StringVar(&kubeconfig, "kubeconfig", "", "kubeconfig path")
rootCmd.PersistentFlags().StringVar(&config.ClusterCIDR, "cluster-cidr", "", "Cluster CIDR to be added to the networkpolicy")
rootCmd.PersistentFlags().StringVar(&config.SharedAgentImage, "shared-agent-image", "rancher/k3k-kubelet", "K3K Virtual Kubelet image")
rootCmd.PersistentFlags().StringVar(&config.SharedAgentImagePullPolicy, "shared-agent-image-pull-policy", "", "K3K Virtual Kubelet image pull policy must be one of Always, IfNotPresent or Never")
rootCmd.PersistentFlags().StringVar(&kubeletPortRange, "kubelet-port-range", "50000-51000", "Port Range for k3k kubelet in shared mode")
rootCmd.PersistentFlags().StringVar(&config.VirtualAgentImage, "virtual-agent-image", "rancher/k3s", "K3S Virtual Agent image")
rootCmd.PersistentFlags().StringVar(&config.VirtualAgentImagePullPolicy, "virtual-agent-image-pull-policy", "", "K3S Virtual Agent image pull policy must be one of Always, IfNotPresent or Never")
rootCmd.PersistentFlags().StringVar(&webhookPortRange, "webhook-port-range", "51001-52000", "Port Range for k3k kubelet webhook in shared mode")
rootCmd.PersistentFlags().StringVar(&config.K3SServerImage, "k3s-server-image", "rancher/k3s", "K3K server image")
rootCmd.PersistentFlags().StringVar(&config.K3SServerImagePullPolicy, "k3s-server-image-pull-policy", "", "K3K server image pull policy")
rootCmd.PersistentFlags().StringSliceVar(&config.ServerImagePullSecrets, "server-image-pull-secret", nil, "Image pull secret used for servers")
rootCmd.PersistentFlags().StringSliceVar(&config.AgentImagePullSecrets, "agent-image-pull-secret", nil, "Image pull secret used for agents")
rootCmd.PersistentFlags().IntVar(&maxConcurrentReconciles, "max-concurrent-reconciles", 50, "maximum number of concurrent reconciles")

if err := rootCmd.Execute(); err != nil {
logger.Fatalw("failed to run k3k controller", zap.Error(err))
}
}

func run(clx *cli.Context) error {
ctx := context.Background()
func run(cmd *cobra.Command, args []string) error {
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
defer stop()

logger.Info("Starting k3k - Version: " + buildinfo.Version)

@@ -103,7 +95,6 @@ func run(clx *cli.Context) error {
mgr, err := ctrl.NewManager(restConfig, manager.Options{
Scheme: scheme,
})

if err != nil {
return fmt.Errorf("failed to create new controller runtime manager: %v", err)
}
@@ -111,41 +102,50 @@ func run(clx *cli.Context) error {
ctrlruntimelog.SetLogger(zapr.NewLogger(logger.Desugar().WithOptions(zap.AddCallerSkip(1))))

logger.Info("adding cluster controller")
if err := cluster.Add(ctx, mgr, sharedAgentImage, sharedAgentImagePullPolicy); err != nil {

portAllocator, err := agent.NewPortAllocator(ctx, mgr.GetClient())
if err != nil {
return err
}

runnable := portAllocator.InitPortAllocatorConfig(ctx, mgr.GetClient(), kubeletPortRange, webhookPortRange)
if err := mgr.Add(runnable); err != nil {
return err
}

if err := cluster.Add(ctx, mgr, &config, maxConcurrentReconciles, portAllocator, nil); err != nil {
return fmt.Errorf("failed to add the new cluster controller: %v", err)
}

logger.Info("adding etcd pod controller")
if err := cluster.AddPodController(ctx, mgr); err != nil {

if err := cluster.AddPodController(ctx, mgr, maxConcurrentReconciles); err != nil {
return fmt.Errorf("failed to add the new cluster controller: %v", err)
}

logger.Info("adding clusterset controller")
if err := clusterset.Add(ctx, mgr, clusterCIDR); err != nil {
return fmt.Errorf("failed to add the clusterset controller: %v", err)
}
logger.Info("adding clusterpolicy controller")

if clusterCIDR == "" {
logger.Info("adding networkpolicy node controller")
if err := clusterset.AddNodeController(ctx, mgr); err != nil {
return fmt.Errorf("failed to add the clusterset node controller: %v", err)
}
if err := policy.Add(mgr, config.ClusterCIDR, maxConcurrentReconciles); err != nil {
return fmt.Errorf("failed to add the clusterpolicy controller: %v", err)
}

if err := mgr.Start(ctx); err != nil {
return fmt.Errorf("failed to start the manager: %v", err)
}

logger.Info("controller manager stopped")

return nil
}

func validate() error {
if sharedAgentImagePullPolicy != "" {
if sharedAgentImagePullPolicy != string(v1.PullAlways) &&
sharedAgentImagePullPolicy != string(v1.PullIfNotPresent) &&
sharedAgentImagePullPolicy != string(v1.PullNever) {
if config.SharedAgentImagePullPolicy != "" {
if config.SharedAgentImagePullPolicy != string(v1.PullAlways) &&
config.SharedAgentImagePullPolicy != string(v1.PullIfNotPresent) &&
config.SharedAgentImagePullPolicy != string(v1.PullNever) {
return errors.New("invalid value for shared agent image policy")
}
}

return nil
}

Some files were not shown because too many files have changed in this diff.