mirror of https://github.com/rancher/k3k.git (synced 2026-02-17 11:30:08 +00:00)

Compare commits: chart-0.3. ... addons_fea (4 commits)

| Author | SHA1 | Date |
|---|---|---|
|  | e426380828 |  |
|  | ecdec030fd |  |
|  | 5e55b87c02 |  |
|  | 79c7b8d36d |  |
.drone.yml (Normal file, 137 lines)
@@ -0,0 +1,137 @@
---
kind: pipeline
name: amd64

platform:
  os: linux
  arch: amd64

steps:
- name: build
  image: rancher/dapper:v0.5.0
  environment:
    GITHUB_TOKEN:
      from_secret: github_token
  commands:
  - dapper ci
  - echo "${DRONE_TAG}-amd64" | sed -e 's/+/-/g' >.tags
  volumes:
  - name: docker
    path: /var/run/docker.sock
  when:
    branch:
      exclude:
      - k3k-chart

- name: package-chart
  image: rancher/dapper:v0.5.0
  environment:
    GITHUB_TOKEN:
      from_secret: github_token
  commands:
  - dapper package-chart
  volumes:
  - name: docker
    path: /var/run/docker.sock
  when:
    branch:
    - k3k-chart
    instance:
    - drone-publish.rancher.io

- name: release-chart
  image: rancher/dapper:v0.5.0
  environment:
    GITHUB_TOKEN:
      from_secret: github_token
  commands:
  - dapper release-chart
  volumes:
  - name: docker
    path: /var/run/docker.sock
  when:
    branch:
    - k3k-chart
    instance:
    - drone-publish.rancher.io

- name: github_binary_release
  image: ibuildthecloud/github-release:v0.0.1
  settings:
    api_key:
      from_secret: github_token
    prerelease: true
    checksum:
    - sha256
    checksum_file: CHECKSUMsum-amd64.txt
    checksum_flatten: true
    files:
    - "bin/*"
  when:
    instance:
    - drone-publish.rancher.io
    ref:
    - refs/head/master
    - refs/tags/*
    event:
    - tag
    branch:
      exclude:
      - k3k-chart

- name: docker-publish
  image: plugins/docker
  settings:
    dockerfile: package/Dockerfile
    password:
      from_secret: docker_password
    repo: "rancher/k3k"
    username:
      from_secret: docker_username
  when:
    instance:
    - drone-publish.rancher.io
    ref:
    - refs/head/master
    - refs/tags/*
    event:
    - tag
    branch:
      exclude:
      - k3k-chart

volumes:
- name: docker
  host:
    path: /var/run/docker.sock

---
kind: pipeline
type: docker
name: manifest

platform:
  os: linux
  arch: amd64

steps:
- name: push-runtime-manifest
  image: plugins/manifest
  settings:
    username:
      from_secret: docker_username
    password:
      from_secret: docker_password
    spec: manifest-runtime.tmpl
  when:
    event:
    - tag
    instance:
    - drone-publish.rancher.io
    ref:
    - refs/head/master
    - refs/tags/*
    branch:
      exclude:
      - k3k-chart

depends_on:
- amd64
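The `push-runtime-manifest` step points `plugins/manifest` at `spec: manifest-runtime.tmpl`, a file that is not part of this compare. As a rough illustration only (the real template may differ), a spec for that plugin typically lists the per-architecture images to stitch into a single multi-arch tag:

```yaml
# Hypothetical sketch of a plugins/manifest spec file; the actual
# manifest-runtime.tmpl in the repository may look different.
image: rancher/k3k:{{build.tag}}
manifests:
  - image: rancher/k3k:{{build.tag}}-amd64
    platform:
      architecture: amd64
      os: linux
```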
.github/ISSUE_TEMPLATE/bug_report.md (vendored, 41 lines)
@@ -1,41 +0,0 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''

---

<!-- Thanks for helping us to improve K3K! We welcome all bug reports. Please fill out each area of the template so we can better help you. Comments like this will be hidden when you post but you can delete them if you wish. -->

**Environmental Info:**
Host Cluster Version:
<!-- For example K3S v1.32.1+k3s1 or RKE2 v1.31.5+rke2r1 -->

Node(s) CPU architecture, OS, and Version:
<!-- Provide the output from "uname -a" on the node(s) -->

Host Cluster Configuration:
<!-- Provide some basic information on the cluster configuration. For example, "1 servers, 2 agents CNI: Flannel". -->

K3K Cluster Configuration:
<!-- Provide some basic information on the cluster configuration. For example, "3 servers, 2 agents". -->

**Describe the bug:**
<!-- A clear and concise description of what the bug is. -->

**Steps To Reproduce:**
- Created a cluster with `k3k create`:

**Expected behavior:**
<!-- A clear and concise description of what you expected to happen. -->

**Actual behavior:**
<!-- A clear and concise description of what actually happened. -->

**Additional context / logs:**
<!-- Add any other context and/or logs about the problem here. -->
<!-- kubectl logs -n k3k-system -l app.kubernetes.io/instance=k3k -->
<!-- $ kubectl logs -n <cluster-namespace> k3k-<cluster-name>-server-0 -->
<!-- $ kubectl logs -n <cluster-namespace> -l cluster=<cluster-name>,mode=shared # in shared mode -->
.github/workflows/build.yml (vendored, 37 lines)
@@ -1,37 +0,0 @@
name: Build

on:
  push:
    branches:
      - main
  pull_request:

permissions:
  contents: read

jobs:
  build:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version-file: go.mod

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

      - name: Run GoReleaser
        uses: goreleaser/goreleaser-action@v6
        with:
          distribution: goreleaser
          version: v2
          args: --clean --snapshot
        env:
          REPO: ${{ github.repository }}
          REGISTRY:
.github/workflows/chart.yml (vendored, 45 lines)
@@ -1,45 +0,0 @@
name: Chart

on:
  workflow_dispatch:
  push:
    tags:
      - "chart-*"

permissions:
  contents: write

jobs:
  chart-release:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Check tag
        if: github.event_name == 'push'
        run: |
          pushed_tag=$(echo ${{ github.ref_name }} | sed "s/chart-//")
          chart_tag=$(yq .version charts/k3k/Chart.yaml)

          echo pushed_tag=${pushed_tag} chart_tag=${chart_tag}
          [ "${pushed_tag}" == "${chart_tag}" ]

      - name: Configure Git
        run: |
          git config user.name "$GITHUB_ACTOR"
          git config user.email "$GITHUB_ACTOR@users.noreply.github.com"

      - name: Install Helm
        uses: azure/setup-helm@v4
        env:
          GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"

      - name: Run chart-releaser
        uses: helm/chart-releaser-action@v1.6.0
        with:
          config: .cr.yaml
        env:
          CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
.github/workflows/release-delete.yml (vendored, 61 lines)
@@ -1,61 +0,0 @@
name: Release - Delete Draft

on:
  workflow_dispatch:
    inputs:
      tag:
        type: string
        description: The tag of the release

permissions:
  contents: write
  packages: write

env:
  GH_TOKEN: ${{ github.token }}

jobs:
  release-delete:
    runs-on: ubuntu-latest

    steps:
      - name: Check tag
        if: inputs.tag == ''
        run: echo "::error::Missing tag from input" && exit 1

      - name: Checkout code
        uses: actions/checkout@v4

      - name: Check if release is draft
        run: |
          CURRENT_TAG=${{ inputs.tag }}
          isDraft=$(gh release view ${CURRENT_TAG} --json isDraft --jq ".isDraft")
          if [ "$isDraft" = true ]; then
            echo "Release ${CURRENT_TAG} is draft"
          else
            echo "::error::Cannot delete non-draft release" && exit 1
          fi

      - name: Delete packages from Github Container Registry
        run: |
          CURRENT_TAG=${{ inputs.tag }}
          echo "Deleting packages with tag ${CURRENT_TAG}"

          JQ_QUERY=".[] | select(.metadata.container.tags[] == \"${CURRENT_TAG}\")"

          for package in k3k k3k-kubelet
          do
            echo "Deleting ${package} image"
            PACKAGE_TO_DELETE=$(gh api /user/packages/container/${package}/versions --jq "${JQ_QUERY}")
            echo $PACKAGE_TO_DELETE | jq

            PACKAGE_ID=$(echo $PACKAGE_TO_DELETE | jq .id)
            echo "Deleting ${PACKAGE_ID}"
            gh api --method DELETE /user/packages/container/${package}/versions/${PACKAGE_ID}
          done

      - name: Delete Github release
        run: |
          CURRENT_TAG=${{ inputs.tag }}
          echo "Deleting release ${CURRENT_TAG}"
          gh release delete ${CURRENT_TAG}
.github/workflows/release.yml (vendored, 90 lines)
@@ -1,90 +0,0 @@
name: Release

on:
  push:
    tags:
      - "v*"
  workflow_dispatch:
    inputs:
      commit:
        type: string
        description: Checkout a specific commit

permissions:
  contents: write
  packages: write
  id-token: write

jobs:
  release:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          fetch-tags: true

      - name: Checkout code at the specific commit
        if: inputs.commit != ''
        run: git checkout ${{ inputs.commit }}

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version-file: go.mod

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

      - name: "Read secrets"
        uses: rancher-eio/read-vault-secrets@main
        if: github.repository_owner == 'rancher'
        with:
          secrets: |
            secret/data/github/repo/${{ github.repository }}/dockerhub/${{ github.repository_owner }}/credentials username | DOCKER_USERNAME ;
            secret/data/github/repo/${{ github.repository }}/dockerhub/${{ github.repository_owner }}/credentials password | DOCKER_PASSWORD ;

      # Manually dispatched workflows (or forks) will use ghcr.io
      - name: Setup ghcr.io
        if: github.event_name == 'workflow_dispatch' || github.repository_owner != 'rancher'
        run: |
          echo "REGISTRY=ghcr.io" >> $GITHUB_ENV
          echo "DOCKER_USERNAME=${{ github.actor }}" >> $GITHUB_ENV
          echo "DOCKER_PASSWORD=${{ github.token }}" >> $GITHUB_ENV

      - name: Login to container registry
        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ env.DOCKER_USERNAME }}
          password: ${{ env.DOCKER_PASSWORD }}

      # If the tag does not exist, the workflow was manually triggered.
      # That means we are creating temporary nightly builds, with a "fake" local tag
      - name: Check release tag
        id: release-tag
        run: |
          CURRENT_TAG=$(git describe --tag --always --match="v[0-9]*")

          if git show-ref --tags ${CURRENT_TAG} --quiet; then
            echo "tag ${CURRENT_TAG} already exists";
          else
            echo "tag ${CURRENT_TAG} does not exist"
            git tag ${CURRENT_TAG}
          fi

          echo "CURRENT_TAG=${CURRENT_TAG}" >> "$GITHUB_OUTPUT"

      - name: Run GoReleaser
        uses: goreleaser/goreleaser-action@v6
        with:
          distribution: goreleaser
          version: v2
          args: --clean
        env:
          GITHUB_TOKEN: ${{ github.token }}
          GORELEASER_CURRENT_TAG: ${{ steps.release-tag.outputs.CURRENT_TAG }}
          REGISTRY: ${{ env.REGISTRY }}
          REPO: ${{ github.repository }}
.github/workflows/test.yaml (vendored, 92 lines)
@@ -1,92 +0,0 @@
name: Tests

on:
  push:
  pull_request:
  workflow_dispatch:

permissions:
  contents: read

jobs:
  lint:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - uses: actions/setup-go@v5
        with:
          go-version-file: go.mod

      - name: golangci-lint
        uses: golangci/golangci-lint-action@v6
        with:
          args: --timeout=5m
          version: v1.60

  tests:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - uses: actions/setup-go@v5
        with:
          go-version-file: go.mod

      - name: Validate
        run: make validate

      - name: Run unit tests
        run: make test-unit

  tests-e2e:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          fetch-tags: true

      - uses: actions/setup-go@v5
        with:
          go-version-file: go.mod

      - name: Validate
        run: make validate

      - name: Install Ginkgo
        run: go install github.com/onsi/ginkgo/v2/ginkgo

      - name: Build and package
        run: |
          make build
          make package

          # add k3kcli to $PATH
          echo "${{ github.workspace }}/bin" >> $GITHUB_PATH

      - name: Check k3kcli
        run: k3kcli -v

      - name: Run e2e tests
        run: make test-e2e

      - name: Archive k3s logs
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: k3s-logs
          path: /tmp/k3s.log

      - name: Archive k3k logs
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: k3k-logs
          path: /tmp/k3k.log
.gitignore (vendored, 5 lines)
@@ -4,7 +4,4 @@
/dist
*.swp
.idea
.vscode/
__debug*
*-kubeconfig.yaml
.envtest
golangci-lint configuration
@@ -1,13 +0,0 @@
linters:
  enable:
    # default linters
    - errcheck
    - gosimple
    - govet
    - ineffassign
    - staticcheck
    - unused

    # extra
    - misspell
    - wsl
.goreleaser.yaml (148 lines)
@@ -1,148 +0,0 @@
version: 2

release:
  draft: true
  replace_existing_draft: true
  prerelease: auto

before:
  hooks:
    - go mod tidy
    - go generate ./...

builds:
  - id: k3k
    env:
      - CGO_ENABLED=0
    goos:
      - linux
    goarch:
      - "amd64"
      - "arm64"
      - "s390x"
    ldflags:
      - -w -s # strip debug info and symbol table
      - -X "github.com/rancher/k3k/pkg/buildinfo.Version={{ .Tag }}"

  - id: k3k-kubelet
    main: ./k3k-kubelet
    binary: k3k-kubelet
    env:
      - CGO_ENABLED=0
    goos:
      - linux
    goarch:
      - "amd64"
      - "arm64"
      - "s390x"
    ldflags:
      - -w -s # strip debug info and symbol table
      - -X "github.com/rancher/k3k/pkg/buildinfo.Version={{ .Tag }}"

  - id: k3kcli
    main: ./cli
    binary: k3kcli
    env:
      - CGO_ENABLED=0
    goarch:
      - "amd64"
      - "arm64"
    ldflags:
      - -w -s # strip debug info and symbol table
      - -X "github.com/rancher/k3k/pkg/buildinfo.Version={{ .Tag }}"

archives:
  - format: binary
    name_template: >-
      {{ .Binary }}-{{- .Os }}-{{ .Arch }}
      {{- if .Arm }}v{{ .Arm }}{{ end }}
    format_overrides:
      - goos: windows
        format: zip

# For the image_templates we are using the following expression to build images for the correct registry
# {{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}
#
# REGISTRY=        -> rancher/k3k:vX.Y.Z
# REGISTRY=ghcr.io -> ghcr.io/rancher/k3k:vX.Y.Z
#
dockers:
  # k3k amd64
  - use: buildx
    goarch: amd64
    ids:
      - k3k
      - k3kcli
    dockerfile: "package/Dockerfile.k3k"
    skip_push: false
    image_templates:
      - "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}:{{ .Tag }}-amd64"
    build_flag_templates:
      - "--build-arg=BIN_K3K=k3k"
      - "--build-arg=BIN_K3KCLI=k3kcli"
      - "--pull"
      - "--platform=linux/amd64"

  # k3k arm64
  - use: buildx
    goarch: arm64
    ids:
      - k3k
      - k3kcli
    dockerfile: "package/Dockerfile.k3k"
    skip_push: false
    image_templates:
      - "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}:{{ .Tag }}-arm64"
    build_flag_templates:
      - "--build-arg=BIN_K3K=k3k"
      - "--build-arg=BIN_K3KCLI=k3kcli"
      - "--pull"
      - "--platform=linux/arm64"

  # k3k-kubelet amd64
  - use: buildx
    goarch: amd64
    ids:
      - k3k-kubelet
    dockerfile: "package/Dockerfile.k3k-kubelet"
    skip_push: false
    image_templates:
      - "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}-kubelet:{{ .Tag }}-amd64"
    build_flag_templates:
      - "--build-arg=BIN_K3K_KUBELET=k3k-kubelet"
      - "--pull"
      - "--platform=linux/amd64"

  # k3k-kubelet arm64
  - use: buildx
    goarch: arm64
    ids:
      - k3k-kubelet
    dockerfile: "package/Dockerfile.k3k-kubelet"
    skip_push: false
    image_templates:
      - "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}-kubelet:{{ .Tag }}-arm64"
    build_flag_templates:
      - "--build-arg=BIN_K3K_KUBELET=k3k-kubelet"
      - "--pull"
      - "--platform=linux/arm64"

docker_manifests:
  # k3k
  - name_template: "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}:{{ .Tag }}"
    image_templates:
      - "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}:{{ .Tag }}-amd64"
      - "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}:{{ .Tag }}-arm64"

  # k3k-kubelet
  - name_template: "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}-kubelet:{{ .Tag }}"
    image_templates:
      - "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}-kubelet:{{ .Tag }}-amd64"
      - "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}-kubelet:{{ .Tag }}-arm64"

changelog:
  sort: asc
  filters:
    exclude:
      - "^docs:"
      - "^test:"
Dockerfile.dapper (Normal file, 24 lines)
@@ -0,0 +1,24 @@
ARG GOLANG=rancher/hardened-build-base:v1.20.6b2
FROM ${GOLANG}

ARG DAPPER_HOST_ARCH
ENV ARCH $DAPPER_HOST_ARCH

RUN apk -U add bash git gcc musl-dev docker vim less file curl wget ca-certificates
RUN if [ "${ARCH}" == "amd64" ]; then \
        curl -sL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s v1.15.0; \
    fi

RUN curl -sL https://github.com/helm/chart-releaser/releases/download/v1.5.0/chart-releaser_1.5.0_linux_${ARCH}.tar.gz | tar -xz cr \
    && mv cr /bin/

ENV GO111MODULE on
ENV DAPPER_ENV REPO TAG DRONE_TAG CROSS GITHUB_TOKEN
ENV DAPPER_SOURCE /go/src/github.com/rancher/k3k/
ENV DAPPER_OUTPUT ./bin ./dist ./deploy
ENV DAPPER_DOCKER_SOCKET true
ENV HOME ${DAPPER_SOURCE}
WORKDIR ${DAPPER_SOURCE}

ENTRYPOINT ["./ops/entry"]
CMD ["ci"]
Makefile (113 lines)
@@ -1,106 +1,15 @@
TARGETS := $(shell ls ops)

REPO ?= rancher
VERSION ?= $(shell git describe --tags --always --dirty --match="v[0-9]*")
.dapper:
	@echo Downloading dapper
	@curl -sL https://releases.rancher.com/dapper/latest/dapper-$$(uname -s)-$$(uname -m) > .dapper.tmp
	@chmod +x .dapper.tmp
	@./.dapper.tmp -v
	@mv .dapper.tmp .dapper

## Dependencies
$(TARGETS): .dapper
	./.dapper $@

GOLANGCI_LINT_VERSION := v1.63.4
CONTROLLER_TOOLS_VERSION ?= v0.14.0
GINKGO_VERSION ?= v2.21.0
ENVTEST_VERSION ?= latest
ENVTEST_K8S_VERSION := 1.31.0
CRD_REF_DOCS_VER ?= v0.1.0
.DEFAULT_GOAL := default

GOLANGCI_LINT ?= go run github.com/golangci/golangci-lint/cmd/golangci-lint@$(GOLANGCI_LINT_VERSION)
CONTROLLER_GEN ?= go run sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_TOOLS_VERSION)
GINKGO ?= go run github.com/onsi/ginkgo/v2/ginkgo@$(GINKGO_VERSION)
CRD_REF_DOCS := go run github.com/elastic/crd-ref-docs@$(CRD_REF_DOCS_VER)

ENVTEST ?= go run sigs.k8s.io/controller-runtime/tools/setup-envtest@$(ENVTEST_VERSION)
ENVTEST_DIR ?= $(shell pwd)/.envtest
export KUBEBUILDER_ASSETS ?= $(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(ENVTEST_DIR) -p path)


.PHONY: all
all: version build-crds build package ## Run 'make' or 'make all' to run 'version', 'build-crds', 'build' and 'package'

.PHONY: version
version: ## Print the current version
	@echo $(VERSION)

.PHONY: build
build: ## Build the K3k binaries (k3k, k3k-kubelet and k3kcli)
	@VERSION=$(VERSION) ./scripts/build

.PHONY: package
package: package-k3k package-k3k-kubelet ## Package the k3k and k3k-kubelet Docker images

.PHONY: package-%
package-%:
	docker build -f package/Dockerfile.$* \
		-t $(REPO)/$*:$(VERSION) \
		-t $(REPO)/$*:latest \
		-t $(REPO)/$*:dev .

.PHONY: push
push: push-k3k push-k3k-kubelet ## Push the K3k images to the registry

.PHONY: push-%
push-%:
	docker push $(REPO)/$*:$(VERSION)
	docker push $(REPO)/$*:latest
	docker push $(REPO)/$*:dev


.PHONY: test
test: ## Run all the tests
	$(GINKGO) -v -r --label-filter=$(label-filter)

.PHONY: test-unit
test-unit: ## Run the unit tests (skips the e2e)
	$(GINKGO) -v -r --skip-file=tests/*

.PHONY: test-controller
test-controller: ## Run the controller tests (pkg/controller)
	$(GINKGO) -v -r pkg/controller

.PHONY: test-e2e
test-e2e: ## Run the e2e tests
	$(GINKGO) -v -r tests

.PHONY: build-crds
build-crds: ## Build the CRDs specs
	@# This will return non-zero until all of our objects in ./pkg/apis can generate valid crds.
	@# allowDangerousTypes is needed for structs that use floats
	$(CONTROLLER_GEN) crd:generateEmbeddedObjectMeta=true,allowDangerousTypes=false \
		paths=./pkg/apis/... \
		output:crd:dir=./charts/k3k/crds

.PHONY: docs
docs: ## Build the CRDs and CLI docs
	$(CRD_REF_DOCS) --config=./docs/crds/config.yaml --renderer=markdown --source-path=./pkg/apis/k3k.io/v1alpha1 --output-path=./docs/crds/crd-docs.md
	@go run ./docs/cli/genclidoc.go

.PHONY: lint
lint: ## Find any linting issues in the project
	$(GOLANGCI_LINT) run --timeout=5m

.PHONY: validate
validate: build-crds docs ## Validate the project checking for any dependency or doc mismatch
	$(GINKGO) unfocus
	go mod tidy
	git status --porcelain
	git --no-pager diff --exit-code

.PHONY: install
install: ## Install K3k with Helm on the targeted Kubernetes cluster
	helm upgrade --install --namespace k3k-system --create-namespace \
		--set image.repository=$(REPO)/k3k \
		--set image.tag=$(VERSION) \
		--set sharedAgent.image.repository=$(REPO)/k3k-kubelet \
		--set sharedAgent.image.tag=$(VERSION) \
		k3k ./charts/k3k/

.PHONY: help
help: ## Show this help.
	@egrep -h '\s##\s' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m %-30s\033[0m %s\n", $$1, $$2}'
.PHONY: $(TARGETS)
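The `install` target wires the locally built image tags into the chart with `--set` flags. For reference, the same overrides expressed as a Helm values file would look roughly like the sketch below; it only uses the keys from the `--set` flags above, and the repository and tag values are illustrative.

```yaml
# Illustrative equivalent of the --set flags in `make install`.
image:
  repository: rancher/k3k
  tag: v0.3.2          # example VERSION
sharedAgent:
  image:
    repository: rancher/k3k-kubelet
    tag: v0.3.2        # example VERSION
```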
README.md (193 lines)
@@ -1,183 +1,58 @@
# K3k: Kubernetes in Kubernetes
# K3K

[badge](https://shields.io/)
[Go Report Card](https://goreportcard.com/report/github.com/rancher/k3k)

A Kubernetes in Kubernetes tool, k3k provides a way to run multiple embedded isolated k3s clusters on your kubernetes cluster.

## Example

K3k, Kubernetes in Kubernetes, is a tool that empowers you to create and manage isolated K3s clusters within your existing Kubernetes environment. It enables efficient multi-tenancy, streamlined experimentation, and robust resource isolation, minimizing infrastructure costs by allowing you to run multiple lightweight Kubernetes clusters on the same physical host. K3k offers both "shared" mode, optimizing resource utilization, and "virtual" mode, providing complete isolation with dedicated K3s server pods. This allows you to access a full Kubernetes experience without the overhead of managing separate physical resources.

K3k integrates seamlessly with Rancher for simplified management of your embedded clusters.

**Experimental Tool**

This project is still under development and is considered experimental. It may have limitations, bugs, or changes. Please use with caution and report any issues you encounter. We appreciate your feedback as we continue to refine and improve this tool.

## Features and Benefits

- **Resource Isolation:** Ensure workload isolation and prevent resource contention between teams or applications. K3k allows you to define resource limits and quotas for each embedded cluster, guaranteeing that one team's workloads won't impact another's performance.

- **Simplified Multi-Tenancy:** Easily create dedicated Kubernetes environments for different users or projects, simplifying access control and management. Provide each team with their own isolated cluster, complete with its own namespaces, RBAC, and resource quotas, without the complexity of managing multiple physical clusters.

- **Lightweight and Fast:** Leverage the lightweight nature of K3s to spin up and tear down clusters quickly, accelerating development and testing cycles. Spin up a new K3k cluster in seconds, test your application in a clean environment, and tear it down just as quickly, streamlining your CI/CD pipeline.

- **Optimized Resource Utilization (Shared Mode):** Maximize your infrastructure investment by running multiple K3s clusters on the same physical host. K3k's shared mode allows you to efficiently share underlying resources, reducing overhead and minimizing costs.

- **Complete Isolation (Virtual Mode):** For enhanced security and isolation, K3k's virtual mode provides dedicated K3s server pods for each embedded cluster. This ensures complete separation of workloads and eliminates any potential resource contention or security risks.

- **Rancher Integration:** Simplify the management of your K3k clusters with Rancher. Leverage Rancher's intuitive UI and powerful features to monitor, manage, and scale your embedded clusters with ease.

## Installation

This section provides instructions on how to install K3k and the `k3kcli`.

### Prerequisites

* [Helm](https://helm.sh) must be installed to use the charts. Please refer to Helm's [documentation](https://helm.sh/docs) to get started.
* An existing [RKE2](https://docs.rke2.io/install/quickstart) Kubernetes cluster (recommended).
* A configured storage provider with a default storage class.

**Note:** If you do not have a storage provider, you can configure the cluster to use ephemeral or static storage. Please consult the [k3kcli advanced usage](./docs/advanced-usage.md#using-the-cli) for instructions on using these options.

### Install the K3k controller

1. Add the K3k Helm repository:

```bash
helm repo add k3k https://rancher.github.io/k3k
helm repo update
```

2. Install the K3k controller:

```bash
helm install --namespace k3k-system --create-namespace k3k k3k/k3k --devel
```

**NOTE:** K3k is currently under development, so the chart is marked as a development chart. This means you need to add the `--devel` flag to install it. For production use, keep an eye on releases for stable versions. We recommend using the latest released version when possible.

### Install the `k3kcli`

The `k3kcli` provides a quick and easy way to create K3k clusters and automatically exposes them via a kubeconfig.

To install it, simply download the latest available version for your architecture from the GitHub Releases page.

For example, you can download the Linux amd64 version with:

```
wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v0.3.0/k3kcli-linux-amd64 && \
chmod +x k3kcli && \
sudo mv k3kcli /usr/local/bin
```

You should now be able to run:

```bash
-> % k3kcli --version
k3kcli Version: v0.3.0
```

An example of creating a k3k cluster on an RKE2 host using k3kcli:

[asciinema demo](https://asciinema.org/a/eYlc3dsL2pfP2B50i3Ea8MJJp)

## Usage

This section provides examples of how to use the `k3kcli` to manage your K3k clusters.
K3K consists of a controller and a CLI tool; the controller can be deployed via a Helm chart and the CLI can be downloaded from the releases page.

**K3k operates within the context of your currently configured `kubectl` context.** This means that K3k respects the standard Kubernetes mechanisms for context configuration, including the `--kubeconfig` flag, the `$KUBECONFIG` environment variable, and the default `$HOME/.kube/config` file. Any K3k clusters you create will reside within the Kubernetes cluster that your `kubectl` is currently pointing to.

### Deploy Controller

[Helm](https://helm.sh) must be installed to use the charts. Please refer to Helm's [documentation](https://helm.sh/docs) to get started.

### Creating a K3k Cluster

Once Helm has been set up correctly, add the repo as follows:

To create a new K3k cluster, use the following command:

```bash
k3kcli cluster create mycluster
```

> [!NOTE]
> **Creating a K3k Cluster on a Rancher-Managed Host Cluster**
>
> If your *host* Kubernetes cluster is managed by Rancher (e.g., your kubeconfig's `server` address includes a Rancher URL), use the `--kubeconfig-server` flag when creating your K3k cluster:
>
>```bash
>k3kcli cluster create --kubeconfig-server <host_node_IP_or_load_balancer_IP> mycluster
>```
>
> This ensures the generated kubeconfig connects to the correct endpoint.

When the K3s server is ready, `k3kcli` will generate the necessary kubeconfig file and print instructions on how to use it.

Here's an example of the output:

```bash
INFO[0000] Creating a new cluster [mycluster]
INFO[0000] Extracting Kubeconfig for [mycluster] cluster
INFO[0000] waiting for cluster to be available..
INFO[0073] certificate CN=system:admin,O=system:masters signed by CN=k3s-client-ca@1738746570: notBefore=2025-02-05 09:09:30 +0000 UTC notAfter=2026-02-05 09:10:42 +0000 UTC
INFO[0073] You can start using the cluster with:

export KUBECONFIG=/my/current/directory/mycluster-kubeconfig.yaml
kubectl cluster-info
```sh
helm repo add k3k https://rancher.github.io/k3k
```

After exporting the generated kubeconfig, you should be able to reach your Kubernetes cluster:
If you had already added this repo earlier, run `helm repo update` to retrieve the latest versions of the packages. You can then run `helm search repo k3k --devel` to see the charts.

```bash
export KUBECONFIG=/my/current/directory/mycluster-kubeconfig.yaml
kubectl get nodes
kubectl get pods -A
To install the k3k chart:

```sh
helm install my-k3k k3k/k3k --devel
```

You can also directly create a Cluster resource in some namespace, to create a K3k cluster:
To uninstall the chart:

```bash
kubectl apply -f - <<EOF
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
  name: mycluster
  namespace: k3k-mycluster
EOF
```sh
helm delete my-k3k
```

and use the `k3kcli` to retrieve the kubeconfig:
**NOTE: Since k3k is still under development, the chart is marked as a development chart, this means that you need to add the `--devel` flag to install it.**

```bash
k3kcli kubeconfig generate --namespace k3k-mycluster --name mycluster
### Create a new cluster

To create a new cluster you need to install and run the cli or create a cluster object, to install the cli:

```sh
wget https://github.com/rancher/k3k/releases/download/v0.0.0-alpha6/k3kcli
chmod +x k3kcli
sudo cp k3kcli /usr/local/bin
```

To create a new cluster you can use:

### Deleting a K3k Cluster

To delete a K3k cluster, use the following command:

```bash
k3kcli cluster delete mycluster
```sh
k3k cluster create --name example-cluster --token test
```

## Architecture

For a detailed explanation of the K3k architecture, please refer to the [Architecture documentation](./docs/architecture.md).

## Advanced Usage

For more in-depth examples and information on advanced K3k usage, including details on shared vs. virtual modes, resource management, and other configuration options, please see the [Advanced Usage documentation](./docs/advanced-usage.md).

## Development

If you're interested in building K3k from source or contributing to the project, please refer to the [Development documentation](./docs/development.md).

## License

Copyright (c) 2014-2025 [SUSE](http://rancher.com/)

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0.

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
charts/k3k/Chart.yaml
@@ -2,5 +2,5 @@ apiVersion: v2
name: k3k
description: A Helm chart for K3K
type: application
version: 0.3.2
appVersion: v0.3.2
version: 0.1.0-r1
appVersion: 0.0.0-alpha6
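For context, the `Chart` workflow shown earlier only releases when the pushed Git tag matches this chart version: it strips the `chart-` prefix from the tag and compares the remainder with `.version` from `charts/k3k/Chart.yaml`. With the values in this diff, the pairing would be:

```yaml
# Git tag         -> expected Chart.yaml version
# chart-0.3.2     -> version: 0.3.2
# chart-0.1.0-r1  -> version: 0.1.0-r1
```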
charts/k3k/crds/cluster.yaml (Normal file, 102 lines)
@@ -0,0 +1,102 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: clusters.k3k.io
spec:
  group: k3k.io
  versions:
    - name: v1alpha1
      served: true
      storage: true
      schema:
        openAPIV3Schema:
          type: object
          properties:
            spec:
              type: object
              properties:
                name:
                  type: string
                version:
                  type: string
                servers:
                  type: integer
                agents:
                  type: integer
                token:
                  type: string
                clusterCIDR:
                  type: string
                serviceCIDR:
                  type: string
                clusterDNS:
                  type: string
                serverArgs:
                  type: array
                  items:
                    type: string
                agentArgs:
                  type: array
                  items:
                    type: string
                tlsSANs:
                  type: array
                  items:
                    type: string
                persistence:
                  type: object
                  properties:
                    type:
                      type: string
                      default: "ephermal"
                    storageClassName:
                      type: string
                    storageRequestSize:
                      type: string
                addons:
                  type: array
                  items:
                    type: object
                    properties:
                      secretNamespace:
                        type: string
                      secretRef:
                        type: string
                expose:
                  type: object
                  properties:
                    ingress:
                      type: object
                      properties:
                        enabled:
                          type: boolean
                        ingressClassName:
                          type: string
                    loadbalancer:
                      type: object
                      properties:
                        enabled:
                          type: boolean
                    nodePort:
                      type: object
                      properties:
                        enabled:
                          type: boolean
            status:
              type: object
              properties:
                overrideClusterCIDR:
                  type: boolean
                clusterCIDR:
                  type: string
                overrideServiceCIDR:
                  type: boolean
                serviceCIDR:
                  type: string
                clusterDNS:
                  type: string
  scope: Cluster
  names:
    plural: clusters
    singular: cluster
    kind: Cluster
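To illustrate the schema above, a minimal Cluster object for this (cluster-scoped) CRD could look like the sketch below. The field values are made up for the example; only the field names come from the schema, and `ephermal` is kept as spelled in the CRD's default.

```yaml
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
  name: example-cluster
spec:
  version: v1.28.2-k3s1   # example K3s version
  servers: 1
  agents: 2
  token: test             # example token, as in the README's `k3k cluster create --token test`
  persistence:
    type: ephermal        # the schema's default value
  expose:
    ingress:
      enabled: true
      ingressClassName: nginx   # example ingress class
```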
Generated CustomResourceDefinition for clusters.k3k.io
@@ -1,316 +0,0 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    controller-gen.kubebuilder.io/version: v0.14.0
  name: clusters.k3k.io
spec:
  group: k3k.io
  names:
    kind: Cluster
    listKind: ClusterList
    plural: clusters
    singular: cluster
  scope: Namespaced
  versions:
    - name: v1alpha1
      schema:
        openAPIV3Schema:
          description: |-
            Cluster defines a virtual Kubernetes cluster managed by k3k.
            It specifies the desired state of a virtual cluster, including version, node configuration, and networking.
            k3k uses this to provision and manage these virtual clusters.
          properties:
            apiVersion:
              description: |-
                APIVersion defines the versioned schema of this representation of an object.
                Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values.
                More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
              type: string
            kind:
              description: |-
                Kind is a string value representing the REST resource this object represents.
                Servers may infer this from the endpoint the client submits requests to.
                Cannot be updated. In CamelCase.
                More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
              type: string
            metadata:
              type: object
            spec:
              default: {}
              description: Spec defines the desired state of the Cluster.
              properties:
                addons:
                  description: Addons specifies secrets containing raw YAML to deploy on cluster startup.
                  items:
                    description: Addon specifies a Secret containing YAML to be deployed on cluster startup.
                    properties:
                      secretNamespace:
                        description: SecretNamespace is the namespace of the Secret.
                        type: string
                      secretRef:
                        description: SecretRef is the name of the Secret.
                        type: string
                    type: object
                  type: array
                agentArgs:
                  description: |-
                    AgentArgs specifies ordered key-value pairs for K3s agent pods.
                    Example: ["--node-name=my-agent-node"]
                  items:
                    type: string
                  type: array
                agents:
                  default: 0
                  description: |-
                    Agents specifies the number of K3s pods to run in agent (worker) mode.
                    Must be 0 or greater. Defaults to 0.
                    This field is ignored in "shared" mode.
                  format: int32
                  type: integer
                  x-kubernetes-validations:
                    - message: invalid value for agents
                      rule: self >= 0
                clusterCIDR:
                  description: |-
                    ClusterCIDR is the CIDR range for pod IPs.
                    Defaults to 10.42.0.0/16 in shared mode and 10.52.0.0/16 in virtual mode.
                    This field is immutable.
                  type: string
                  x-kubernetes-validations:
                    - message: clusterCIDR is immutable
                      rule: self == oldSelf
                clusterDNS:
                  description: |-
                    ClusterDNS is the IP address for the CoreDNS service.
                    Must be within the ServiceCIDR range. Defaults to 10.43.0.10.
                    This field is immutable.
                  type: string
                  x-kubernetes-validations:
                    - message: clusterDNS is immutable
                      rule: self == oldSelf
                expose:
                  description: |-
                    Expose specifies options for exposing the API server.
                    By default, it's only exposed as a ClusterIP.
                  properties:
                    ingress:
                      description: Ingress specifies options for exposing the API server through an Ingress.
                      properties:
                        annotations:
                          additionalProperties:
                            type: string
                          description: Annotations specifies annotations to add to the Ingress.
                          type: object
                        ingressClassName:
                          description: IngressClassName specifies the IngressClass to use for the Ingress.
                          type: string
                      type: object
                    loadbalancer:
                      description: LoadBalancer specifies options for exposing the API server through a LoadBalancer service.
                      type: object
                    nodePort:
                      description: NodePort specifies options for exposing the API server through NodePort.
                      properties:
                        etcdPort:
                          description: |-
                            ETCDPort is the port on each node on which the ETCD service is exposed when type is NodePort.
                            If not specified, a port will be allocated (default: 30000-32767).
                          format: int32
                          type: integer
                        serverPort:
                          description: |-
                            ServerPort is the port on each node on which the K3s server service is exposed when type is NodePort.
                            If not specified, a port will be allocated (default: 30000-32767).
                          format: int32
                          type: integer
                        servicePort:
                          description: |-
                            ServicePort is the port on each node on which the K3s service is exposed when type is NodePort.
                            If not specified, a port will be allocated (default: 30000-32767).
                          format: int32
                          type: integer
                      type: object
                  type: object
                mode:
                  allOf:
                    - enum:
                        - shared
                        - virtual
                    - enum:
                        - shared
                        - virtual
                  default: shared
                  description: |-
                    Mode specifies the cluster provisioning mode: "shared" or "virtual".
                    Defaults to "shared". This field is immutable.
                  type: string
                  x-kubernetes-validations:
                    - message: mode is immutable
                      rule: self == oldSelf
                nodeSelector:
                  additionalProperties:
                    type: string
                  description: |-
                    NodeSelector specifies node labels to constrain where server/agent pods are scheduled.
                    In "shared" mode, this also applies to workloads.
                  type: object
                persistence:
                  default:
                    type: dynamic
                  description: |-
                    Persistence specifies options for persisting etcd data.
                    Defaults to dynamic persistence, which uses a PersistentVolumeClaim to provide data persistence.
                    A default StorageClass is required for dynamic persistence.
                  properties:
                    storageClassName:
                      description: |-
                        StorageClassName is the name of the StorageClass to use for the PVC.
                        This field is only relevant in "dynamic" mode.
                      type: string
                    storageRequestSize:
                      description: |-
                        StorageRequestSize is the requested size for the PVC.
                        This field is only relevant in "dynamic" mode.
                      type: string
                    type:
                      default: dynamic
                      description: Type specifies the persistence mode.
                      type: string
                  required:
                    - type
                  type: object
                priorityClass:
                  description: |-
                    PriorityClass specifies the priorityClassName for server/agent pods.
                    In "shared" mode, this also applies to workloads.
                  type: string
                serverArgs:
                  description: |-
                    ServerArgs specifies ordered key-value pairs for K3s server pods.
                    Example: ["--tls-san=example.com"]
                  items:
                    type: string
                  type: array
                serverLimit:
                  additionalProperties:
                    anyOf:
                      - type: integer
                      - type: string
                    pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                    x-kubernetes-int-or-string: true
                  description: ServerLimit specifies resource limits for server nodes.
                  type: object
                servers:
                  default: 1
                  description: |-
                    Servers specifies the number of K3s pods to run in server (control plane) mode.
                    Must be at least 1. Defaults to 1.
                  format: int32
                  type: integer
                  x-kubernetes-validations:
                    - message: cluster must have at least one server
                      rule: self >= 1
                serviceCIDR:
                  description: |-
                    ServiceCIDR is the CIDR range for service IPs.
                    Defaults to 10.43.0.0/16 in shared mode and 10.53.0.0/16 in virtual mode.
                    This field is immutable.
                  type: string
                  x-kubernetes-validations:
                    - message: serviceCIDR is immutable
                      rule: self == oldSelf
                tlsSANs:
                  description: TLSSANs specifies subject alternative names for the K3s server certificate.
                  items:
                    type: string
                  type: array
                tokenSecretRef:
                  description: |-
                    TokenSecretRef is a Secret reference containing the token used by worker nodes to join the cluster.
                    The Secret must have a "token" field in its data.
                  properties:
                    name:
                      description: name is unique within a namespace to reference a secret resource.
                      type: string
                    namespace:
                      description: namespace defines the space within which the secret name must be unique.
                      type: string
                  type: object
                  x-kubernetes-map-type: atomic
                version:
                  description: |-
                    Version is the K3s version to use for the virtual nodes.
                    It should follow the K3s versioning convention (e.g., v1.28.2-k3s1).
                    If not specified, the Kubernetes version of the host node will be used.
                  type: string
                workerLimit:
                  additionalProperties:
                    anyOf:
                      - type: integer
                      - type: string
                    pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                    x-kubernetes-int-or-string: true
                  description: WorkerLimit specifies resource limits for agent nodes.
                  type: object
              type: object
            status:
              description: Status reflects the observed state of the Cluster.
              properties:
                clusterCIDR:
                  description: ClusterCIDR is the CIDR range for pod IPs.
                  type: string
                clusterDNS:
                  description: ClusterDNS is the IP address for the CoreDNS service.
                  type: string
                hostVersion:
                  description: HostVersion is the Kubernetes version of the host node.
                  type: string
                persistence:
                  description: Persistence specifies options for persisting etcd data.
                  properties:
                    storageClassName:
                      description: |-
                        StorageClassName is the name of the StorageClass to use for the PVC.
                        This field is only relevant in "dynamic" mode.
                      type: string
                    storageRequestSize:
                      description: |-
                        StorageRequestSize is the requested size for the PVC.
                        This field is only relevant in "dynamic" mode.
                      type: string
                    type:
                      default: dynamic
                      description: Type specifies the persistence mode.
                      type: string
                  required:
                    - type
                  type: object
                serviceCIDR:
                  description: ServiceCIDR is the CIDR range for service IPs.
                  type: string
                tlsSANs:
                  description: TLSSANs specifies subject alternative names for the K3s server certificate.
                  items:
                    type: string
                  type: array
              type: object
          type: object
      served: true
      storage: true
      subresources:
        status: {}
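For comparison with the older chart CRD above, a Cluster using the fields of this newer, namespaced schema might look like the following sketch. The values are illustrative; only the field names are taken from the CRD being removed in this compare.

```yaml
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
  name: mycluster
  namespace: k3k-mycluster
spec:
  mode: virtual              # "shared" (default) or "virtual"
  servers: 1
  agents: 1
  persistence:
    type: dynamic            # default; requires a default StorageClass
    storageRequestSize: 2Gi  # example size
  expose:
    nodePort:
      serverPort: 30443      # example port in the NodePort range
  tokenSecretRef:
    name: mycluster-token    # example Secret containing a "token" key
    namespace: k3k-mycluster
```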
Generated CustomResourceDefinition for clustersets.k3k.io
@@ -1,331 +0,0 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    controller-gen.kubebuilder.io/version: v0.14.0
  name: clustersets.k3k.io
spec:
  group: k3k.io
  names:
    kind: ClusterSet
    listKind: ClusterSetList
    plural: clustersets
    singular: clusterset
  scope: Namespaced
  versions:
    - additionalPrinterColumns:
        - jsonPath: .spec.displayName
          name: Display Name
          type: string
        - jsonPath: .metadata.creationTimestamp
          name: Age
          type: date
      name: v1alpha1
      schema:
        openAPIV3Schema:
          description: |-
            ClusterSet represents a group of virtual Kubernetes clusters managed by k3k.
            It allows defining common configurations and constraints for the clusters within the set.
          properties:
            apiVersion:
              description: |-
                APIVersion defines the versioned schema of this representation of an object.
                Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values.
                More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
              type: string
            kind:
              description: |-
                Kind is a string value representing the REST resource this object represents.
                Servers may infer this from the endpoint the client submits requests to.
                Cannot be updated. In CamelCase.
                More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
              type: string
            metadata:
              type: object
            spec:
              default: {}
              description: Spec defines the desired state of the ClusterSet.
              properties:
                allowedModeTypes:
                  default:
                    - shared
                  description: AllowedModeTypes specifies the allowed cluster provisioning modes. Defaults to [shared].
                  items:
                    description: ClusterMode is the possible provisioning mode of a Cluster.
                    enum:
                      - shared
                      - virtual
                    type: string
                  minItems: 1
                  type: array
                  x-kubernetes-validations:
                    - message: mode is immutable
                      rule: self == oldSelf
                defaultNodeSelector:
                  additionalProperties:
                    type: string
                  description: DefaultNodeSelector specifies the node selector that applies to all clusters (server + agent) in the set.
                  type: object
                defaultPriorityClass:
                  description: DefaultPriorityClass specifies the priorityClassName applied to all pods of all clusters in the set.
                  type: string
                disableNetworkPolicy:
                  description: DisableNetworkPolicy indicates whether to disable the creation of a default network policy for cluster isolation.
                  type: boolean
                displayName:
                  description: DisplayName is the human-readable name for the set.
                  type: string
                limit:
                  description: |-
                    Limit specifies the LimitRange that will be applied to all pods within the ClusterSet
                    to set defaults and constraints (min/max)
                  properties:
                    limits:
                      description: Limits is the list of LimitRangeItem objects that are enforced.
                      items:
                        description: LimitRangeItem defines a min/max usage limit for any resource that matches on kind.
                        properties:
                          default:
                            additionalProperties:
                              anyOf:
                                - type: integer
                                - type: string
                              pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                              x-kubernetes-int-or-string: true
                            description: Default resource requirement limit value by resource name if resource limit is omitted.
                            type: object
                          defaultRequest:
                            additionalProperties:
                              anyOf:
                                - type: integer
                                - type: string
                              pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                              x-kubernetes-int-or-string: true
                            description: DefaultRequest is the default resource requirement request value by resource name if resource request is omitted.
                            type: object
                          max:
                            additionalProperties:
                              anyOf:
                                - type: integer
                                - type: string
                              pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                              x-kubernetes-int-or-string: true
                            description: Max usage constraints on this kind by resource name.
                            type: object
                          maxLimitRequestRatio:
                            additionalProperties:
                              anyOf:
                                - type: integer
                                - type: string
                              pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                              x-kubernetes-int-or-string: true
                            description: MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource.
                            type: object
                          min:
                            additionalProperties:
                              anyOf:
                                - type: integer
                                - type: string
                              pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                              x-kubernetes-int-or-string: true
                            description: Min usage constraints on this kind by resource name.
                            type: object
                          type:
                            description: Type of resource that this limit applies to.
                            type: string
                        required:
                          - type
                        type: object
                      type: array
                  required:
                    - limits
                  type: object
                podSecurityAdmissionLevel:
                  description: PodSecurityAdmissionLevel specifies the pod security admission level applied to the pods in the namespace.
                  enum:
                    - privileged
                    - baseline
                    - restricted
                  type: string
                quota:
                  description: Quota specifies the resource limits for clusters within a clusterset.
                  properties:
                    hard:
                      additionalProperties:
                        anyOf:
                          - type: integer
                          - type: string
                        pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                        x-kubernetes-int-or-string: true
                      description: |-
                        hard is the set of desired hard limits for each named resource.
                        More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
                      type: object
                    scopeSelector:
                      description: |-
                        scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota
                        but expressed using ScopeSelectorOperator in combination with possible values.
                        For a resource to match, both scopes AND scopeSelector (if specified in spec), must be matched.
                      properties:
                        matchExpressions:
                          description: A list of scope selector requirements by scope of the resources.
                          items:
                            description: |-
                              A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator
                              that relates the scope name and values.
                            properties:
                              operator:
                                description: |-
                                  Represents a scope's relationship to a set of values.
                                  Valid operators are In, NotIn, Exists, DoesNotExist.
                                type: string
                              scopeName:
                                description: The name of the scope that the selector applies to.
                                type: string
                              values:
                                description: |-
                                  An array of string values. If the operator is In or NotIn,
                                  the values array must be non-empty. If the operator is Exists or DoesNotExist,
                                  the values array must be empty.
                                  This array is replaced during a strategic merge patch.
                                items:
                                  type: string
                                type: array
                            required:
                              - operator
                              - scopeName
                            type: object
                          type: array
                      type: object
                      x-kubernetes-map-type: atomic
                    scopes:
                      description: |-
                        A collection of filters that must match each object tracked by a quota.
                        If not specified, the quota matches all objects.
                      items:
                        description: A ResourceQuotaScope defines a filter that must match each object tracked by a quota
                        type: string
                      type: array
                  type: object
              type: object
            status:
              description: Status reflects the observed state of the ClusterSet.
              properties:
                conditions:
                  description: Conditions are the individual conditions for the cluster set.
                  items:
                    description: Condition contains details for one aspect of the current state of this API Resource. This struct is intended for direct use as an array at the field path .status.conditions (with +patchMergeKey=type, +patchStrategy=merge, +listType=map, +listMapKey=type markers on the Conditions []metav1.Condition field).
                    properties:
                      lastTransitionTime:
                        description: |-
                          lastTransitionTime is the last time the condition transitioned from one status to another.
                          This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
                        format: date-time
                        type: string
                      message:
                        description: |-
                          message is a human readable message indicating details about the transition.
                          This may be an empty string.
                        maxLength: 32768
                        type: string
                      observedGeneration:
                        description: |-
                          observedGeneration represents the .metadata.generation that the condition was set based upon.
                          For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
                          with respect to the current state of the instance.
                        format: int64
                        minimum: 0
                        type: integer
                      reason:
                        description: |-
                          reason contains a programmatic identifier indicating the reason for the condition's last transition.
                          Producers of specific condition types may define expected values and meanings for this field,
                          and whether the values are considered a guaranteed API.
                          The value should be a CamelCase string.
                          This field may not be empty.
                        maxLength: 1024
                        minLength: 1
                        pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
                        type: string
                      status:
                        description: status of the condition, one of True, False, Unknown.
                        enum:
                          - "True"
                          - "False"
                          - Unknown
                        type: string
                      type:
                        description: |-
                          type of condition in CamelCase or in foo.example.com/CamelCase.
|
||||
---
|
||||
Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be
|
||||
useful (see .node.status.conditions), the ability to deconflict is important.
|
||||
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
|
||||
maxLength: 316
|
||||
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
|
||||
type: string
|
||||
required:
|
||||
- lastTransitionTime
|
||||
- message
|
||||
- reason
|
||||
- status
|
||||
- type
|
||||
type: object
|
||||
type: array
|
||||
lastUpdateTime:
|
||||
description: LastUpdate is the timestamp when the status was last
|
||||
updated.
|
||||
type: string
|
||||
observedGeneration:
|
||||
description: ObservedGeneration was the generation at the time the
|
||||
status was updated.
|
||||
format: int64
|
||||
type: integer
|
||||
summary:
|
||||
description: Summary is a summary of the status.
|
||||
type: string
|
||||
type: object
|
||||
required:
|
||||
- spec
|
||||
type: object
|
||||
x-kubernetes-validations:
|
||||
- message: Name must match 'default'
|
||||
rule: self.metadata.name == "default"
|
||||
served: true
|
||||
storage: true
|
||||
subresources:
|
||||
status: {}
|
||||
@@ -4,7 +4,7 @@ metadata:
|
||||
name: {{ include "k3k.fullname" . }}
|
||||
labels:
|
||||
{{- include "k3k.labels" . | nindent 4 }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
namespace: {{ .Values.namespace }}
|
||||
spec:
|
||||
replicas: {{ .Values.image.replicaCount }}
|
||||
selector:
|
||||
@@ -16,21 +16,11 @@ spec:
|
||||
{{- include "k3k.selectorLabels" . | nindent 8 }}
|
||||
spec:
|
||||
containers:
|
||||
- image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}"
|
||||
- image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
name: {{ .Chart.Name }}
|
||||
env:
|
||||
- name: CLUSTER_CIDR
|
||||
value: {{ .Values.host.clusterCIDR }}
|
||||
- name: SHARED_AGENT_IMAGE
|
||||
value: "{{ .Values.sharedAgent.image.repository }}:{{ default .Chart.AppVersion .Values.sharedAgent.image.tag }}"
|
||||
- name: SHARED_AGENT_PULL_POLICY
|
||||
value: {{ .Values.sharedAgent.image.pullPolicy }}
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
name: https
|
||||
protocol: TCP
|
||||
- containerPort: 9443
|
||||
name: https-webhook
|
||||
protocol: TCP
|
||||
serviceAccountName: {{ include "k3k.serviceAccountName" . }}
|
||||
serviceAccountName: {{ include "k3k.serviceAccountName" . }}
|
||||
4
charts/k3k/templates/naemspace.yaml
Normal file
@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
  name: {{ .Values.namespace }}
@@ -11,27 +11,4 @@ roleRef:
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ include "k3k.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: {{ include "k3k.fullname" . }}-node-proxy
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- "nodes"
|
||||
- "nodes/proxy"
|
||||
verbs:
|
||||
- "get"
|
||||
- "list"
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: {{ include "k3k.fullname" . }}-node-proxy
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: {{ include "k3k.fullname" . }}-node-proxy
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
namespace: {{ .Values.namespace }}
|
||||
@@ -1,15 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: k3k-webhook
|
||||
labels:
|
||||
{{- include "k3k.labels" . | nindent 4 }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
ports:
|
||||
- port: 443
|
||||
protocol: TCP
|
||||
name: https-webhook
|
||||
targetPort: 9443
|
||||
selector:
|
||||
{{- include "k3k.selectorLabels" . | nindent 6 }}
|
||||
@@ -5,5 +5,5 @@ metadata:
|
||||
name: {{ include "k3k.serviceAccountName" . }}
|
||||
labels:
|
||||
{{- include "k3k.labels" . | nindent 4 }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- end }}
|
||||
namespace: {{ .Values.namespace }}
|
||||
{{- end }}
|
||||
@@ -1,29 +1,19 @@
replicaCount: 1
namespace: k3k-system

image:
  repository: rancher/k3k
  tag: ""
  pullPolicy: ""
  repository: briandowns/k3k
  pullPolicy: Always
  # Overrides the image tag whose default is the chart appVersion.
  tag: "dev"

imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""

host:
  # clusterCIDR specifies the clusterCIDR that will be added to the default networkpolicy for clustersets, if not set
  # the controller will collect the PodCIDRs of all the nodes on the system.
  clusterCIDR: ""

serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""

# configuration related to the shared agent mode in k3k
sharedAgent:
  image:
    repository: "rancher/k3k-kubelet"
    tag: ""
    pullPolicy: ""
@@ -1,16 +0,0 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
func NewClusterCmd(appCtx *AppContext) *cli.Command {
|
||||
return &cli.Command{
|
||||
Name: "cluster",
|
||||
Usage: "cluster command",
|
||||
Subcommands: []*cli.Command{
|
||||
NewClusterCreateCmd(appCtx),
|
||||
NewClusterDeleteCmd(appCtx),
|
||||
},
|
||||
}
|
||||
}
|
||||
25
cli/cmds/cluster/cluster.go
Normal file
@@ -0,0 +1,25 @@
package cluster

import (
	"github.com/rancher/k3k/cli/cmds"
	"github.com/urfave/cli"
)

var clusterSubcommands = []cli.Command{
	{
		Name:            "create",
		Usage:           "Create new cluster",
		SkipFlagParsing: false,
		SkipArgReorder:  true,
		Action:          createCluster,
		Flags:           append(cmds.CommonFlags, clusterCreateFlags...),
	},
}

func NewClusterCommand() cli.Command {
	return cli.Command{
		Name:        "cluster",
		Usage:       "cluster command",
		Subcommands: clusterSubcommands,
	}
}
327
cli/cmds/cluster/create.go
Normal file
@@ -0,0 +1,327 @@
|
||||
package cluster
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rancher/k3k/cli/cmds"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
"github.com/rancher/k3k/pkg/controller/cluster"
|
||||
"github.com/rancher/k3k/pkg/controller/util"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
var (
|
||||
Scheme = runtime.NewScheme()
|
||||
backoff = wait.Backoff{
|
||||
Steps: 5,
|
||||
Duration: 3 * time.Second,
|
||||
Factor: 2,
|
||||
Jitter: 0.1,
|
||||
}
|
||||
)
|
||||
|
||||
func init() {
|
||||
_ = clientgoscheme.AddToScheme(Scheme)
|
||||
_ = v1alpha1.AddToScheme(Scheme)
|
||||
}
|
||||
|
||||
var (
|
||||
name string
|
||||
token string
|
||||
clusterCIDR string
|
||||
serviceCIDR string
|
||||
servers int64
|
||||
agents int64
|
||||
serverArgs cli.StringSlice
|
||||
agentArgs cli.StringSlice
|
||||
persistenceType string
|
||||
storageClassName string
|
||||
version string
|
||||
|
||||
clusterCreateFlags = []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "name",
|
||||
Usage: "name of the cluster",
|
||||
Destination: &name,
|
||||
},
|
||||
cli.Int64Flag{
|
||||
Name: "servers",
|
||||
Usage: "number of servers",
|
||||
Destination: &servers,
|
||||
Value: 1,
|
||||
},
|
||||
cli.Int64Flag{
|
||||
Name: "agents",
|
||||
Usage: "number of agents",
|
||||
Destination: &agents,
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "token",
|
||||
Usage: "token of the cluster",
|
||||
Destination: &token,
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "cluster-cidr",
|
||||
Usage: "cluster CIDR",
|
||||
Destination: &clusterCIDR,
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "service-cidr",
|
||||
Usage: "service CIDR",
|
||||
Destination: &serviceCIDR,
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "persistence-type",
|
||||
Usage: "Persistence mode for the nodes (ephermal, static, dynamic)",
|
||||
Value: cluster.EphermalNodesType,
|
||||
Destination: &persistenceType,
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "storage-class-name",
|
||||
Usage: "Storage class name for dynamic persistence type",
|
||||
Destination: &storageClassName,
|
||||
},
|
||||
cli.StringSliceFlag{
|
||||
Name: "server-args",
|
||||
Usage: "servers extra arguments",
|
||||
Value: &serverArgs,
|
||||
},
|
||||
cli.StringSliceFlag{
|
||||
Name: "agent-args",
|
||||
Usage: "agents extra arguments",
|
||||
Value: &agentArgs,
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "version",
|
||||
Usage: "k3s version",
|
||||
Destination: &version,
|
||||
Value: "v1.26.1-k3s1",
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
func createCluster(clx *cli.Context) error {
|
||||
ctx := context.Background()
|
||||
if err := validateCreateFlags(clx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
restConfig, err := clientcmd.BuildConfigFromFlags("", cmds.Kubeconfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctrlClient, err := client.New(restConfig, client.Options{
|
||||
Scheme: Scheme,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
logrus.Infof("Creating a new cluster [%s]", name)
|
||||
cluster := newCluster(
|
||||
name,
|
||||
token,
|
||||
int32(servers),
|
||||
int32(agents),
|
||||
clusterCIDR,
|
||||
serviceCIDR,
|
||||
serverArgs,
|
||||
agentArgs,
|
||||
)
|
||||
|
||||
cluster.Spec.Expose = &v1alpha1.ExposeConfig{
|
||||
NodePort: &v1alpha1.NodePortConfig{
|
||||
Enabled: true,
|
||||
},
|
||||
}
|
||||
|
||||
// add Host IP address as an extra TLS-SAN to expose the k3k cluster
|
||||
url, err := url.Parse(restConfig.Host)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
host := strings.Split(url.Host, ":")
|
||||
cluster.Spec.TLSSANs = []string{host[0]}
|
||||
|
||||
if err := ctrlClient.Create(ctx, cluster); err != nil {
|
||||
if apierrors.IsAlreadyExists(err) {
|
||||
logrus.Infof("Cluster [%s] already exists", name)
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
logrus.Infof("Extracting Kubeconfig for [%s] cluster", name)
|
||||
var kubeconfig []byte
|
||||
if err := retry.OnError(backoff, apierrors.IsNotFound, func() error {
|
||||
kubeconfig, err = extractKubeconfig(ctx, ctrlClient, cluster, host[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logrus.Infof(`You can start using the cluster with:
|
||||
|
||||
export KUBECONFIG=%s
|
||||
kubectl cluster-info
|
||||
`, filepath.Join(pwd, cluster.Name+"-kubeconfig.yaml"))
|
||||
|
||||
return os.WriteFile(cluster.Name+"-kubeconfig.yaml", kubeconfig, 0644)
|
||||
}
|
||||
|
||||
func validateCreateFlags(clx *cli.Context) error {
|
||||
if persistenceType != cluster.EphermalNodesType &&
|
||||
persistenceType != cluster.DynamicNodesType {
|
||||
return errors.New("invalid persistence type")
|
||||
}
|
||||
if token == "" {
|
||||
return errors.New("empty cluster token")
|
||||
}
|
||||
if name == "" {
|
||||
return errors.New("empty cluster name")
|
||||
}
|
||||
if servers <= 0 {
|
||||
return errors.New("invalid number of servers")
|
||||
}
|
||||
if cmds.Kubeconfig == "" && os.Getenv("KUBECONFIG") == "" {
|
||||
return errors.New("empty kubeconfig")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func newCluster(name, token string, servers, agents int32, clusterCIDR, serviceCIDR string, serverArgs, agentArgs []string) *v1alpha1.Cluster {
|
||||
return &v1alpha1.Cluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Cluster",
|
||||
APIVersion: "k3k.io/v1alpha1",
|
||||
},
|
||||
Spec: v1alpha1.ClusterSpec{
|
||||
Name: name,
|
||||
Token: token,
|
||||
Servers: &servers,
|
||||
Agents: &agents,
|
||||
ClusterCIDR: clusterCIDR,
|
||||
ServiceCIDR: serviceCIDR,
|
||||
ServerArgs: serverArgs,
|
||||
AgentArgs: agentArgs,
|
||||
Version: version,
|
||||
Persistence: &v1alpha1.PersistenceConfig{
|
||||
Type: persistenceType,
|
||||
StorageClassName: storageClassName,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func extractKubeconfig(ctx context.Context, client client.Client, cluster *v1alpha1.Cluster, serverIP string) ([]byte, error) {
|
||||
nn := types.NamespacedName{
|
||||
Name: cluster.Name + "-kubeconfig",
|
||||
Namespace: util.ClusterNamespace(cluster),
|
||||
}
|
||||
|
||||
var kubeSecret v1.Secret
|
||||
if err := client.Get(ctx, nn, &kubeSecret); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
kubeconfig := kubeSecret.Data["kubeconfig.yaml"]
|
||||
if kubeconfig == nil {
|
||||
return nil, errors.New("empty kubeconfig")
|
||||
}
|
||||
|
||||
nn = types.NamespacedName{
|
||||
Name: "k3k-server-service",
|
||||
Namespace: util.ClusterNamespace(cluster),
|
||||
}
|
||||
|
||||
var k3kService v1.Service
|
||||
if err := client.Get(ctx, nn, &k3kService); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if k3kService.Spec.Type == v1.ServiceTypeNodePort {
|
||||
nodePort := k3kService.Spec.Ports[0].NodePort
|
||||
|
||||
restConfig, err := clientcmd.RESTConfigFromKubeConfig(kubeconfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hostURL := fmt.Sprintf("https://%s:%d", serverIP, nodePort)
|
||||
restConfig.Host = hostURL
|
||||
|
||||
clientConfig := generateKubeconfigFromRest(restConfig)
|
||||
|
||||
b, err := clientcmd.Write(clientConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
kubeconfig = b
|
||||
}
|
||||
|
||||
return kubeconfig, nil
|
||||
}
|
||||
|
||||
func generateKubeconfigFromRest(config *rest.Config) clientcmdapi.Config {
|
||||
clusters := make(map[string]*clientcmdapi.Cluster)
|
||||
clusters["default-cluster"] = &clientcmdapi.Cluster{
|
||||
Server: config.Host,
|
||||
CertificateAuthorityData: config.CAData,
|
||||
}
|
||||
|
||||
contexts := make(map[string]*clientcmdapi.Context)
|
||||
contexts["default-context"] = &clientcmdapi.Context{
|
||||
Cluster: "default-cluster",
|
||||
Namespace: "default",
|
||||
AuthInfo: "default",
|
||||
}
|
||||
|
||||
authinfos := make(map[string]*clientcmdapi.AuthInfo)
|
||||
authinfos["default"] = &clientcmdapi.AuthInfo{
|
||||
ClientCertificateData: config.CertData,
|
||||
ClientKeyData: config.KeyData,
|
||||
}
|
||||
|
||||
clientConfig := clientcmdapi.Config{
|
||||
Kind: "Config",
|
||||
APIVersion: "v1",
|
||||
Clusters: clusters,
|
||||
Contexts: contexts,
|
||||
CurrentContext: "default-context",
|
||||
AuthInfos: authinfos,
|
||||
}
|
||||
|
||||
return clientConfig
|
||||
}
|
||||
1
cli/cmds/cluster/delete.go
Normal file
@@ -0,0 +1 @@
package cluster
@@ -1,213 +0,0 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
|
||||
"github.com/rancher/k3k/pkg/controller/kubeconfig"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli/v2"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"k8s.io/utils/ptr"
|
||||
)
|
||||
|
||||
type CreateConfig struct {
|
||||
token string
|
||||
clusterCIDR string
|
||||
serviceCIDR string
|
||||
servers int
|
||||
agents int
|
||||
serverArgs cli.StringSlice
|
||||
agentArgs cli.StringSlice
|
||||
persistenceType string
|
||||
storageClassName string
|
||||
version string
|
||||
mode string
|
||||
kubeconfigServerHost string
|
||||
clusterset string
|
||||
}
|
||||
|
||||
func NewClusterCreateCmd(appCtx *AppContext) *cli.Command {
|
||||
createConfig := &CreateConfig{}
|
||||
createFlags := NewCreateFlags(createConfig)
|
||||
|
||||
return &cli.Command{
|
||||
Name: "create",
|
||||
Usage: "Create new cluster",
|
||||
UsageText: "k3kcli cluster create [command options] NAME",
|
||||
Action: createAction(appCtx, createConfig),
|
||||
Flags: WithCommonFlags(appCtx, createFlags...),
|
||||
HideHelpCommand: true,
|
||||
}
|
||||
}
|
||||
|
||||
func createAction(appCtx *AppContext, config *CreateConfig) cli.ActionFunc {
|
||||
return func(clx *cli.Context) error {
|
||||
ctx := context.Background()
|
||||
client := appCtx.Client
|
||||
|
||||
if clx.NArg() != 1 {
|
||||
return cli.ShowSubcommandHelp(clx)
|
||||
}
|
||||
|
||||
name := clx.Args().First()
|
||||
if name == k3kcluster.ClusterInvalidName {
|
||||
return errors.New("invalid cluster name")
|
||||
}
|
||||
|
||||
namespace := appCtx.Namespace(name)
|
||||
|
||||
// if clusterset is set, use the namespace of the clusterset
|
||||
if config.clusterset != "" {
|
||||
namespace = appCtx.Namespace(config.clusterset)
|
||||
}
|
||||
|
||||
if err := createNamespace(ctx, client, namespace); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// if clusterset is set, create the cluster set
|
||||
if config.clusterset != "" {
|
||||
namespace = appCtx.Namespace(config.clusterset)
|
||||
|
||||
clusterSet := &v1alpha1.ClusterSet{}
|
||||
if err := client.Get(ctx, types.NamespacedName{Name: "default", Namespace: namespace}, clusterSet); err != nil {
|
||||
if !apierrors.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
clusterSet, err = createClusterSet(ctx, client, namespace, v1alpha1.ClusterMode(config.mode), config.clusterset)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
logrus.Infof("ClusterSet in namespace [%s] available", namespace)
|
||||
|
||||
if !slices.Contains(clusterSet.Spec.AllowedModeTypes, v1alpha1.ClusterMode(config.mode)) {
|
||||
return fmt.Errorf("invalid '%s' Cluster mode. ClusterSet only allows %v", config.mode, clusterSet.Spec.AllowedModeTypes)
|
||||
}
|
||||
}
|
||||
|
||||
if strings.Contains(config.version, "+") {
|
||||
orig := config.version
|
||||
config.version = strings.Replace(config.version, "+", "-", -1)
|
||||
logrus.Warnf("Invalid K3s docker reference version: '%s'. Using '%s' instead", orig, config.version)
|
||||
}
|
||||
|
||||
if config.token != "" {
|
||||
logrus.Info("Creating cluster token secret")
|
||||
|
||||
obj := k3kcluster.TokenSecretObj(config.token, name, namespace)
|
||||
|
||||
if err := client.Create(ctx, &obj); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
logrus.Infof("Creating cluster [%s] in namespace [%s]", name, namespace)
|
||||
|
||||
cluster := newCluster(name, namespace, config)
|
||||
|
||||
cluster.Spec.Expose = &v1alpha1.ExposeConfig{
|
||||
NodePort: &v1alpha1.NodePortConfig{},
|
||||
}
|
||||
|
||||
// add Host IP address as an extra TLS-SAN to expose the k3k cluster
|
||||
url, err := url.Parse(appCtx.RestConfig.Host)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
host := strings.Split(url.Host, ":")
|
||||
if config.kubeconfigServerHost != "" {
|
||||
host = []string{config.kubeconfigServerHost}
|
||||
}
|
||||
|
||||
cluster.Spec.TLSSANs = []string{host[0]}
|
||||
|
||||
if err := client.Create(ctx, cluster); err != nil {
|
||||
if apierrors.IsAlreadyExists(err) {
|
||||
logrus.Infof("Cluster [%s] already exists", name)
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
logrus.Infof("Extracting Kubeconfig for [%s] cluster", name)
|
||||
|
||||
logrus.Infof("waiting for cluster to be available..")
|
||||
|
||||
// retry every 5s for at most 2m, or 25 times
|
||||
availableBackoff := wait.Backoff{
|
||||
Duration: 5 * time.Second,
|
||||
Cap: 2 * time.Minute,
|
||||
Steps: 25,
|
||||
}
|
||||
|
||||
cfg := kubeconfig.New()
|
||||
|
||||
var kubeconfig *clientcmdapi.Config
|
||||
|
||||
if err := retry.OnError(availableBackoff, apierrors.IsNotFound, func() error {
|
||||
kubeconfig, err = cfg.Extract(ctx, client, cluster, host[0])
|
||||
return err
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return writeKubeconfigFile(cluster, kubeconfig)
|
||||
}
|
||||
}
|
||||
|
||||
func newCluster(name, namespace string, config *CreateConfig) *v1alpha1.Cluster {
|
||||
cluster := &v1alpha1.Cluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
},
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Cluster",
|
||||
APIVersion: "k3k.io/v1alpha1",
|
||||
},
|
||||
Spec: v1alpha1.ClusterSpec{
|
||||
Servers: ptr.To(int32(config.servers)),
|
||||
Agents: ptr.To(int32(config.agents)),
|
||||
ClusterCIDR: config.clusterCIDR,
|
||||
ServiceCIDR: config.serviceCIDR,
|
||||
ServerArgs: config.serverArgs.Value(),
|
||||
AgentArgs: config.agentArgs.Value(),
|
||||
Version: config.version,
|
||||
Mode: v1alpha1.ClusterMode(config.mode),
|
||||
Persistence: v1alpha1.PersistenceConfig{
|
||||
Type: v1alpha1.PersistenceMode(config.persistenceType),
|
||||
StorageClassName: ptr.To(config.storageClassName),
|
||||
},
|
||||
},
|
||||
}
|
||||
if config.storageClassName == "" {
|
||||
cluster.Spec.Persistence.StorageClassName = nil
|
||||
}
|
||||
|
||||
if config.token != "" {
|
||||
cluster.Spec.TokenSecretRef = &v1.SecretReference{
|
||||
Name: k3kcluster.TokenSecretName(name),
|
||||
Namespace: namespace,
|
||||
}
|
||||
}
|
||||
|
||||
return cluster
|
||||
}
|
||||
@@ -1,103 +0,0 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
func NewCreateFlags(config *CreateConfig) []cli.Flag {
|
||||
return []cli.Flag{
|
||||
&cli.IntFlag{
|
||||
Name: "servers",
|
||||
Usage: "number of servers",
|
||||
Destination: &config.servers,
|
||||
Value: 1,
|
||||
Action: func(ctx *cli.Context, value int) error {
|
||||
if value <= 0 {
|
||||
return errors.New("invalid number of servers")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
},
|
||||
&cli.IntFlag{
|
||||
Name: "agents",
|
||||
Usage: "number of agents",
|
||||
Destination: &config.agents,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "token",
|
||||
Usage: "token of the cluster",
|
||||
Destination: &config.token,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "cluster-cidr",
|
||||
Usage: "cluster CIDR",
|
||||
Destination: &config.clusterCIDR,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "service-cidr",
|
||||
Usage: "service CIDR",
|
||||
Destination: &config.serviceCIDR,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "persistence-type",
|
||||
Usage: "persistence mode for the nodes (dynamic, ephemeral, static)",
|
||||
Value: string(v1alpha1.DynamicPersistenceMode),
|
||||
Destination: &config.persistenceType,
|
||||
Action: func(ctx *cli.Context, value string) error {
|
||||
switch v1alpha1.PersistenceMode(value) {
|
||||
case v1alpha1.EphemeralPersistenceMode, v1alpha1.DynamicPersistenceMode:
|
||||
return nil
|
||||
default:
|
||||
return errors.New(`persistence-type should be one of "dynamic", "ephemeral" or "static"`)
|
||||
}
|
||||
},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "storage-class-name",
|
||||
Usage: "storage class name for dynamic persistence type",
|
||||
Destination: &config.storageClassName,
|
||||
},
|
||||
&cli.StringSliceFlag{
|
||||
Name: "server-args",
|
||||
Usage: "servers extra arguments",
|
||||
Destination: &config.serverArgs,
|
||||
},
|
||||
&cli.StringSliceFlag{
|
||||
Name: "agent-args",
|
||||
Usage: "agents extra arguments",
|
||||
Destination: &config.agentArgs,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "version",
|
||||
Usage: "k3s version",
|
||||
Destination: &config.version,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "mode",
|
||||
Usage: "k3k mode type (shared, virtual)",
|
||||
Destination: &config.mode,
|
||||
Value: "shared",
|
||||
Action: func(ctx *cli.Context, value string) error {
|
||||
switch value {
|
||||
case string(v1alpha1.VirtualClusterMode), string(v1alpha1.SharedClusterMode):
|
||||
return nil
|
||||
default:
|
||||
return errors.New(`mode should be one of "shared" or "virtual"`)
|
||||
}
|
||||
},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "kubeconfig-server",
|
||||
Usage: "override the kubeconfig server host",
|
||||
Destination: &config.kubeconfigServerHost,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "clusterset",
|
||||
Usage: "The clusterset to create the cluster in",
|
||||
Destination: &config.clusterset,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -1,117 +0,0 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
|
||||
"github.com/rancher/k3k/pkg/controller/cluster/agent"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli/v2"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
)
|
||||
|
||||
var keepData bool
|
||||
|
||||
func NewClusterDeleteCmd(appCtx *AppContext) *cli.Command {
|
||||
return &cli.Command{
|
||||
Name: "delete",
|
||||
Usage: "Delete an existing cluster",
|
||||
UsageText: "k3kcli cluster delete [command options] NAME",
|
||||
Action: delete(appCtx),
|
||||
Flags: WithCommonFlags(appCtx, &cli.BoolFlag{
|
||||
Name: "keep-data",
|
||||
Usage: "keeps persistence volumes created for the cluster after deletion",
|
||||
Destination: &keepData,
|
||||
}),
|
||||
HideHelpCommand: true,
|
||||
}
|
||||
}
|
||||
|
||||
func delete(appCtx *AppContext) cli.ActionFunc {
|
||||
return func(clx *cli.Context) error {
|
||||
ctx := context.Background()
|
||||
client := appCtx.Client
|
||||
|
||||
if clx.NArg() != 1 {
|
||||
return cli.ShowSubcommandHelp(clx)
|
||||
}
|
||||
|
||||
name := clx.Args().First()
|
||||
if name == k3kcluster.ClusterInvalidName {
|
||||
return errors.New("invalid cluster name")
|
||||
}
|
||||
|
||||
namespace := appCtx.Namespace(name)
|
||||
|
||||
logrus.Infof("Deleting [%s] cluster in namespace [%s]", name, namespace)
|
||||
|
||||
cluster := v1alpha1.Cluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
},
|
||||
}
|
||||
// keep bootstrap secrets and tokens if --keep-data flag is passed
|
||||
if keepData {
|
||||
// skip removing tokenSecret
|
||||
if err := RemoveOwnerReferenceFromSecret(ctx, k3kcluster.TokenSecretName(cluster.Name), client, cluster); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// skip removing webhook secret
|
||||
if err := RemoveOwnerReferenceFromSecret(ctx, agent.WebhookSecretName(cluster.Name), client, cluster); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
matchingLabels := ctrlclient.MatchingLabels(map[string]string{"cluster": cluster.Name, "role": "server"})
|
||||
listOpts := ctrlclient.ListOptions{Namespace: cluster.Namespace}
|
||||
matchingLabels.ApplyToList(&listOpts)
|
||||
deleteOpts := &ctrlclient.DeleteAllOfOptions{ListOptions: listOpts}
|
||||
|
||||
if err := client.DeleteAllOf(ctx, &v1.PersistentVolumeClaim{}, deleteOpts); err != nil {
|
||||
return ctrlclient.IgnoreNotFound(err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := client.Delete(ctx, &cluster); err != nil {
|
||||
return ctrlclient.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func RemoveOwnerReferenceFromSecret(ctx context.Context, name string, cl ctrlclient.Client, cluster v1alpha1.Cluster) error {
|
||||
var secret v1.Secret
|
||||
|
||||
key := types.NamespacedName{
|
||||
Name: name,
|
||||
Namespace: cluster.Namespace,
|
||||
}
|
||||
|
||||
if err := cl.Get(ctx, key, &secret); err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
logrus.Warnf("%s secret is not found", name)
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
if controllerutil.HasControllerReference(&secret) {
|
||||
if err := controllerutil.RemoveOwnerReference(&cluster, &secret, cl.Scheme()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return cl.Update(ctx, &secret)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,16 +0,0 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
func NewClusterSetCmd(appCtx *AppContext) *cli.Command {
|
||||
return &cli.Command{
|
||||
Name: "clusterset",
|
||||
Usage: "clusterset command",
|
||||
Subcommands: []*cli.Command{
|
||||
NewClusterSetCreateCmd(appCtx),
|
||||
NewClusterSetDeleteCmd(appCtx),
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -1,138 +0,0 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli/v2"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
type ClusterSetCreateConfig struct {
|
||||
mode string
|
||||
displayName string
|
||||
}
|
||||
|
||||
func NewClusterSetCreateCmd(appCtx *AppContext) *cli.Command {
|
||||
config := &ClusterSetCreateConfig{}
|
||||
|
||||
createFlags := []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "mode",
|
||||
Usage: "The allowed mode type of the clusterset",
|
||||
Destination: &config.mode,
|
||||
Value: "shared",
|
||||
Action: func(ctx *cli.Context, value string) error {
|
||||
switch value {
|
||||
case string(v1alpha1.VirtualClusterMode), string(v1alpha1.SharedClusterMode):
|
||||
return nil
|
||||
default:
|
||||
return errors.New(`mode should be one of "shared" or "virtual"`)
|
||||
}
|
||||
},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "display-name",
|
||||
Usage: "The display name of the clusterset",
|
||||
Destination: &config.displayName,
|
||||
},
|
||||
}
|
||||
|
||||
return &cli.Command{
|
||||
Name: "create",
|
||||
Usage: "Create new clusterset",
|
||||
UsageText: "k3kcli clusterset create [command options] NAME",
|
||||
Action: clusterSetCreateAction(appCtx, config),
|
||||
Flags: WithCommonFlags(appCtx, createFlags...),
|
||||
HideHelpCommand: true,
|
||||
}
|
||||
}
|
||||
|
||||
func clusterSetCreateAction(appCtx *AppContext, config *ClusterSetCreateConfig) cli.ActionFunc {
|
||||
return func(clx *cli.Context) error {
|
||||
ctx := context.Background()
|
||||
client := appCtx.Client
|
||||
|
||||
if clx.NArg() != 1 {
|
||||
return cli.ShowSubcommandHelp(clx)
|
||||
}
|
||||
|
||||
name := clx.Args().First()
|
||||
if name == k3kcluster.ClusterInvalidName {
|
||||
return errors.New("invalid cluster name")
|
||||
}
|
||||
|
||||
displayName := config.displayName
|
||||
if displayName == "" {
|
||||
displayName = name
|
||||
}
|
||||
|
||||
// if both display name and namespace are set the name is ignored
|
||||
if config.displayName != "" && appCtx.namespace != "" {
|
||||
logrus.Warnf("Ignoring name [%s] because display name and namespace are set", name)
|
||||
}
|
||||
|
||||
namespace := appCtx.Namespace(name)
|
||||
|
||||
if err := createNamespace(ctx, client, namespace); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err := createClusterSet(ctx, client, namespace, v1alpha1.ClusterMode(config.mode), displayName)
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
func createNamespace(ctx context.Context, client client.Client, name string) error {
|
||||
ns := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: name}}
|
||||
if err := client.Get(ctx, types.NamespacedName{Name: name}, ns); err != nil {
|
||||
if !apierrors.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
logrus.Infof(`Creating namespace [%s]`, name)
|
||||
|
||||
if err := client.Create(ctx, ns); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func createClusterSet(ctx context.Context, client client.Client, namespace string, mode v1alpha1.ClusterMode, displayName string) (*v1alpha1.ClusterSet, error) {
|
||||
logrus.Infof("Creating clusterset in namespace [%s]", namespace)
|
||||
|
||||
clusterSet := &v1alpha1.ClusterSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "default",
|
||||
Namespace: namespace,
|
||||
},
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "ClusterSet",
|
||||
APIVersion: "k3k.io/v1alpha1",
|
||||
},
|
||||
Spec: v1alpha1.ClusterSetSpec{
|
||||
AllowedModeTypes: []v1alpha1.ClusterMode{mode},
|
||||
DisplayName: displayName,
|
||||
},
|
||||
}
|
||||
|
||||
if err := client.Create(ctx, clusterSet); err != nil {
|
||||
if apierrors.IsAlreadyExists(err) {
|
||||
logrus.Infof("ClusterSet in namespace [%s] already exists", namespace)
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return clusterSet, nil
|
||||
}
|
||||
@@ -1,61 +0,0 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli/v2"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
func NewClusterSetDeleteCmd(appCtx *AppContext) *cli.Command {
|
||||
return &cli.Command{
|
||||
Name: "delete",
|
||||
Usage: "Delete an existing clusterset",
|
||||
UsageText: "k3kcli clusterset delete [command options] NAME",
|
||||
Action: clusterSetDeleteAction(appCtx),
|
||||
Flags: WithCommonFlags(appCtx),
|
||||
HideHelpCommand: true,
|
||||
}
|
||||
}
|
||||
|
||||
func clusterSetDeleteAction(appCtx *AppContext) cli.ActionFunc {
|
||||
return func(clx *cli.Context) error {
|
||||
ctx := context.Background()
|
||||
client := appCtx.Client
|
||||
|
||||
if clx.NArg() != 1 {
|
||||
return cli.ShowSubcommandHelp(clx)
|
||||
}
|
||||
|
||||
name := clx.Args().First()
|
||||
if name == k3kcluster.ClusterInvalidName {
|
||||
return errors.New("invalid cluster name")
|
||||
}
|
||||
|
||||
namespace := appCtx.Namespace(name)
|
||||
|
||||
logrus.Infof("Deleting clusterset in namespace [%s]", namespace)
|
||||
|
||||
clusterSet := &v1alpha1.ClusterSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "default",
|
||||
Namespace: namespace,
|
||||
},
|
||||
}
|
||||
|
||||
if err := client.Delete(ctx, clusterSet); err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
logrus.Warnf("ClusterSet not found in namespace [%s]", namespace)
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
@@ -1,176 +0,0 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
"github.com/rancher/k3k/pkg/controller"
|
||||
"github.com/rancher/k3k/pkg/controller/certs"
|
||||
"github.com/rancher/k3k/pkg/controller/kubeconfig"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli/v2"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apiserver/pkg/authentication/user"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
||||
"k8s.io/client-go/util/retry"
|
||||
)
|
||||
|
||||
var (
|
||||
name string
|
||||
cn string
|
||||
org cli.StringSlice
|
||||
altNames cli.StringSlice
|
||||
expirationDays int64
|
||||
configName string
|
||||
kubeconfigServerHost string
|
||||
generateKubeconfigFlags = []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "name",
|
||||
Usage: "cluster name",
|
||||
Destination: &name,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "config-name",
|
||||
Usage: "the name of the generated kubeconfig file",
|
||||
Destination: &configName,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "cn",
|
||||
Usage: "Common name (CN) of the generated certificates for the kubeconfig",
|
||||
Destination: &cn,
|
||||
Value: controller.AdminCommonName,
|
||||
},
|
||||
&cli.StringSliceFlag{
|
||||
Name: "org",
|
||||
Usage: "Organization name (ORG) of the generated certificates for the kubeconfig",
|
||||
Value: &org,
|
||||
},
|
||||
&cli.StringSliceFlag{
|
||||
Name: "altNames",
|
||||
Usage: "altNames of the generated certificates for the kubeconfig",
|
||||
Value: &altNames,
|
||||
},
|
||||
&cli.Int64Flag{
|
||||
Name: "expiration-days",
|
||||
Usage: "Expiration date of the certificates used for the kubeconfig",
|
||||
Destination: &expirationDays,
|
||||
Value: 356,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "kubeconfig-server",
|
||||
Usage: "override the kubeconfig server host",
|
||||
Destination: &kubeconfigServerHost,
|
||||
Value: "",
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
func NewKubeconfigCmd(appCtx *AppContext) *cli.Command {
|
||||
return &cli.Command{
|
||||
Name: "kubeconfig",
|
||||
Usage: "Manage kubeconfig for clusters",
|
||||
Subcommands: []*cli.Command{
|
||||
NewKubeconfigGenerateCmd(appCtx),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func NewKubeconfigGenerateCmd(appCtx *AppContext) *cli.Command {
|
||||
return &cli.Command{
|
||||
Name: "generate",
|
||||
Usage: "Generate kubeconfig for clusters",
|
||||
SkipFlagParsing: false,
|
||||
Action: generate(appCtx),
|
||||
Flags: WithCommonFlags(appCtx, generateKubeconfigFlags...),
|
||||
}
|
||||
}
|
||||
|
||||
func generate(appCtx *AppContext) cli.ActionFunc {
|
||||
return func(clx *cli.Context) error {
|
||||
ctx := context.Background()
|
||||
client := appCtx.Client
|
||||
|
||||
clusterKey := types.NamespacedName{
|
||||
Name: name,
|
||||
Namespace: appCtx.Namespace(name),
|
||||
}
|
||||
|
||||
var cluster v1alpha1.Cluster
|
||||
|
||||
if err := client.Get(ctx, clusterKey, &cluster); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
url, err := url.Parse(appCtx.RestConfig.Host)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
host := strings.Split(url.Host, ":")
|
||||
if kubeconfigServerHost != "" {
|
||||
host = []string{kubeconfigServerHost}
|
||||
|
||||
if err := altNames.Set(kubeconfigServerHost); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
certAltNames := certs.AddSANs(altNames.Value())
|
||||
|
||||
orgs := org.Value()
|
||||
if orgs == nil {
|
||||
orgs = []string{user.SystemPrivilegedGroup}
|
||||
}
|
||||
|
||||
cfg := kubeconfig.KubeConfig{
|
||||
CN: cn,
|
||||
ORG: orgs,
|
||||
ExpiryDate: time.Hour * 24 * time.Duration(expirationDays),
|
||||
AltNames: certAltNames,
|
||||
}
|
||||
|
||||
logrus.Infof("waiting for cluster to be available..")
|
||||
|
||||
var kubeconfig *clientcmdapi.Config
|
||||
|
||||
if err := retry.OnError(controller.Backoff, apierrors.IsNotFound, func() error {
|
||||
kubeconfig, err = cfg.Extract(ctx, client, &cluster, host[0])
|
||||
return err
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return writeKubeconfigFile(&cluster, kubeconfig)
|
||||
}
|
||||
}
|
||||
|
||||
func writeKubeconfigFile(cluster *v1alpha1.Cluster, kubeconfig *clientcmdapi.Config) error {
|
||||
if configName == "" {
|
||||
configName = cluster.Namespace + "-" + cluster.Name + "-kubeconfig.yaml"
|
||||
}
|
||||
|
||||
pwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logrus.Infof(`You can start using the cluster with:
|
||||
|
||||
export KUBECONFIG=%s
|
||||
kubectl cluster-info
|
||||
`, filepath.Join(pwd, configName))
|
||||
|
||||
kubeconfigData, err := clientcmd.Write(*kubeconfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return os.WriteFile(configName, kubeconfigData, 0644)
|
||||
}
|
||||
119
cli/cmds/root.go
@@ -1,117 +1,42 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
"github.com/rancher/k3k/pkg/buildinfo"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli/v2"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
type AppContext struct {
|
||||
RestConfig *rest.Config
|
||||
Client client.Client
|
||||
|
||||
// Global flags
|
||||
Debug bool
|
||||
Kubeconfig string
|
||||
namespace string
|
||||
}
|
||||
var (
|
||||
debug bool
|
||||
Kubeconfig string
|
||||
CommonFlags = []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "kubeconfig",
|
||||
EnvVar: "KUBECONFIG",
|
||||
Usage: "Kubeconfig path",
|
||||
Destination: &Kubeconfig,
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
func NewApp() *cli.App {
|
||||
appCtx := &AppContext{}
|
||||
|
||||
app := cli.NewApp()
|
||||
app.Name = "k3kcli"
|
||||
app.Usage = "CLI for K3K"
|
||||
app.Flags = WithCommonFlags(appCtx)
|
||||
app.Flags = []cli.Flag{
|
||||
cli.BoolFlag{
|
||||
Name: "debug",
|
||||
Usage: "Turn on debug logs",
|
||||
Destination: &debug,
|
||||
EnvVar: "K3K_DEBUG",
|
||||
},
|
||||
}
|
||||
|
||||
app.Before = func(clx *cli.Context) error {
|
||||
if appCtx.Debug {
|
||||
if debug {
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
}
|
||||
|
||||
restConfig, err := loadRESTConfig(appCtx.Kubeconfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
scheme := runtime.NewScheme()
|
||||
_ = clientgoscheme.AddToScheme(scheme)
|
||||
_ = v1alpha1.AddToScheme(scheme)
|
||||
|
||||
ctrlClient, err := client.New(restConfig, client.Options{Scheme: scheme})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
appCtx.RestConfig = restConfig
|
||||
appCtx.Client = ctrlClient
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
app.Version = buildinfo.Version
|
||||
cli.VersionPrinter = func(cCtx *cli.Context) {
|
||||
fmt.Println("k3kcli Version: " + buildinfo.Version)
|
||||
}
|
||||
|
||||
app.Commands = []*cli.Command{
|
||||
NewClusterCmd(appCtx),
|
||||
NewClusterSetCmd(appCtx),
|
||||
NewKubeconfigCmd(appCtx),
|
||||
}
|
||||
|
||||
return app
|
||||
}
|
||||
|
||||
func (ctx *AppContext) Namespace(name string) string {
|
||||
if ctx.namespace != "" {
|
||||
return ctx.namespace
|
||||
}
|
||||
|
||||
return "k3k-" + name
|
||||
}
|
||||
|
||||
func loadRESTConfig(kubeconfig string) (*rest.Config, error) {
|
||||
loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
|
||||
configOverrides := &clientcmd.ConfigOverrides{}
|
||||
|
||||
if kubeconfig != "" {
|
||||
loadingRules.ExplicitPath = kubeconfig
|
||||
}
|
||||
|
||||
kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides)
|
||||
|
||||
return kubeConfig.ClientConfig()
|
||||
}
|
||||
|
||||
func WithCommonFlags(appCtx *AppContext, flags ...cli.Flag) []cli.Flag {
|
||||
commonFlags := []cli.Flag{
|
||||
&cli.BoolFlag{
|
||||
Name: "debug",
|
||||
Usage: "Turn on debug logs",
|
||||
Destination: &appCtx.Debug,
|
||||
EnvVars: []string{"K3K_DEBUG"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "kubeconfig",
|
||||
Usage: "kubeconfig path",
|
||||
Destination: &appCtx.Kubeconfig,
|
||||
DefaultText: "$HOME/.kube/config or $KUBECONFIG if set",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "namespace",
|
||||
Usage: "namespace to create the k3k cluster in",
|
||||
Destination: &appCtx.namespace,
|
||||
},
|
||||
}
|
||||
|
||||
return append(commonFlags, flags...)
|
||||
}
|
||||
|
||||
13
cli/main.go
@@ -4,11 +4,24 @@ import (
|
||||
"os"
|
||||
|
||||
"github.com/rancher/k3k/cli/cmds"
|
||||
"github.com/rancher/k3k/cli/cmds/cluster"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
const (
|
||||
program = "k3k"
|
||||
version = "dev"
|
||||
gitCommit = "HEAD"
|
||||
)
|
||||
|
||||
func main() {
|
||||
app := cmds.NewApp()
|
||||
app.Commands = []cli.Command{
|
||||
cluster.NewClusterCommand(),
|
||||
}
|
||||
app.Version = version + " (" + gitCommit + ")"
|
||||
|
||||
if err := app.Run(os.Args); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
@@ -1,132 +0,0 @@
# Advanced Usage

This document provides advanced usage information for k3k, including detailed use cases and explanations of the `Cluster` resource fields for customization.

## Customizing the Cluster Resource

The `Cluster` resource provides a variety of fields for customizing the behavior of your virtual clusters. You can check the [CRD documentation](./crds/crd-docs.md) for the full specs.

**Note:** Most of these customization options can also be configured using the `k3kcli` tool. Refer to the [k3kcli](./cli/cli-docs.md) documentation for more details.

This example creates a "shared" mode K3k cluster with:

- 3 servers
- K3s version v1.31.3-k3s1
- Custom network configuration
- Deployment on specific nodes with the `nodeSelector`
- `kube-api` exposed using an ingress
- Custom K3s `serverArgs`
- ETCD data persisted using a `PVC`

```yaml
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
  name: my-virtual-cluster
  namespace: my-namespace
spec:
  mode: shared
  version: v1.31.3-k3s1
  servers: 3
  tlsSANs:
    - my-cluster.example.com
  nodeSelector:
    disktype: ssd
  expose:
    ingress:
      ingressClassName: nginx
      annotations:
        nginx.ingress.kubernetes.io/ssl-passthrough: "true"
        nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
        nginx.ingress.kubernetes.io/ssl-redirect: "true"
  clusterCIDR: 10.42.0.0/16
  serviceCIDR: 10.43.0.0/16
  clusterDNS: 10.43.0.10
  serverArgs:
    - --tls-san=my-cluster.example.com
  persistence:
    type: dynamic
    storageClassName: local-path
```
### `mode`

The `mode` field specifies the cluster provisioning mode, which can be either `shared` or `virtual`. The default mode is `shared`.

* **`shared` mode:** In this mode, the virtual cluster shares the host cluster's resources and networking. This mode is suitable for lightweight workloads and development environments where isolation is not a primary concern.
* **`virtual` mode:** In this mode, the virtual cluster runs as a separate K3s cluster within the host cluster. This mode provides stronger isolation and is suitable for production workloads or when dedicated resources are required.

### `version`

The `version` field specifies the Kubernetes version to be used by the virtual nodes. If not specified, K3k will use the same K3s version as the host cluster. For example, if the host cluster is running Kubernetes v1.31.3, K3k will use the corresponding K3s version (e.g., `v1.31.3-k3s1`).

### `servers`

The `servers` field specifies the number of K3s server nodes to deploy for the virtual cluster. The default value is 1.

### `agents`

The `agents` field specifies the number of K3s agent nodes to deploy for the virtual cluster. The default value is 0.

**Note:** In `shared` mode, this field is ignored, as the Virtual Kubelet acts as the agent, and there are no K3s worker nodes.
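As a quick illustration of where `agents` does apply, a minimal `virtual` mode cluster with dedicated agent nodes could look roughly like the sketch below; the names are placeholders, not values taken from this repository.

```yaml
# Illustrative only: a virtual-mode Cluster where the agents field takes effect.
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
  name: my-isolated-cluster   # placeholder name
  namespace: my-namespace     # placeholder namespace
spec:
  mode: virtual
  servers: 1
  agents: 2
```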
### `nodeSelector`

The `nodeSelector` field allows you to specify a node selector that will be applied to all server/agent pods. In `shared` mode, the node selector will also be applied to the workloads.

### `expose`

The `expose` field contains options for exposing the API server of the virtual cluster. By default, the API server is only exposed as a `ClusterIP`, which is relatively secure but difficult to access from outside the cluster.

You can use the `expose` field to enable exposure via `NodePort`, `LoadBalancer`, or `Ingress`.

In the example above, the cluster is exposed through an Nginx ingress controller, which must be configured with the `--enable-ssl-passthrough` flag.
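For comparison, exposing the API server with a `NodePort` instead of an ingress might look roughly like the following sketch. The field names under `expose` are assumed here and should be checked against the CRD documentation before use.

```yaml
# Assumed field names; verify against the Cluster CRD before relying on this.
spec:
  expose:
    nodePort:
      enabled: true
```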
### `clusterCIDR`

The `clusterCIDR` field specifies the CIDR range for the pods of the cluster. The default value is `10.42.0.0/16` in shared mode, and `10.52.0.0/16` in virtual mode.

### `serviceCIDR`

The `serviceCIDR` field specifies the CIDR range for the services in the cluster. The default value is `10.43.0.0/16` in shared mode, and `10.53.0.0/16` in virtual mode.

**Note:** In `shared` mode, the `serviceCIDR` should match the host cluster's `serviceCIDR` to prevent conflicts. In `virtual` mode, both `serviceCIDR` and `clusterCIDR` should differ from the host cluster's ranges.

### `clusterDNS`

The `clusterDNS` field specifies the IP address for the CoreDNS service. It needs to be in the range provided by `serviceCIDR`. The default value is `10.43.0.10`.

### `serverArgs`

The `serverArgs` field allows you to specify additional arguments to be passed to the K3s server pods.
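For example, a sketch of passing a couple of common K3s server flags through `serverArgs`; the flags shown are standard K3s options, not values prescribed by this repository.

```yaml
# Illustrative serverArgs values; any valid K3s server flag can be listed here.
spec:
  serverArgs:
    - --tls-san=my-cluster.example.com
    - --disable=traefik
```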
## Using the cli

You can check the [k3kcli documentation](./cli/cli-docs.md) for the full specs.

### No storage provider:

* Ephemeral Storage:

```bash
k3kcli cluster create --persistence-type ephemeral my-cluster
```

*Important Notes:*

* Using `--persistence-type ephemeral` will result in data loss if the nodes are restarted.

* It is highly recommended to use `--persistence-type dynamic` with a configured storage class.
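For reference, the equivalent `persistence` stanza on the `Cluster` resource when using dynamic persistence looks roughly like this; the storage class name is a placeholder for whatever class your host cluster provides.

```yaml
# Sketch of dynamic persistence; replace local-path with your storage class.
spec:
  persistence:
    type: dynamic
    storageClassName: local-path
```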
@@ -1,121 +0,0 @@
# Architecture

Virtual Clusters are isolated Kubernetes clusters provisioned on a physical cluster. K3k leverages [K3s](https://k3s.io/) as the control plane of the Kubernetes cluster because of its lightweight footprint.

K3k provides two modes of deploying virtual clusters: the "shared" mode (default), and "virtual".

## Shared Mode

The default `shared` mode uses a K3s server as control plane with an [agentless servers configuration](https://docs.k3s.io/advanced#running-agentless-servers-experimental). With this option enabled, the servers do not run the kubelet, container runtime, or CNI. The server uses a [Virtual Kubelet](https://virtual-kubelet.io/) provider implementation specific to K3k, which schedules the workloads, along with any other resources they need, on the host cluster. This K3k Virtual Kubelet provider handles the reflection of resources and workload execution within the shared host cluster environment.



### Networking and Storage

Because of this shared infrastructure, the CNI will be the same one configured in the host cluster. To provide the needed isolation, K3k will leverage Network Policies.

The same goes for the available storage, so the Storage Classes and Volumes are those of the host cluster.
### Resource Sharing and Limits
|
||||
|
||||
In shared mode, K3k leverages Kubernetes ResourceQuotas and LimitRanges to manage resource sharing and enforce limits. Since all virtual cluster workloads run within the same namespace on the host cluster, ResourceQuotas are applied to this namespace to limit the total resources consumed by a virtual cluster. LimitRanges are used to set default resource requests and limits for pods, ensuring that workloads have reasonable resource allocations even if they don't explicitly specify them.
|
||||
|
||||
Each pod in a virtual cluster is assigned a unique name that incorporates the pod name, namespace, and cluster name. This prevents naming collisions in the shared host cluster namespace.
|
||||
|
||||
It's important to understand that ResourceQuotas are applied at the namespace level. This means that all pods within a virtual cluster share the same quota. While this provides overall limits for the virtual cluster, it also means that resource allocation is dynamic. If one workload isn't using its full resource allocation, other workloads within the *same* virtual cluster can utilize those resources, even if they belong to different deployments or services.
|
||||
|
||||
This dynamic sharing can be both a benefit and a challenge. It allows for efficient resource utilization, but it can also lead to unpredictable performance if workloads have varying resource demands. Furthermore, this approach makes it difficult to guarantee strict resource isolation between workloads within the same virtual cluster.
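
K3k creates and manages these objects itself; the sketch below only illustrates the kind of host-side ResourceQuota and LimitRange involved. The names, namespace, and values are placeholders, not K3k's actual output:

```yaml
# Illustrative only: objects of this kind cap and default resource usage
# for all pods of a virtual cluster sharing one host namespace.
apiVersion: v1
kind: ResourceQuota
metadata:
  name: my-virtual-cluster-quota   # placeholder name
  namespace: k3k-my-cluster        # host namespace backing the virtual cluster
spec:
  hard:
    requests.cpu: "4"
    requests.memory: 8Gi
    limits.cpu: "8"
    limits.memory: 16Gi
---
apiVersion: v1
kind: LimitRange
metadata:
  name: my-virtual-cluster-defaults
  namespace: k3k-my-cluster
spec:
  limits:
    - type: Container
      defaultRequest:              # applied when a pod omits requests
        cpu: 100m
        memory: 128Mi
      default:                     # applied when a pod omits limits
        cpu: 500m
        memory: 512Mi
```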
|
||||
|
||||
GPU resource sharing is an area of ongoing investigation, and K3k is actively exploring potential solutions.
|
||||
|
||||
|
||||
### Isolation and Security
|
||||
|
||||
Isolation between virtual clusters in shared mode relies heavily on Kubernetes Network Policies. Network Policies define rules that control the network traffic allowed to and from pods. K3k configures Network Policies to ensure that pods in one virtual cluster cannot communicate with pods in other virtual clusters or with pods in the host cluster itself, providing a strong foundation for network isolation.
|
||||
|
||||
While Network Policies offer robust isolation capabilities, it's important to understand their characteristics:
|
||||
|
||||
* **CNI Integration:** Network Policies integrate seamlessly with supported CNI plugins. K3k leverages this integration to enforce network isolation.
|
||||
* **Granular Control:** Network Policies provide granular control over network traffic, allowing for fine-tuned security policies.
|
||||
* **Scalability:** Network Policies scale well with the number of virtual clusters and applications, ensuring consistent isolation as the environment grows.
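
K3k configures the actual policies automatically. As an illustration of the mechanism only, a policy of this general shape confines ingress traffic to the virtual cluster's own host namespace; the names are placeholders and K3k's real policies may contain additional rules:

```yaml
# Illustrative only: restrict pod-to-pod traffic to the namespace that
# backs a single virtual cluster.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: isolate-virtual-cluster    # placeholder name
  namespace: k3k-my-cluster        # host namespace backing the virtual cluster
spec:
  podSelector: {}                  # applies to all pods in the namespace
  policyTypes:
    - Ingress
  ingress:
    - from:
        - podSelector: {}          # allow traffic only from the same namespace
```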
|
||||
|
||||
K3k also utilizes Kubernetes Pod Security Admission (PSA) to enforce security policies within virtual clusters based on Pod Security Standards (PSS). PSS define different levels of security for pods, restricting what actions pods can perform. By configuring PSA to enforce a specific PSS level (e.g., `baseline` or `restricted`) for a virtual cluster, K3k ensures that pods adhere to established security best practices and prevents them from using privileged features or performing potentially dangerous operations.
|
||||
|
||||
Key aspects of PSA integration include:
|
||||
|
||||
* **Namespace-Level Enforcement:** PSA configuration is applied at the namespace level, providing a consistent security posture for all pods within the virtual cluster.
|
||||
* **Standardized Profiles:** PSS offers a set of predefined security profiles aligned with industry best practices, simplifying security configuration and ensuring a baseline level of security.
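
As an illustration (not K3k's actual output), enforcing a Pod Security Standard on the host namespace that backs a virtual cluster uses the standard PSA labels:

```yaml
# Illustrative only: PSA is enabled by labeling the host namespace.
apiVersion: v1
kind: Namespace
metadata:
  name: k3k-my-cluster                          # placeholder namespace name
  labels:
    pod-security.kubernetes.io/enforce: baseline
    pod-security.kubernetes.io/warn: restricted
```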
|
||||
|
||||
The shared mode architecture is designed with security in mind. K3k employs multiple layers of security controls, including Network Policies and PSA, to protect virtual clusters and the host cluster. While the shared namespace model requires careful configuration and management, these controls provide a robust security foundation for running workloads in a multi-tenant environment. K3k continuously evaluates and enhances its security mechanisms to address evolving threats and ensure the highest level of protection for its users.
|
||||
|
||||
|
||||
## Virtual Mode
|
||||
|
||||
The `virtual` mode in K3k deploys fully functional K3s clusters (including both server and agent components) as virtual clusters. These K3s clusters run as pods within the host cluster. Each virtual cluster has its own dedicated K3s server and one or more K3s agents acting as worker nodes. This approach provides strong isolation, as each virtual cluster operates independently with its own control plane and worker nodes. While these virtual clusters run as pods on the host cluster, they function as complete and separate Kubernetes environments.
|
||||
|
||||

|
||||
|
||||
|
||||
### Networking and Storage
|
||||
|
||||
Virtual clusters in `virtual` mode each have their own independent networking configuration managed by their respective K3s servers. Each virtual cluster runs its own CNI plugin, configured within its K3s server, providing complete network isolation from other virtual clusters and the host cluster. While the virtual cluster networks ultimately operate on top of the host cluster's network infrastructure, the networking configuration and traffic management are entirely separate.
|
||||
|
||||
|
||||
### Resource Sharing and Limits
|
||||
|
||||
Resource sharing in `virtual` mode is managed by applying resource limits to the pods that make up the virtual cluster (both the K3s server pod and the K3s agent pods). Each pod is assigned a specific amount of CPU, memory, and other resources. The workloads running *within* the virtual cluster then utilize these allocated resources. This means that the virtual cluster as a whole has a defined resource pool determined by the limits on its constituent pods.
|
||||
|
||||
This approach provides a clear and direct way to control the resources available to each virtual cluster. However, it requires careful resource planning to ensure that each virtual cluster has sufficient capacity for its workloads.
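
As a sketch, these per-pod limits can be expressed directly in the `Cluster` spec through the `serverLimit` and `workerLimit` fields documented in the API reference; the values below are illustrative:

```yaml
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
  name: my-cluster        # placeholder name
spec:
  mode: virtual
  servers: 1
  agents: 2
  serverLimit:            # limits applied to the K3s server pods
    cpu: "2"
    memory: 4Gi
  workerLimit:            # limits applied to the K3s agent pods
    cpu: "4"
    memory: 8Gi
```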
|
||||
|
||||
|
||||
### Isolation and Security
|
||||
|
||||
The `virtual` mode offers strong isolation due to the dedicated K3s clusters deployed for each virtual cluster. Because each virtual cluster runs its own separate control plane and worker nodes, workloads are effectively isolated from each other and from the host cluster. This architecture minimizes the risk of one virtual cluster impacting others or the host cluster.
|
||||
|
||||
Security in `virtual` mode benefits from the inherent isolation provided by the separate K3s clusters. However, standard Kubernetes security best practices still apply, and K3k emphasizes a layered security approach. While the K3s server pods often run with elevated privileges (due to the nature of their function, requiring access to system resources), K3k recommends minimizing these privileges whenever possible and adhering to the principle of least privilege. This can be achieved by carefully configuring the necessary capabilities instead of relying on full `privileged` mode. Further information on K3s security best practices can be found in the official K3s documentation: [https://docs.k3s.io/security](https://docs.k3s.io/security) (This link provides general security guidance, including discussions of capabilities and other relevant topics).
|
||||
|
||||
Currently, virtual mode carries a risk of privilege escalation because the server pods run with elevated privileges (required by their function, which needs access to system resources).
|
||||
|
||||
|
||||
## K3k Components
|
||||
|
||||
K3k consists of two main components:
|
||||
|
||||
* **Controller:** The K3k controller is a core component that runs on the host cluster. It watches for `Cluster` custom resources (CRs) and manages the lifecycle of virtual clusters. When a new `Cluster` CR is created, the controller provisions the necessary resources, including namespaces, K3s server and agent pods, and network configurations, to create the virtual cluster.
|
||||
* **CLI:** The K3k CLI provides a command-line interface for interacting with K3k. It allows users to easily create, manage, and access virtual clusters. The CLI simplifies common tasks such as creating `Cluster` CRs, retrieving kubeconfigs for accessing virtual clusters, and performing other management operations.
|
||||
|
||||
|
||||
## Comparison and Trade-offs
|
||||
|
||||
K3k offers two distinct modes for deploying virtual clusters: `shared` and `virtual`. Each mode has its own strengths and weaknesses, and the best choice depends on the specific needs and priorities of the user. Here's a comparison to help you make an informed decision:
|
||||
|
||||
| Feature | Shared Mode | Virtual Mode |
|
||||
|---|---|---|
|
||||
| **Architecture** | Agentless K3s server with Virtual Kubelet | Full K3s cluster (server and agents) as pods |
|
||||
| **Isolation** | Network Policies | Dedicated control plane and worker nodes |
|
||||
| **Resource Sharing** | Dynamic, namespace-level ResourceQuotas | Resource limits on virtual cluster pods |
|
||||
| **Networking** | Host cluster's CNI | Virtual cluster's own CNI |
|
||||
| **Storage** | Host cluster's storage | *Under development* |
|
||||
| **Security** | Pod Security Admission (PSA), Network Policies | Inherent isolation, PSA, Network Policies, secure host configuration |
|
||||
| **Performance** | Smaller footprint, more efficient due to running directly on the host | Higher overhead due to running full K3s clusters |
|
||||
|
||||
**Trade-offs:**
|
||||
|
||||
* **Isolation vs. Overhead:** The `shared` mode has lower overhead but weaker isolation, while the `virtual` mode provides stronger isolation but potentially higher overhead due to running full K3s clusters.
|
||||
* **Resource Sharing:** The `shared` mode offers dynamic resource sharing within a namespace, which can be efficient but less predictable. The `virtual` mode provides dedicated resources to each virtual cluster, offering more control but requiring careful planning.
|
||||
|
||||
**Choosing the right mode:**
|
||||
|
||||
* **Choose `shared` mode if:**
|
||||
* You prioritize low overhead and resource efficiency.
|
||||
* You need a simple setup and don't require strong isolation between virtual clusters.
|
||||
* Your workloads don't have strict performance requirements.
|
||||
* Your workloads need access to host capabilities (e.g., GPUs).
|
||||
* **Choose `virtual` mode if:**
|
||||
* You prioritize strong isolation.
|
||||
* You need dedicated resources and predictable performance for your virtual clusters.
|
||||
|
||||
Ultimately, the best choice depends on your specific requirements and priorities. Consider the trade-offs carefully and choose the mode that best aligns with your needs.
|
||||
@@ -1,146 +0,0 @@
|
||||
# NAME
|
||||
|
||||
k3kcli - CLI for K3K
|
||||
|
||||
# SYNOPSIS
|
||||
|
||||
k3kcli
|
||||
|
||||
```
|
||||
[--debug]
|
||||
[--kubeconfig]=[value]
|
||||
[--namespace]=[value]
|
||||
```
|
||||
|
||||
**Usage**:
|
||||
|
||||
```
|
||||
k3kcli [GLOBAL OPTIONS] command [COMMAND OPTIONS] [ARGUMENTS...]
|
||||
```
|
||||
|
||||
# GLOBAL OPTIONS
|
||||
|
||||
**--debug**: Turn on debug logs
|
||||
|
||||
**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)
|
||||
|
||||
**--namespace**="": namespace to create the k3k cluster in
|
||||
|
||||
|
||||
# COMMANDS
|
||||
|
||||
## cluster
|
||||
|
||||
cluster command
|
||||
|
||||
### create
|
||||
|
||||
Create new cluster
|
||||
|
||||
>k3kcli cluster create [command options] NAME
|
||||
|
||||
**--agent-args**="": agents extra arguments
|
||||
|
||||
**--agents**="": number of agents (default: 0)
|
||||
|
||||
**--cluster-cidr**="": cluster CIDR
|
||||
|
||||
**--clusterset**="": The clusterset to create the cluster in
|
||||
|
||||
**--debug**: Turn on debug logs
|
||||
|
||||
**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)
|
||||
|
||||
**--kubeconfig-server**="": override the kubeconfig server host
|
||||
|
||||
**--mode**="": k3k mode type (shared, virtual) (default: "shared")
|
||||
|
||||
**--namespace**="": namespace to create the k3k cluster in
|
||||
|
||||
**--persistence-type**="": persistence mode for the nodes (dynamic, ephemeral, static) (default: "dynamic")
|
||||
|
||||
**--server-args**="": servers extra arguments
|
||||
|
||||
**--servers**="": number of servers (default: 1)
|
||||
|
||||
**--service-cidr**="": service CIDR
|
||||
|
||||
**--storage-class-name**="": storage class name for dynamic persistence type
|
||||
|
||||
**--token**="": token of the cluster
|
||||
|
||||
**--version**="": k3s version
|
||||
|
||||
### delete
|
||||
|
||||
Delete an existing cluster
|
||||
|
||||
>k3kcli cluster delete [command options] NAME
|
||||
|
||||
**--debug**: Turn on debug logs
|
||||
|
||||
**--keep-data**: keeps persistence volumes created for the cluster after deletion
|
||||
|
||||
**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)
|
||||
|
||||
**--namespace**="": namespace to create the k3k cluster in
|
||||
|
||||
## clusterset
|
||||
|
||||
clusterset command
|
||||
|
||||
### create
|
||||
|
||||
Create new clusterset
|
||||
|
||||
>k3kcli clusterset create [command options] NAME
|
||||
|
||||
**--debug**: Turn on debug logs
|
||||
|
||||
**--display-name**="": The display name of the clusterset
|
||||
|
||||
**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)
|
||||
|
||||
**--mode**="": The allowed mode type of the clusterset (default: "shared")
|
||||
|
||||
**--namespace**="": namespace to create the k3k cluster in
|
||||
|
||||
### delete
|
||||
|
||||
Delete an existing clusterset
|
||||
|
||||
>k3kcli clusterset delete [command options] NAME
|
||||
|
||||
**--debug**: Turn on debug logs
|
||||
|
||||
**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)
|
||||
|
||||
**--namespace**="": namespace to create the k3k cluster in
|
||||
|
||||
## kubeconfig
|
||||
|
||||
Manage kubeconfig for clusters
|
||||
|
||||
### generate
|
||||
|
||||
Generate kubeconfig for clusters
|
||||
|
||||
**--altNames**="": altNames of the generated certificates for the kubeconfig
|
||||
|
||||
**--cn**="": Common name (CN) of the generated certificates for the kubeconfig (default: "system:admin")
|
||||
|
||||
**--config-name**="": the name of the generated kubeconfig file
|
||||
|
||||
**--debug**: Turn on debug logs
|
||||
|
||||
**--expiration-days**="": Expiration date of the certificates used for the kubeconfig (default: 356)
|
||||
|
||||
**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)
|
||||
|
||||
**--kubeconfig-server**="": override the kubeconfig server host
|
||||
|
||||
**--name**="": cluster name
|
||||
|
||||
**--namespace**="": namespace to create the k3k cluster in
|
||||
|
||||
**--org**="": Organization name (ORG) of the generated certificates for the kubeconfig
|
||||
@@ -1,37 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
"github.com/rancher/k3k/cli/cmds"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Instantiate the CLI application
|
||||
app := cmds.NewApp()
|
||||
|
||||
// Generate the Markdown documentation
|
||||
md, err := app.ToMarkdown()
|
||||
if err != nil {
|
||||
fmt.Println("Error generating documentation:", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
wd, err := os.Getwd()
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
outputFile := path.Join(wd, "docs/cli/cli-docs.md")
|
||||
|
||||
err = os.WriteFile(outputFile, []byte(md), 0644)
|
||||
if err != nil {
|
||||
fmt.Println("Error generating documentation:", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
fmt.Println("Documentation generated at " + outputFile)
|
||||
}
|
||||
@@ -1,14 +0,0 @@
|
||||
processor:
|
||||
# RE2 regular expressions describing types that should be excluded from the generated documentation.
|
||||
ignoreTypes:
|
||||
- ClusterSet
|
||||
- ClusterSetList
|
||||
|
||||
# RE2 regular expressions describing type fields that should be excluded from the generated documentation.
|
||||
ignoreFields:
|
||||
- "status$"
|
||||
- "TypeMeta$"
|
||||
|
||||
render:
|
||||
# Version of Kubernetes to use when generating links to Kubernetes API documentation.
|
||||
kubernetesVersion: "1.31"
|
||||
@@ -1,220 +0,0 @@
|
||||
# API Reference
|
||||
|
||||
## Packages
|
||||
- [k3k.io/v1alpha1](#k3kiov1alpha1)
|
||||
|
||||
|
||||
## k3k.io/v1alpha1
|
||||
|
||||
|
||||
### Resource Types
|
||||
- [Cluster](#cluster)
|
||||
- [ClusterList](#clusterlist)
|
||||
|
||||
|
||||
|
||||
#### Addon
|
||||
|
||||
|
||||
|
||||
Addon specifies a Secret containing YAML to be deployed on cluster startup.
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [ClusterSpec](#clusterspec)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `secretNamespace` _string_ | SecretNamespace is the namespace of the Secret. | | |
|
||||
| `secretRef` _string_ | SecretRef is the name of the Secret. | | |
|
||||
|
||||
|
||||
#### Cluster
|
||||
|
||||
|
||||
|
||||
Cluster defines a virtual Kubernetes cluster managed by k3k.
|
||||
It specifies the desired state of a virtual cluster, including version, node configuration, and networking.
|
||||
k3k uses this to provision and manage these virtual clusters.
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [ClusterList](#clusterlist)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `apiVersion` _string_ | `k3k.io/v1alpha1` | | |
|
||||
| `kind` _string_ | `Cluster` | | |
|
||||
| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
|
||||
| `spec` _[ClusterSpec](#clusterspec)_ | Spec defines the desired state of the Cluster. | \{ \} | |
|
||||
|
||||
|
||||
#### ClusterList
|
||||
|
||||
|
||||
|
||||
ClusterList is a list of Cluster resources.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `apiVersion` _string_ | `k3k.io/v1alpha1` | | |
|
||||
| `kind` _string_ | `ClusterList` | | |
|
||||
| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
|
||||
| `items` _[Cluster](#cluster) array_ | | | |
|
||||
|
||||
|
||||
#### ClusterMode
|
||||
|
||||
_Underlying type:_ _string_
|
||||
|
||||
ClusterMode is the possible provisioning mode of a Cluster.
|
||||
|
||||
_Validation:_
|
||||
- Enum: [shared virtual]
|
||||
|
||||
_Appears in:_
|
||||
- [ClusterSpec](#clusterspec)
|
||||
|
||||
|
||||
|
||||
#### ClusterSpec
|
||||
|
||||
|
||||
|
||||
ClusterSpec defines the desired state of a virtual Kubernetes cluster.
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [Cluster](#cluster)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `version` _string_ | Version is the K3s version to use for the virtual nodes.<br />It should follow the K3s versioning convention (e.g., v1.28.2-k3s1).<br />If not specified, the Kubernetes version of the host node will be used. | | |
|
||||
| `mode` _[ClusterMode](#clustermode)_ | Mode specifies the cluster provisioning mode: "shared" or "virtual".<br />Defaults to "shared". This field is immutable. | shared | Enum: [shared virtual] <br /> |
|
||||
| `servers` _integer_ | Servers specifies the number of K3s pods to run in server (control plane) mode.<br />Must be at least 1. Defaults to 1. | 1 | |
|
||||
| `agents` _integer_ | Agents specifies the number of K3s pods to run in agent (worker) mode.<br />Must be 0 or greater. Defaults to 0.<br />This field is ignored in "shared" mode. | 0 | |
|
||||
| `clusterCIDR` _string_ | ClusterCIDR is the CIDR range for pod IPs.<br />Defaults to 10.42.0.0/16 in shared mode and 10.52.0.0/16 in virtual mode.<br />This field is immutable. | | |
|
||||
| `serviceCIDR` _string_ | ServiceCIDR is the CIDR range for service IPs.<br />Defaults to 10.43.0.0/16 in shared mode and 10.53.0.0/16 in virtual mode.<br />This field is immutable. | | |
|
||||
| `clusterDNS` _string_ | ClusterDNS is the IP address for the CoreDNS service.<br />Must be within the ServiceCIDR range. Defaults to 10.43.0.10.<br />This field is immutable. | | |
|
||||
| `persistence` _[PersistenceConfig](#persistenceconfig)_ | Persistence specifies options for persisting etcd data.<br />Defaults to dynamic persistence, which uses a PersistentVolumeClaim to provide data persistence.<br />A default StorageClass is required for dynamic persistence. | \{ type:dynamic \} | |
|
||||
| `expose` _[ExposeConfig](#exposeconfig)_ | Expose specifies options for exposing the API server.<br />By default, it's only exposed as a ClusterIP. | | |
|
||||
| `nodeSelector` _object (keys:string, values:string)_ | NodeSelector specifies node labels to constrain where server/agent pods are scheduled.<br />In "shared" mode, this also applies to workloads. | | |
|
||||
| `priorityClass` _string_ | PriorityClass specifies the priorityClassName for server/agent pods.<br />In "shared" mode, this also applies to workloads. | | |
|
||||
| `tokenSecretRef` _[SecretReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#secretreference-v1-core)_ | TokenSecretRef is a Secret reference containing the token used by worker nodes to join the cluster.<br />The Secret must have a "token" field in its data. | | |
|
||||
| `tlsSANs` _string array_ | TLSSANs specifies subject alternative names for the K3s server certificate. | | |
|
||||
| `serverArgs` _string array_ | ServerArgs specifies ordered key-value pairs for K3s server pods.<br />Example: ["--tls-san=example.com"] | | |
|
||||
| `agentArgs` _string array_ | AgentArgs specifies ordered key-value pairs for K3s agent pods.<br />Example: ["--node-name=my-agent-node"] | | |
|
||||
| `addons` _[Addon](#addon) array_ | Addons specifies secrets containing raw YAML to deploy on cluster startup. | | |
|
||||
| `serverLimit` _[ResourceList](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core)_ | ServerLimit specifies resource limits for server nodes. | | |
|
||||
| `workerLimit` _[ResourceList](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core)_ | WorkerLimit specifies resource limits for agent nodes. | | |
|
||||
|
||||
|
||||
|
||||
|
||||
#### ExposeConfig
|
||||
|
||||
|
||||
|
||||
ExposeConfig specifies options for exposing the API server.
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [ClusterSpec](#clusterspec)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `ingress` _[IngressConfig](#ingressconfig)_ | Ingress specifies options for exposing the API server through an Ingress. | | |
|
||||
| `loadbalancer` _[LoadBalancerConfig](#loadbalancerconfig)_ | LoadBalancer specifies options for exposing the API server through a LoadBalancer service. | | |
|
||||
| `nodePort` _[NodePortConfig](#nodeportconfig)_ | NodePort specifies options for exposing the API server through NodePort. | | |
|
||||
|
||||
|
||||
#### IngressConfig
|
||||
|
||||
|
||||
|
||||
IngressConfig specifies options for exposing the API server through an Ingress.
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [ExposeConfig](#exposeconfig)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `annotations` _object (keys:string, values:string)_ | Annotations specifies annotations to add to the Ingress. | | |
|
||||
| `ingressClassName` _string_ | IngressClassName specifies the IngressClass to use for the Ingress. | | |
|
||||
|
||||
|
||||
#### LoadBalancerConfig
|
||||
|
||||
|
||||
|
||||
LoadBalancerConfig specifies options for exposing the API server through a LoadBalancer service.
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [ExposeConfig](#exposeconfig)
|
||||
|
||||
|
||||
|
||||
#### NodePortConfig
|
||||
|
||||
|
||||
|
||||
NodePortConfig specifies options for exposing the API server through NodePort.
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [ExposeConfig](#exposeconfig)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `serverPort` _integer_ | ServerPort is the port on each node on which the K3s server service is exposed when type is NodePort.<br />If not specified, a port will be allocated (default: 30000-32767). | | |
|
||||
| `servicePort` _integer_ | ServicePort is the port on each node on which the K3s service is exposed when type is NodePort.<br />If not specified, a port will be allocated (default: 30000-32767). | | |
|
||||
| `etcdPort` _integer_ | ETCDPort is the port on each node on which the ETCD service is exposed when type is NodePort.<br />If not specified, a port will be allocated (default: 30000-32767). | | |
|
||||
|
||||
|
||||
#### PersistenceConfig
|
||||
|
||||
|
||||
|
||||
PersistenceConfig specifies options for persisting etcd data.
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [ClusterSpec](#clusterspec)
|
||||
- [ClusterStatus](#clusterstatus)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `type` _[PersistenceMode](#persistencemode)_ | Type specifies the persistence mode. | dynamic | |
|
||||
| `storageClassName` _string_ | StorageClassName is the name of the StorageClass to use for the PVC.<br />This field is only relevant in "dynamic" mode. | | |
|
||||
| `storageRequestSize` _string_ | StorageRequestSize is the requested size for the PVC.<br />This field is only relevant in "dynamic" mode. | | |
|
||||
|
||||
|
||||
#### PersistenceMode
|
||||
|
||||
_Underlying type:_ _string_
|
||||
|
||||
PersistenceMode is the storage mode of a Cluster.
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [PersistenceConfig](#persistenceconfig)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -1,154 +0,0 @@
|
||||
# Development
|
||||
|
||||
|
||||
## Prerequisites
|
||||
|
||||
To start developing K3k you will need:
|
||||
|
||||
- Go
|
||||
- Docker
|
||||
- Helm
|
||||
- A running Kubernetes cluster
|
||||
|
||||
|
||||
### TLDR
|
||||
|
||||
```shell
|
||||
#!/bin/bash
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# These environment variables configure the image repository and tag.
|
||||
export REPO=ghcr.io/myuser
|
||||
export VERSION=dev-$(date -u '+%Y%m%d%H%M')
|
||||
|
||||
make
|
||||
make push
|
||||
make install
|
||||
```
|
||||
|
||||
### Makefile
|
||||
|
||||
To see all the available Make commands, run `make help`, e.g.:
|
||||
|
||||
```
|
||||
-> % make help
|
||||
all Run 'make' or 'make all' to run 'version', 'build-crds', 'build' and 'package'
|
||||
version Print the current version
|
||||
build Build the K3k binaries (k3k, k3k-kubelet and k3kcli)
|
||||
package Package the k3k and k3k-kubelet Docker images
|
||||
push Push the K3k images to the registry
|
||||
test Run all the tests
|
||||
test-unit Run the unit tests (skips the e2e)
|
||||
test-controller Run the controller tests (pkg/controller)
|
||||
test-e2e Run the e2e tests
|
||||
build-crds Build the CRDs specs
|
||||
docs Build the CRDs docs
|
||||
lint Find any linting issues in the project
|
||||
validate Validate the project checking for any dependency or doc mismatch
|
||||
install Install K3k with Helm on the targeted Kubernetes cluster
|
||||
help Show this help.
|
||||
```
|
||||
|
||||
### Build
|
||||
|
||||
To build the needed binaries (`k3k`, `k3k-kubelet` and the `k3kcli`) and package the images you can simply run `make`.
|
||||
|
||||
By default the `rancher` repository will be used, but you can customize this to your registry with the `REPO` env var:
|
||||
|
||||
```
|
||||
REPO=ghcr.io/userorg make
|
||||
```
|
||||
|
||||
To customize the tag you can also explicitly set the VERSION:
|
||||
|
||||
```
|
||||
VERSION=dev-$(date -u '+%Y%m%d%H%M') make
|
||||
```
|
||||
|
||||
|
||||
### Push
|
||||
|
||||
You will need to push the built images to your registry, and you can use the `make push` command to do this.
|
||||
|
||||
|
||||
### Install
|
||||
|
||||
Once you have your images available you can install K3k with the `make install` command. This will use `helm` to install the release.
|
||||
|
||||
|
||||
## Tests
|
||||
|
||||
To run the tests you can just run `make test`, or one of the other available "sub-tests" targets (`test-unit`, `test-controller`, `test-e2e`).
|
||||
|
||||
We use [Ginkgo](https://onsi.github.io/ginkgo/), and [`envtest`](https://book.kubebuilder.io/reference/envtest) for testing the controllers.
|
||||
|
||||
The required binaries for `envtest` are installed with [`setup-envtest`](https://pkg.go.dev/sigs.k8s.io/controller-runtime/tools/setup-envtest), in the `.envtest` folder.
|
||||
|
||||
|
||||
## CRDs and Docs
|
||||
|
||||
We are using Kubebuilder and `controller-gen` to build the needed CRDs. To generate the specs you can run `make build-crds`.
|
||||
|
||||
Remember also to update the CRDs documentation running the `make docs` command.
|
||||
|
||||
## How to install k3k on k3d
|
||||
|
||||
This document provides a guide on how to install k3k on [k3d](https://k3d.io).
|
||||
|
||||
### Installing k3d
|
||||
|
||||
Since k3d uses Docker under the hood, we need to expose on the host the ports that we'll later use for the NodePort when creating the virtual cluster.
|
||||
|
||||
Create the k3d cluster in the following way:
|
||||
|
||||
```bash
|
||||
k3d cluster create k3k -p "30000-30010:30000-30010@server:0"
|
||||
```
|
||||
|
||||
With this syntax ports from 30000 to 30010 will be exposed on the host.
|
||||
|
||||
### Install k3k
|
||||
|
||||
Now install k3k as usual:
|
||||
|
||||
```bash
|
||||
helm repo update
|
||||
helm install --namespace k3k-system --create-namespace k3k k3k/k3k --devel
|
||||
```
|
||||
|
||||
### Create a virtual cluster
|
||||
|
||||
Once the k3k controller is up and running, create a namespace for our first virtual cluster:
|
||||
|
||||
```bash
|
||||
kubectl create ns k3k-mycluster
|
||||
```
|
||||
|
||||
Then create the virtual cluster, exposing the API server through a NodePort on one of the ports we set up in the previous step:
|
||||
|
||||
```bash
|
||||
cat <<EOF | kubectl apply -f -
|
||||
apiVersion: k3k.io/v1alpha1
|
||||
kind: Cluster
|
||||
metadata:
|
||||
name: mycluster
|
||||
namespace: k3k-mycluster
|
||||
spec:
|
||||
expose:
|
||||
nodePort:
|
||||
serverPort: 30001
|
||||
EOF
|
||||
```
|
||||
|
||||
Check that the cluster is ready:
|
||||
|
||||
```bash
|
||||
kubectl get po -n k3k-mycluster
|
||||
```
|
||||
|
||||
The last step is to generate a kubeconfig to connect to the virtual cluster we've just created:
|
||||
|
||||
```bash
|
||||
k3kcli kubeconfig generate --name mycluster --namespace k3k-mycluster --kubeconfig-server localhost:30001
|
||||
```
|
||||
File diff suppressed because it is too large
Load Diff
Binary file not shown.
|
Before Width: | Height: | Size: 253 KiB |
File diff suppressed because it is too large
Load Diff
Binary file not shown.
|
Before Width: | Height: | Size: 193 KiB |
@@ -1,11 +0,0 @@
|
||||
apiVersion: k3k.io/v1alpha1
|
||||
kind: ClusterSet
|
||||
metadata:
|
||||
name: clusterset-example
|
||||
# spec:
|
||||
# disableNetworkPolicy: false
|
||||
# allowedModeTypes:
|
||||
# - "shared"
|
||||
# - "virtual"
|
||||
# podSecurityAdmissionLevel: "baseline"
|
||||
# defaultPriorityClass: "lowpriority"
|
||||
@@ -3,7 +3,6 @@ kind: Cluster
|
||||
metadata:
|
||||
name: example1
|
||||
spec:
|
||||
mode: "shared"
|
||||
servers: 1
|
||||
agents: 3
|
||||
token: test
|
||||
|
||||
@@ -3,7 +3,6 @@ kind: Cluster
|
||||
metadata:
|
||||
name: single-server
|
||||
spec:
|
||||
mode: "shared"
|
||||
servers: 1
|
||||
agents: 3
|
||||
token: test
|
||||
|
||||
264
go.mod
264
go.mod
@@ -1,218 +1,74 @@
|
||||
module github.com/rancher/k3k
|
||||
|
||||
go 1.23.4
|
||||
go 1.19
|
||||
|
||||
replace (
|
||||
github.com/google/cel-go => github.com/google/cel-go v0.17.7
|
||||
github.com/prometheus/client_golang => github.com/prometheus/client_golang v1.16.0
|
||||
github.com/prometheus/client_model => github.com/prometheus/client_model v0.6.1
|
||||
github.com/prometheus/common => github.com/prometheus/common v0.47.0
|
||||
golang.org/x/term => golang.org/x/term v0.15.0
|
||||
require (
|
||||
github.com/sirupsen/logrus v1.8.1
|
||||
github.com/urfave/cli v1.22.12
|
||||
k8s.io/api v0.26.1
|
||||
k8s.io/apimachinery v0.26.1
|
||||
k8s.io/client-go v0.26.1
|
||||
k8s.io/klog v1.0.0
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/go-logr/zapr v1.3.0
|
||||
github.com/onsi/ginkgo/v2 v2.21.0
|
||||
github.com/onsi/gomega v1.36.0
|
||||
github.com/prometheus/client_model v0.6.1
|
||||
github.com/rancher/dynamiclistener v1.27.5
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/stretchr/testify v1.10.0
|
||||
github.com/testcontainers/testcontainers-go v0.35.0
|
||||
github.com/testcontainers/testcontainers-go/modules/k3s v0.35.0
|
||||
github.com/urfave/cli/v2 v2.27.5
|
||||
github.com/virtual-kubelet/virtual-kubelet v1.11.0
|
||||
go.etcd.io/etcd/api/v3 v3.5.16
|
||||
go.etcd.io/etcd/client/v3 v3.5.16
|
||||
go.uber.org/zap v1.27.0
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
helm.sh/helm/v3 v3.14.4
|
||||
k8s.io/api v0.29.11
|
||||
k8s.io/apimachinery v0.29.11
|
||||
k8s.io/apiserver v0.29.11
|
||||
k8s.io/client-go v0.29.11
|
||||
k8s.io/component-base v0.29.11
|
||||
k8s.io/component-helpers v0.29.11
|
||||
k8s.io/kubectl v0.29.11
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
|
||||
sigs.k8s.io/controller-runtime v0.17.5
|
||||
)
|
||||
|
||||
require (
|
||||
dario.cat/mergo v1.0.1 // indirect
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
|
||||
github.com/BurntSushi/toml v1.4.0 // indirect
|
||||
github.com/MakeNowJust/heredoc v1.0.0 // indirect
|
||||
github.com/Masterminds/goutils v1.1.1 // indirect
|
||||
github.com/Masterminds/semver/v3 v3.3.0 // indirect
|
||||
github.com/Masterminds/sprig/v3 v3.3.0 // indirect
|
||||
github.com/Masterminds/squirrel v1.5.4 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.2 // indirect
|
||||
github.com/NYTimes/gziphandler v1.1.1 // indirect
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/chai2010/gettext-go v1.0.2 // indirect
|
||||
github.com/containerd/containerd v1.7.24 // indirect
|
||||
github.com/containerd/errdefs v0.3.0 // indirect
|
||||
github.com/containerd/log v0.1.0 // indirect
|
||||
github.com/containerd/platforms v0.2.1 // indirect
|
||||
github.com/coreos/go-semver v0.3.1 // indirect
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
|
||||
github.com/cpuguy83/dockercfg v0.3.2 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect
|
||||
github.com/cyphar/filepath-securejoin v0.3.6 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/distribution/reference v0.6.0 // indirect
|
||||
github.com/docker/cli v25.0.1+incompatible // indirect
|
||||
github.com/docker/distribution v2.8.3+incompatible // indirect
|
||||
github.com/docker/docker v27.1.1+incompatible // indirect
|
||||
github.com/docker/docker-credential-helpers v0.7.0 // indirect
|
||||
github.com/docker/go-connections v0.5.0 // indirect
|
||||
github.com/docker/go-metrics v0.0.1 // indirect
|
||||
github.com/docker/go-units v0.5.0 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
|
||||
github.com/evanphx/json-patch v5.9.0+incompatible // indirect
|
||||
github.com/evanphx/json-patch/v5 v5.9.0 // indirect
|
||||
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
|
||||
github.com/fatih/color v1.13.0 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/fsnotify/fsnotify v1.7.0 // indirect
|
||||
github.com/go-errors/errors v1.4.2 // indirect
|
||||
github.com/go-gorp/gorp/v3 v3.1.0 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-ole/go-ole v1.2.6 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.2 // indirect
|
||||
github.com/go-openapi/swag v0.23.0 // indirect
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
|
||||
github.com/gobwas/glob v0.2.3 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.9.0 // indirect
|
||||
github.com/evanphx/json-patch/v5 v5.6.0 // indirect
|
||||
github.com/fsnotify/fsnotify v1.6.0 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.5 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.0 // indirect
|
||||
github.com/go-openapi/swag v0.19.14 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/google/btree v1.1.3 // indirect
|
||||
github.com/google/cel-go v0.22.0 // indirect
|
||||
github.com/google/gnostic-models v0.6.8 // indirect
|
||||
github.com/google/go-cmp v0.6.0 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/gorilla/mux v1.8.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.0 // indirect
|
||||
github.com/gosuri/uitable v0.0.4 // indirect
|
||||
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||
github.com/huandu/xstrings v1.5.0 // indirect
|
||||
github.com/imdario/mergo v0.3.13 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/jmoiron/sqlx v1.4.0 // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/google/gnostic v0.5.7-v3refs // indirect
|
||||
github.com/google/go-cmp v0.5.9 // indirect
|
||||
github.com/google/uuid v1.1.2 // indirect
|
||||
github.com/imdario/mergo v0.3.6 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/mailru/easyjson v0.7.6 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/prometheus/client_golang v1.14.0 // indirect
|
||||
github.com/prometheus/client_model v0.3.0 // indirect
|
||||
github.com/prometheus/common v0.37.0 // indirect
|
||||
github.com/prometheus/procfs v0.8.0 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect
|
||||
golang.org/x/sys v0.3.0 // indirect
|
||||
golang.org/x/term v0.3.0 // indirect
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/protobuf v1.28.1 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.26.0 // indirect
|
||||
k8s.io/component-base v0.26.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect
|
||||
sigs.k8s.io/yaml v1.3.0 // indirect
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/go-logr/logr v1.2.3 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/compress v1.17.4 // indirect
|
||||
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
|
||||
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
|
||||
github.com/lib/pq v1.10.9 // indirect
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
|
||||
github.com/magiconair/properties v1.8.7 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-isatty v0.0.17 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.9 // indirect
|
||||
github.com/mitchellh/copystructure v1.2.0 // indirect
|
||||
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
|
||||
github.com/mitchellh/reflectwalk v1.0.2 // indirect
|
||||
github.com/moby/docker-image-spec v1.3.1 // indirect
|
||||
github.com/moby/locker v1.0.1 // indirect
|
||||
github.com/moby/patternmatcher v0.6.0 // indirect
|
||||
github.com/moby/spdystream v0.5.0 // indirect
|
||||
github.com/moby/sys/mountinfo v0.7.2 // indirect
|
||||
github.com/moby/sys/sequential v0.5.0 // indirect
|
||||
github.com/moby/sys/user v0.3.0 // indirect
|
||||
github.com/moby/sys/userns v0.1.0 // indirect
|
||||
github.com/moby/term v0.5.0 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
|
||||
github.com/morikuni/aec v1.0.0 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.0 // indirect
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
|
||||
github.com/prometheus/client_golang v1.19.1 // indirect
|
||||
github.com/prometheus/common v0.55.0 // indirect
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/rubenv/sql-migrate v1.7.1 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/shirou/gopsutil/v3 v3.23.12 // indirect
|
||||
github.com/shoenig/go-m1cpu v0.1.6 // indirect
|
||||
github.com/shopspring/decimal v1.4.0 // indirect
|
||||
github.com/spf13/cast v1.7.0 // indirect
|
||||
github.com/spf13/cobra v1.8.1 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/stoewer/go-strcase v1.3.0 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.12 // indirect
|
||||
github.com/tklauser/numcpus v0.6.1 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
|
||||
github.com/xlab/treeprint v1.2.0 // indirect
|
||||
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.3 // indirect
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.16 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
|
||||
go.opentelemetry.io/otel v1.28.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.28.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.28.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.28.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
golang.org/x/crypto v0.31.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
|
||||
golang.org/x/net v0.33.0 // indirect
|
||||
golang.org/x/oauth2 v0.23.0 // indirect
|
||||
golang.org/x/sync v0.10.0 // indirect
|
||||
golang.org/x/sys v0.28.0 // indirect
|
||||
golang.org/x/term v0.27.0 // indirect
|
||||
golang.org/x/text v0.21.0 // indirect
|
||||
golang.org/x/time v0.7.0 // indirect
|
||||
golang.org/x/tools v0.26.0 // indirect
|
||||
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect
|
||||
google.golang.org/grpc v1.65.0 // indirect
|
||||
google.golang.org/protobuf v1.35.1 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
github.com/rancher/dynamiclistener v0.3.5
|
||||
golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10 // indirect
|
||||
golang.org/x/text v0.5.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.29.11 // indirect
|
||||
k8s.io/cli-runtime v0.29.11 // indirect
|
||||
k8s.io/klog/v2 v2.130.1 // indirect
|
||||
k8s.io/kms v0.29.11 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
|
||||
oras.land/oras-go v1.2.5 // indirect
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
|
||||
sigs.k8s.io/kustomize/api v0.18.0 // indirect
|
||||
sigs.k8s.io/kustomize/kyaml v0.18.1 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.3 // indirect
|
||||
sigs.k8s.io/yaml v1.4.0 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
k8s.io/apiserver v0.26.1
|
||||
k8s.io/klog/v2 v2.80.1
|
||||
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448
|
||||
sigs.k8s.io/controller-runtime v0.14.1
|
||||
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
|
||||
)
|
||||
|
||||
BIN
hack/becausewecan.jpg
Normal file
BIN
hack/becausewecan.jpg
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 137 KiB |
@@ -4,25 +4,18 @@ set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
set -x
|
||||
CODEGEN_GIT_PKG=https://github.com/kubernetes/code-generator.git
|
||||
git clone --depth 1 ${CODEGEN_GIT_PKG} || true
|
||||
|
||||
K8S_VERSION=$(cat go.mod | grep -m1 "k8s.io/apiserver" | cut -d " " -f 2)
|
||||
SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
|
||||
CODEGEN_PKG=./code-generator
|
||||
|
||||
# cd into the git dir to checkout the code gen version compatible with the k8s version that this is using
|
||||
cd $CODEGEN_PKG
|
||||
git fetch origin tag ${K8S_VERSION}
|
||||
git checkout ${K8S_VERSION}
|
||||
cd -
|
||||
|
||||
source ${CODEGEN_PKG}/kube_codegen.sh
|
||||
|
||||
kube::codegen::gen_helpers \
|
||||
--boilerplate "${SCRIPT_ROOT}/hack/boilerplate.go.txt" \
|
||||
--input-pkg-root "${SCRIPT_ROOT}/pkg/apis" \
|
||||
--output-base "${SCRIPT_ROOT}/pkg/apis"
|
||||
"${CODEGEN_PKG}/generate-groups.sh" \
|
||||
"deepcopy" \
|
||||
github.com/rancher/k3k/pkg/generated \
|
||||
github.com/rancher/k3k/pkg/apis \
|
||||
"k3k.io:v1alpha1" \
|
||||
--go-header-file "${SCRIPT_ROOT}"/hack/boilerplate.go.txt \
|
||||
--output-base "$(dirname "${BASH_SOURCE[0]}")/../../../.."
|
||||
|
||||
rm -rf code-generator
|
||||
|
||||
@@ -1,34 +0,0 @@
|
||||
## Virtual Kubelet
|
||||
|
||||
This package provides an implementation of a virtual cluster node using [virtual-kubelet](https://github.com/virtual-kubelet/virtual-kubelet).
|
||||
|
||||
The implementation is based on several projects, including:
|
||||
- [Virtual Kubelet](https://github.com/virtual-kubelet/virtual-kubelet)
|
||||
- [Kubectl](https://github.com/kubernetes/kubectl)
|
||||
- [Client-go](https://github.com/kubernetes/client-go)
|
||||
- [Azure-Aci](https://github.com/virtual-kubelet/azure-aci)
|
||||
|
||||
## Overview
|
||||
|
||||
This project creates a node that registers itself in the virtual cluster. When workloads are scheduled to this node, it simply creates/updates the workload on the host cluster.
|
||||
|
||||
## Usage
|
||||
|
||||
Build/Push the image using (from the root of rancher/k3k):
|
||||
|
||||
```
|
||||
make build
|
||||
docker buildx build -f package/Dockerfile . -t $REPO/$IMAGE:$TAG
|
||||
```
|
||||
|
||||
When running, it is recommended to deploy a k3k cluster with 1 server (with `--disable-agent` as a server arg) and no agents (so that the workloads can only be scheduled on the virtual node/host cluster).
|
||||
|
||||
After the image is built, it should be deployed with the following ENV vars set:
|
||||
- `CLUSTER_NAME` should be the name of the cluster.
|
||||
- `CLUSTER_NAMESPACE` should be the namespace the cluster is running in.
|
||||
- `HOST_KUBECONFIG` should be the path on the local filesystem (in container) to a kubeconfig for the host cluster (likely stored in a secret/mounted as a volume).
|
||||
- `VIRT_KUBECONFIG` should be the path on the local filesystem (in container) to a kubeconfig for the virtual cluster (likely stored in a secret/mounted as a volume).
|
||||
- `VIRT_POD_IP` should be the IP that the container is accessible from.
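
As a sketch only, the container spec fragment below shows one way these variables could be wired up; the image reference, kubeconfig paths, and names are placeholders, not the project's actual deployment manifest:

```yaml
# Illustrative fragment of a pod/deployment spec for the virtual kubelet.
containers:
  - name: k3k-kubelet
    image: $REPO/$IMAGE:$TAG            # placeholder, as built above
    env:
      - name: CLUSTER_NAME
        value: mycluster                # placeholder cluster name
      - name: CLUSTER_NAMESPACE
        value: k3k-mycluster            # placeholder namespace
      - name: HOST_KUBECONFIG
        value: /etc/host/kubeconfig.yaml    # placeholder mounted path
      - name: VIRT_KUBECONFIG
        value: /etc/virt/kubeconfig.yaml    # placeholder mounted path
      - name: VIRT_POD_IP
        valueFrom:
          fieldRef:
            fieldPath: status.podIP     # the container's own pod IP
```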
|
||||
|
||||
This project is still under development and there are many features yet to be implemented, but it can run a basic nginx pod.
|
||||
|
||||
@@ -1,101 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
// config has all virtual-kubelet startup options
|
||||
type config struct {
|
||||
ClusterName string `yaml:"clusterName,omitempty"`
|
||||
ClusterNamespace string `yaml:"clusterNamespace,omitempty"`
|
||||
ServiceName string `yaml:"serviceName,omitempty"`
|
||||
Token string `yaml:"token,omitempty"`
|
||||
AgentHostname string `yaml:"agentHostname,omitempty"`
|
||||
HostConfigPath string `yaml:"hostConfigPath,omitempty"`
|
||||
VirtualConfigPath string `yaml:"virtualConfigPath,omitempty"`
|
||||
KubeletPort string `yaml:"kubeletPort,omitempty"`
|
||||
ServerIP string `yaml:"serverIP,omitempty"`
|
||||
Version string `yaml:"version,omitempty"`
|
||||
}
|
||||
|
||||
func (c *config) unmarshalYAML(data []byte) error {
|
||||
var conf config
|
||||
|
||||
if err := yaml.Unmarshal(data, &conf); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if c.ClusterName == "" {
|
||||
c.ClusterName = conf.ClusterName
|
||||
}
|
||||
|
||||
if c.ClusterNamespace == "" {
|
||||
c.ClusterNamespace = conf.ClusterNamespace
|
||||
}
|
||||
|
||||
if c.HostConfigPath == "" {
|
||||
c.HostConfigPath = conf.HostConfigPath
|
||||
}
|
||||
|
||||
if c.VirtualConfigPath == "" {
|
||||
c.VirtualConfigPath = conf.VirtualConfigPath
|
||||
}
|
||||
|
||||
if c.KubeletPort == "" {
|
||||
c.KubeletPort = conf.KubeletPort
|
||||
}
|
||||
|
||||
if c.AgentHostname == "" {
|
||||
c.AgentHostname = conf.AgentHostname
|
||||
}
|
||||
|
||||
if c.ServiceName == "" {
|
||||
c.ServiceName = conf.ServiceName
|
||||
}
|
||||
|
||||
if c.Token == "" {
|
||||
c.Token = conf.Token
|
||||
}
|
||||
|
||||
if c.ServerIP == "" {
|
||||
c.ServerIP = conf.ServerIP
|
||||
}
|
||||
|
||||
if c.Version == "" {
|
||||
c.Version = conf.Version
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *config) validate() error {
|
||||
if c.ClusterName == "" {
|
||||
return errors.New("cluster name is not provided")
|
||||
}
|
||||
|
||||
if c.ClusterNamespace == "" {
|
||||
return errors.New("cluster namespace is not provided")
|
||||
}
|
||||
|
||||
if c.AgentHostname == "" {
|
||||
return errors.New("agent Hostname is not provided")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *config) parse(path string) error {
|
||||
if _, err := os.Stat(path); os.IsNotExist(err) {
|
||||
return nil
|
||||
}
|
||||
|
||||
b, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return c.unmarshalYAML(b)
|
||||
}
|
||||
@@ -1,189 +0,0 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/rancher/k3k/pkg/controller"
|
||||
k3klog "github.com/rancher/k3k/pkg/log"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
)
|
||||
|
||||
type ConfigMapSyncer struct {
|
||||
mutex sync.RWMutex
|
||||
// VirtualClient is the client for the virtual cluster
|
||||
VirtualClient client.Client
|
||||
// CoreClient is the client for the host cluster
|
||||
HostClient client.Client
|
||||
// TranslateFunc is the function that translates a given resource from its virtual representation to the host
|
||||
// representation
|
||||
TranslateFunc func(*corev1.ConfigMap) (*corev1.ConfigMap, error)
|
||||
// Logger is the logger that the controller will use
|
||||
Logger *k3klog.Logger
|
||||
// objs are the objects that the syncer should watch/synchronize. Should only be manipulated
|
||||
// through add/remove
|
||||
objs sets.Set[types.NamespacedName]
|
||||
}
|
||||
|
||||
// Reconcile implements reconcile.Reconciler and synchronizes the objects in objs to the host cluster
|
||||
func (c *ConfigMapSyncer) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
if !c.isWatching(req.NamespacedName) {
|
||||
// return immediately without re-enqueueing. We aren't watching this resource
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
var virtual corev1.ConfigMap
|
||||
|
||||
if err := c.VirtualClient.Get(ctx, req.NamespacedName, &virtual); err != nil {
|
||||
return reconcile.Result{
|
||||
Requeue: true,
|
||||
}, fmt.Errorf("unable to get configmap %s/%s from virtual cluster: %w", req.Namespace, req.Name, err)
|
||||
}
|
||||
|
||||
translated, err := c.TranslateFunc(&virtual)
|
||||
if err != nil {
|
||||
return reconcile.Result{
|
||||
Requeue: true,
|
||||
}, fmt.Errorf("unable to translate configmap %s/%s from virtual cluster: %w", req.Namespace, req.Name, err)
|
||||
}
|
||||
|
||||
translatedKey := types.NamespacedName{
|
||||
Namespace: translated.Namespace,
|
||||
Name: translated.Name,
|
||||
}
|
||||
|
||||
var host corev1.ConfigMap
|
||||
if err = c.HostClient.Get(ctx, translatedKey, &host); err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
err = c.HostClient.Create(ctx, translated)
|
||||
// for simplicity's sake, we don't check for conflict errors. The existing object will get
|
||||
// picked up on in the next re-enqueue
|
||||
return reconcile.Result{
|
||||
Requeue: true,
|
||||
}, fmt.Errorf("unable to create host configmap %s/%s for virtual configmap %s/%s: %w",
|
||||
translated.Namespace, translated.Name, req.Namespace, req.Name, err)
|
||||
}
|
||||
|
||||
return reconcile.Result{Requeue: true}, fmt.Errorf("unable to get host configmap %s/%s: %w", translated.Namespace, translated.Name, err)
|
||||
}
|
||||
// we are going to use the host in order to avoid conflicts on update
|
||||
host.Data = translated.Data
|
||||
if host.Labels == nil {
|
||||
host.Labels = make(map[string]string, len(translated.Labels))
|
||||
}
|
||||
// we don't want to override labels made on the host cluster by other applications
|
||||
// but we do need to make sure the labels that the kubelet uses to track host cluster values
|
||||
// are being tracked appropriately
|
||||
for key, value := range translated.Labels {
|
||||
host.Labels[key] = value
|
||||
}
|
||||
|
||||
if err = c.HostClient.Update(ctx, &host); err != nil {
|
||||
return reconcile.Result{
|
||||
Requeue: true,
|
||||
}, fmt.Errorf("unable to update host configmap %s/%s for virtual configmap %s/%s: %w",
|
||||
translated.Namespace, translated.Name, req.Namespace, req.Name, err)
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
// isWatching is a utility method to determine if a key is in objs without the caller needing
|
||||
// to handle mutex lock/unlock.
|
||||
func (c *ConfigMapSyncer) isWatching(key types.NamespacedName) bool {
|
||||
c.mutex.RLock()
|
||||
defer c.mutex.RUnlock()
|
||||
|
||||
return c.objs.Has(key)
|
||||
}
|
||||
|
||||
// AddResource adds a given resource to the list of resources that will be synced. Safe to call multiple times for the
|
||||
// same resource.
|
||||
func (c *ConfigMapSyncer) AddResource(ctx context.Context, namespace, name string) error {
|
||||
objKey := types.NamespacedName{
|
||||
Namespace: namespace,
|
||||
Name: name,
|
||||
}
|
||||
|
||||
// if we already sync this object, there is no need to write-lock and add it
|
||||
if c.isWatching(objKey) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// lock in write mode since we are now adding the key
|
||||
c.mutex.Lock()
|
||||
if c.objs == nil {
|
||||
c.objs = sets.Set[types.NamespacedName]{}
|
||||
}
|
||||
|
||||
c.objs = c.objs.Insert(objKey)
|
||||
c.mutex.Unlock()
|
||||
|
||||
_, err := c.Reconcile(ctx, reconcile.Request{
|
||||
NamespacedName: objKey,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to reconcile new object %s/%s: %w", objKey.Namespace, objKey.Name, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveResource removes a given resource from the list of resources that will be synced. Safe to call for an already
|
||||
// removed resource.
|
||||
func (c *ConfigMapSyncer) RemoveResource(ctx context.Context, namespace, name string) error {
|
||||
objKey := types.NamespacedName{
|
||||
Namespace: namespace,
|
||||
Name: name,
|
||||
}
|
||||
// if we don't sync this object, there is no need to remove it
|
||||
if !c.isWatching(objKey) {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := retry.OnError(controller.Backoff, func(err error) bool {
|
||||
return err != nil
|
||||
}, func() error {
|
||||
return c.removeHostConfigMap(ctx, namespace, name)
|
||||
}); err != nil {
|
||||
return fmt.Errorf("unable to remove configmap: %w", err)
|
||||
}
|
||||
|
||||
c.mutex.Lock()
|
||||
if c.objs == nil {
|
||||
c.objs = sets.Set[types.NamespacedName]{}
|
||||
}
|
||||
|
||||
c.objs = c.objs.Delete(objKey)
|
||||
c.mutex.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *ConfigMapSyncer) removeHostConfigMap(ctx context.Context, virtualNamespace, virtualName string) error {
|
||||
var vConfigMap corev1.ConfigMap
|
||||
|
||||
key := types.NamespacedName{
|
||||
Namespace: virtualNamespace,
|
||||
Name: virtualName,
|
||||
}
|
||||
|
||||
if err := c.VirtualClient.Get(ctx, key, &vConfigMap); err != nil {
|
||||
return fmt.Errorf("unable to get virtual configmap %s/%s: %w", virtualNamespace, virtualName, err)
|
||||
}
|
||||
|
||||
translated, err := c.TranslateFunc(&vConfigMap)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to translate virtual secret: %s/%s: %w", virtualNamespace, virtualName, err)
|
||||
}
|
||||
|
||||
return c.HostClient.Delete(ctx, translated)
|
||||
}
|
||||
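The syncer above only acts on ConfigMaps that were explicitly registered through AddResource. The following sketch (not part of the original sources) shows one way it could be wired into a controller-runtime manager for the virtual cluster; virtMgr, hostClient, translateConfigMap and logger are assumed names, and the ctrl and manager packages from sigs.k8s.io/controller-runtime would need to be imported.

// registerAppConfig is an illustrative sketch only: it starts a ConfigMap controller in the virtual
// cluster backed by a ConfigMapSyncer and registers a single ConfigMap for synchronization.
func registerAppConfig(ctx context.Context, virtMgr manager.Manager, hostClient client.Client,
	translateConfigMap func(*corev1.ConfigMap) (*corev1.ConfigMap, error), logger *k3klog.Logger) error {
	syncer := &ConfigMapSyncer{
		VirtualClient: virtMgr.GetClient(),
		HostClient:    hostClient,
		TranslateFunc: translateConfigMap,
		Logger:        logger,
	}

	// every ConfigMap event in the virtual cluster reaches Reconcile, which ignores anything
	// that was not registered through AddResource
	if err := ctrl.NewControllerManagedBy(virtMgr).
		For(&corev1.ConfigMap{}).
		Complete(syncer); err != nil {
		return fmt.Errorf("unable to start configmap syncer: %w", err)
	}

	// start synchronizing a single ConfigMap to the host cluster
	return syncer.AddResource(ctx, "default", "app-config")
}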
@@ -1,130 +0,0 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
k3klog "github.com/rancher/k3k/pkg/log"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
)
|
||||
|
||||
type ControllerHandler struct {
|
||||
sync.RWMutex
|
||||
// Mgr is the manager used to run new controllers - from the virtual cluster
|
||||
Mgr manager.Manager
|
||||
// Scheme is the scheme used to run new controllers - from the virtual cluster
|
||||
Scheme runtime.Scheme
|
||||
// HostClient is the client used to communicate with the host cluster
|
||||
HostClient client.Client
|
||||
// VirtualClient is the client used to communicate with the virtual cluster
|
||||
VirtualClient client.Client
|
||||
// Translator is the translator that will be used to adjust objects before they
|
||||
// are made on the host cluster
|
||||
Translator translate.ToHostTranslator
|
||||
// Logger is the logger that the controller will use to log errors
|
||||
Logger *k3klog.Logger
|
||||
// controllers are the controllers which are currently running
|
||||
controllers map[schema.GroupVersionKind]updateableReconciler
|
||||
}
|
||||
|
||||
// updateableReconciler is a reconciler that only syncs specific resources (by name/namespace). This list can
// be altered through the AddResource and RemoveResource methods
|
||||
type updateableReconciler interface {
|
||||
reconcile.Reconciler
|
||||
AddResource(ctx context.Context, namespace string, name string) error
|
||||
RemoveResource(ctx context.Context, namespace string, name string) error
|
||||
}
|
||||
|
||||
func (c *ControllerHandler) AddResource(ctx context.Context, obj client.Object) error {
|
||||
c.RLock()
|
||||
|
||||
controllers := c.controllers
|
||||
if controllers != nil {
|
||||
if r, ok := c.controllers[obj.GetObjectKind().GroupVersionKind()]; ok {
|
||||
err := r.AddResource(ctx, obj.GetNamespace(), obj.GetName())
|
||||
c.RUnlock()
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// we need to manually lock/unlock since we intend to write-lock when adding a new controller
|
||||
c.RUnlock()
|
||||
|
||||
var r updateableReconciler
|
||||
|
||||
switch obj.(type) {
|
||||
case *v1.Secret:
|
||||
r = &SecretSyncer{
|
||||
HostClient: c.HostClient,
|
||||
VirtualClient: c.VirtualClient,
|
||||
// TODO: Need actual function
|
||||
TranslateFunc: func(s *v1.Secret) (*v1.Secret, error) {
|
||||
// note that this doesn't do any type safety - fix this
|
||||
// when generics work
|
||||
c.Translator.TranslateTo(s)
|
||||
// Remove service-account-token types when synced to the host
|
||||
if s.Type == v1.SecretTypeServiceAccountToken {
|
||||
s.Type = v1.SecretTypeOpaque
|
||||
}
|
||||
return s, nil
|
||||
},
|
||||
Logger: c.Logger,
|
||||
}
|
||||
case *v1.ConfigMap:
|
||||
r = &ConfigMapSyncer{
|
||||
HostClient: c.HostClient,
|
||||
VirtualClient: c.VirtualClient,
|
||||
// TODO: Need actual function
|
||||
TranslateFunc: func(s *v1.ConfigMap) (*v1.ConfigMap, error) {
|
||||
c.Translator.TranslateTo(s)
|
||||
return s, nil
|
||||
},
|
||||
Logger: c.Logger,
|
||||
}
|
||||
default:
|
||||
// TODO: Technically, the configmap/secret syncers are relatively generic, and this
|
||||
// logic could be used for other types.
|
||||
return fmt.Errorf("unrecognized type: %T", obj)
|
||||
}
|
||||
|
||||
// build the controller for the concrete type that was passed in, so the secret syncer
// watches Secrets and the configmap syncer watches ConfigMaps
err := ctrl.NewControllerManagedBy(c.Mgr).
For(obj).
Complete(r)

if err != nil {
return fmt.Errorf("unable to start %T controller: %w", obj, err)
}
|
||||
|
||||
c.Lock()
|
||||
if c.controllers == nil {
|
||||
c.controllers = map[schema.GroupVersionKind]updateableReconciler{}
|
||||
}
|
||||
|
||||
c.controllers[obj.GetObjectKind().GroupVersionKind()] = r
|
||||
|
||||
c.Unlock()
|
||||
|
||||
return r.AddResource(ctx, obj.GetNamespace(), obj.GetName())
|
||||
}
|
||||
|
||||
func (c *ControllerHandler) RemoveResource(ctx context.Context, obj client.Object) error {
|
||||
// since we aren't adding a new controller, we don't need to lock
|
||||
c.RLock()
|
||||
syncer, ok := c.controllers[obj.GetObjectKind().GroupVersionKind()]
c.RUnlock()

if !ok {
return fmt.Errorf("no controller found for gvk %s", obj.GetObjectKind().GroupVersionKind())
}

return syncer.RemoveResource(ctx, obj.GetNamespace(), obj.GetName())
|
||||
}
|
||||
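A usage sketch for the handler above (illustrative only, not part of the original sources): the controllers map is keyed by GroupVersionKind, and a typed object built in code usually has an empty TypeMeta, so the GVK has to be populated before calling AddResource. handler and ctx are assumed values, and metav1 ("k8s.io/apimachinery/pkg/apis/meta/v1") would need to be imported.

// startSyncingSecret asks the ControllerHandler to sync one Secret from the virtual cluster to the host.
func startSyncingSecret(ctx context.Context, handler *ControllerHandler) error {
	secret := &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "registry-creds"},
	}
	// make sure the GVK is set, otherwise the lookup in the controllers map will miss
	secret.GetObjectKind().SetGroupVersionKind(schema.GroupVersionKind{Version: "v1", Kind: "Secret"})

	// the first call lazily creates and starts a SecretSyncer; later calls reuse it
	return handler.AddResource(ctx, secret)
}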
@@ -1,121 +0,0 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
"github.com/rancher/k3k/pkg/log"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
)
|
||||
|
||||
const (
|
||||
pvcController = "pvc-syncer-controller"
|
||||
pvcFinalizerName = "pvc.k3k.io/finalizer"
|
||||
)
|
||||
|
||||
type PVCReconciler struct {
|
||||
virtualClient ctrlruntimeclient.Client
|
||||
hostClient ctrlruntimeclient.Client
|
||||
clusterName string
|
||||
clusterNamespace string
|
||||
Scheme *runtime.Scheme
|
||||
HostScheme *runtime.Scheme
|
||||
logger *log.Logger
|
||||
Translator translate.ToHostTranslator
|
||||
}
|
||||
|
||||
// AddPVCSyncer adds the PersistentVolumeClaim syncer controller to the k3k-kubelet
|
||||
func AddPVCSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string, logger *log.Logger) error {
|
||||
translator := translate.ToHostTranslator{
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
}
|
||||
// initialize a new Reconciler
|
||||
reconciler := PVCReconciler{
|
||||
virtualClient: virtMgr.GetClient(),
|
||||
hostClient: hostMgr.GetClient(),
|
||||
Scheme: virtMgr.GetScheme(),
|
||||
HostScheme: hostMgr.GetScheme(),
|
||||
logger: logger.Named(pvcController),
|
||||
Translator: translator,
|
||||
clusterName: clusterName,
|
||||
clusterNamespace: clusterNamespace,
|
||||
}
|
||||
|
||||
return ctrl.NewControllerManagedBy(virtMgr).
|
||||
For(&v1.PersistentVolumeClaim{}).
|
||||
WithOptions(controller.Options{
|
||||
MaxConcurrentReconciles: maxConcurrentReconciles,
|
||||
}).
|
||||
Complete(&reconciler)
|
||||
}
|
||||
|
||||
func (r *PVCReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
log := r.logger.With("Cluster", r.clusterName, "PersistentVolumeClaim", req.NamespacedName)
|
||||
|
||||
var (
|
||||
virtPVC v1.PersistentVolumeClaim
|
||||
cluster v1alpha1.Cluster
|
||||
)
|
||||
|
||||
if err := r.hostClient.Get(ctx, types.NamespacedName{Name: r.clusterName, Namespace: r.clusterNamespace}, &cluster); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
// handling persistent volume sync
|
||||
if err := r.virtualClient.Get(ctx, req.NamespacedName, &virtPVC); err != nil {
|
||||
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
syncedPVC := r.pvc(&virtPVC)
|
||||
if err := controllerutil.SetControllerReference(&cluster, syncedPVC, r.HostScheme); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
// handle deletion
|
||||
if !virtPVC.DeletionTimestamp.IsZero() {
// delete the synced PVC on the host if it still exists
if err := r.hostClient.Delete(ctx, syncedPVC); err != nil && !apierrors.IsNotFound(err) {
return reconcile.Result{}, err
}
// remove the finalizer after cleaning up the synced PVC
if controllerutil.RemoveFinalizer(&virtPVC, pvcFinalizerName) {
if err := r.virtualClient.Update(ctx, &virtPVC); err != nil {
return reconcile.Result{}, err
}
}

return reconcile.Result{}, nil
}
|
||||
|
||||
// Add finalizer if it does not exist
|
||||
if controllerutil.AddFinalizer(&virtPVC, pvcFinalizerName) {
|
||||
if err := r.virtualClient.Update(ctx, &virtPVC); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
// create the PVC on the host
log.Info("creating the persistent volume claim for the first time on the host cluster")

// note that we don't need to update the PVC on the host cluster; we only sync it so that the
// host cluster can handle it.
|
||||
return reconcile.Result{}, ctrlruntimeclient.IgnoreAlreadyExists(r.hostClient.Create(ctx, syncedPVC))
|
||||
}
|
||||
|
||||
func (r *PVCReconciler) pvc(obj *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim {
|
||||
hostPVC := obj.DeepCopy()
|
||||
r.Translator.TranslateTo(hostPVC)
|
||||
|
||||
return hostPVC
|
||||
}
|
||||
@@ -1,184 +0,0 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
"github.com/rancher/k3k/pkg/log"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/component-helpers/storage/volume"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
)
|
||||
|
||||
const (
|
||||
podController = "pod-pvc-controller"
|
||||
pseudoPVLabel = "pod.k3k.io/pseudoPV"
|
||||
)
|
||||
|
||||
type PodReconciler struct {
|
||||
virtualClient ctrlruntimeclient.Client
|
||||
hostClient ctrlruntimeclient.Client
|
||||
clusterName string
|
||||
clusterNamespace string
|
||||
Scheme *runtime.Scheme
|
||||
HostScheme *runtime.Scheme
|
||||
logger *log.Logger
|
||||
Translator translate.ToHostTranslator
|
||||
}
|
||||
|
||||
// AddPodPVCController adds the pod PVC controller to the k3k-kubelet
|
||||
func AddPodPVCController(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string, logger *log.Logger) error {
|
||||
translator := translate.ToHostTranslator{
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
}
|
||||
// initialize a new Reconciler
|
||||
reconciler := PodReconciler{
|
||||
virtualClient: virtMgr.GetClient(),
|
||||
hostClient: hostMgr.GetClient(),
|
||||
Scheme: virtMgr.GetScheme(),
|
||||
HostScheme: hostMgr.GetScheme(),
|
||||
logger: logger.Named(podController),
|
||||
Translator: translator,
|
||||
clusterName: clusterName,
|
||||
clusterNamespace: clusterNamespace,
|
||||
}
|
||||
|
||||
return ctrl.NewControllerManagedBy(virtMgr).
|
||||
For(&v1.Pod{}).
|
||||
WithOptions(controller.Options{
|
||||
MaxConcurrentReconciles: maxConcurrentReconciles,
|
||||
}).
|
||||
Complete(&reconciler)
|
||||
}
|
||||
|
||||
func (r *PodReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.clusterName, "clusterNamespace", r.clusterNamespace)
|
||||
|
||||
var (
|
||||
virtPod v1.Pod
|
||||
cluster v1alpha1.Cluster
|
||||
)
|
||||
|
||||
if err := r.hostClient.Get(ctx, types.NamespacedName{Name: r.clusterName, Namespace: r.clusterNamespace}, &cluster); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
// handling pod
|
||||
if err := r.virtualClient.Get(ctx, req.NamespacedName, &virtPod); err != nil {
|
||||
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
// reconcile pods with pvcs
|
||||
for _, vol := range virtPod.Spec.Volumes {
|
||||
if vol.PersistentVolumeClaim != nil {
|
||||
log.Info("Handling pod with pvc")
|
||||
|
||||
if err := r.reconcilePodWithPVC(ctx, &virtPod, vol.PersistentVolumeClaim); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
// reconcilePodWithPVC creates a pseudo PV for each PVC referenced by a pod so that the pod can be
// scheduled on the virtual kubelet and then created on the host; the pseudo PV itself is never
// synced to the host cluster.
|
||||
func (r *PodReconciler) reconcilePodWithPVC(ctx context.Context, pod *v1.Pod, pvcSource *v1.PersistentVolumeClaimVolumeSource) error {
|
||||
log := ctrl.LoggerFrom(ctx).WithValues("PersistentVolumeClaim", pvcSource.ClaimName)
|
||||
|
||||
var pvc v1.PersistentVolumeClaim
|
||||
|
||||
key := types.NamespacedName{
|
||||
Name: pvcSource.ClaimName,
|
||||
Namespace: pod.Namespace,
|
||||
}
|
||||
|
||||
if err := r.virtualClient.Get(ctx, key, &pvc); err != nil {
|
||||
return ctrlruntimeclient.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
log.Info("Creating pseudo Persistent Volume")
|
||||
|
||||
pv := r.pseudoPV(&pvc)
|
||||
if err := r.virtualClient.Create(ctx, pv); err != nil {
|
||||
return ctrlruntimeclient.IgnoreAlreadyExists(err)
|
||||
}
|
||||
|
||||
orig := pv.DeepCopy()
|
||||
pv.Status = v1.PersistentVolumeStatus{
|
||||
Phase: v1.VolumeBound,
|
||||
}
|
||||
|
||||
if err := r.virtualClient.Status().Patch(ctx, pv, ctrlruntimeclient.MergeFrom(orig)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Info("Patch the status of PersistentVolumeClaim to Bound")
|
||||
|
||||
pvcPatch := pvc.DeepCopy()
|
||||
if pvcPatch.Annotations == nil {
|
||||
pvcPatch.Annotations = make(map[string]string)
|
||||
}
|
||||
|
||||
pvcPatch.Annotations[volume.AnnBoundByController] = "yes"
|
||||
pvcPatch.Annotations[volume.AnnBindCompleted] = "yes"
|
||||
pvcPatch.Status.Phase = v1.ClaimBound
|
||||
pvcPatch.Status.AccessModes = pvcPatch.Spec.AccessModes
|
||||
|
||||
return r.virtualClient.Status().Update(ctx, pvcPatch)
|
||||
}
|
||||
|
||||
func (r *PodReconciler) pseudoPV(obj *v1.PersistentVolumeClaim) *v1.PersistentVolume {
|
||||
var storageClass string
|
||||
|
||||
if obj.Spec.StorageClassName != nil {
|
||||
storageClass = *obj.Spec.StorageClassName
|
||||
}
|
||||
|
||||
return &v1.PersistentVolume{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: obj.Name,
|
||||
Labels: map[string]string{
|
||||
pseudoPVLabel: "true",
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
volume.AnnBoundByController: "true",
|
||||
volume.AnnDynamicallyProvisioned: "k3k-kubelet",
|
||||
},
|
||||
},
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "PersistentVolume",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
FlexVolume: &v1.FlexPersistentVolumeSource{
|
||||
Driver: "pseudopv",
|
||||
},
|
||||
},
|
||||
StorageClassName: storageClass,
|
||||
VolumeMode: obj.Spec.VolumeMode,
|
||||
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
|
||||
AccessModes: obj.Spec.AccessModes,
|
||||
Capacity: obj.Spec.Resources.Requests,
|
||||
ClaimRef: &v1.ObjectReference{
|
||||
APIVersion: obj.APIVersion,
|
||||
UID: obj.UID,
|
||||
ResourceVersion: obj.ResourceVersion,
|
||||
Kind: obj.Kind,
|
||||
Namespace: obj.Namespace,
|
||||
Name: obj.Name,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -1,186 +0,0 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/rancher/k3k/pkg/controller"
|
||||
k3klog "github.com/rancher/k3k/pkg/log"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
)
|
||||
|
||||
type SecretSyncer struct {
|
||||
mutex sync.RWMutex
|
||||
// VirtualClient is the client for the virtual cluster
|
||||
VirtualClient client.Client
|
||||
// HostClient is the client for the host cluster
|
||||
HostClient client.Client
|
||||
// TranslateFunc is the function that translates a given resource from its virtual representation
// to its host representation
|
||||
TranslateFunc func(*corev1.Secret) (*corev1.Secret, error)
|
||||
// Logger is the logger that the controller will use
|
||||
Logger *k3klog.Logger
|
||||
// objs are the objects that the syncer should watch/synchronize. It should only be manipulated
// through AddResource/RemoveResource
|
||||
objs sets.Set[types.NamespacedName]
|
||||
}
|
||||
|
||||
// Reconcile implements reconcile.Reconciler and synchronizes the objects in objs to the host cluster
|
||||
func (s *SecretSyncer) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
if !s.isWatching(req.NamespacedName) {
|
||||
// return immediately without re-enqueueing. We aren't watching this resource
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
var virtual corev1.Secret
|
||||
|
||||
if err := s.VirtualClient.Get(ctx, req.NamespacedName, &virtual); err != nil {
|
||||
return reconcile.Result{
|
||||
Requeue: true,
|
||||
}, fmt.Errorf("unable to get secret %s/%s from virtual cluster: %w", req.Namespace, req.Name, err)
|
||||
}
|
||||
|
||||
translated, err := s.TranslateFunc(&virtual)
|
||||
if err != nil {
|
||||
return reconcile.Result{
|
||||
Requeue: true,
|
||||
}, fmt.Errorf("unable to translate secret %s/%s from virtual cluster: %w", req.Namespace, req.Name, err)
|
||||
}
|
||||
|
||||
translatedKey := types.NamespacedName{
|
||||
Namespace: translated.Namespace,
|
||||
Name: translated.Name,
|
||||
}
|
||||
|
||||
var host corev1.Secret
|
||||
if err = s.HostClient.Get(ctx, translatedKey, &host); err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
err = s.HostClient.Create(ctx, translated)
|
||||
// for simplicity's sake, we don't check for conflict errors. The existing object will get
// picked up in the next re-enqueue
|
||||
return reconcile.Result{
|
||||
Requeue: true,
|
||||
}, fmt.Errorf("unable to create host secret %s/%s for virtual secret %s/%s: %w",
|
||||
translated.Namespace, translated.Name, req.Namespace, req.Name, err)
|
||||
}
|
||||
|
||||
return reconcile.Result{Requeue: true}, fmt.Errorf("unable to get host secret %s/%s: %w", translated.Namespace, translated.Name, err)
|
||||
}
|
||||
// we update the host object in place in order to avoid conflicts on update
|
||||
host.Data = translated.Data
|
||||
if host.Labels == nil {
|
||||
host.Labels = make(map[string]string, len(translated.Labels))
|
||||
}
|
||||
// we don't want to override labels made on the host cluster by other applications
|
||||
// but we do need to make sure the labels that the kubelet uses to track host cluster values
|
||||
// are being tracked appropriately
|
||||
for key, value := range translated.Labels {
|
||||
host.Labels[key] = value
|
||||
}
|
||||
|
||||
if err = s.HostClient.Update(ctx, &host); err != nil {
|
||||
return reconcile.Result{
|
||||
Requeue: true,
|
||||
}, fmt.Errorf("unable to update host secret %s/%s for virtual secret %s/%s: %w",
|
||||
translated.Namespace, translated.Name, req.Namespace, req.Name, err)
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
// isWatching is a utility method to determine if a key is in objs without the caller needing
|
||||
// to handle mutex lock/unlock.
|
||||
func (s *SecretSyncer) isWatching(key types.NamespacedName) bool {
|
||||
s.mutex.RLock()
|
||||
defer s.mutex.RUnlock()
|
||||
|
||||
return s.objs.Has(key)
|
||||
}
|
||||
|
||||
// AddResource adds a given resource to the list of resources that will be synced. Safe to call multiple times for the
|
||||
// same resource.
|
||||
func (s *SecretSyncer) AddResource(ctx context.Context, namespace, name string) error {
|
||||
objKey := types.NamespacedName{
|
||||
Namespace: namespace,
|
||||
Name: name,
|
||||
}
|
||||
// if we already sync this object, there is no need to write-lock and add it
|
||||
if s.isWatching(objKey) {
|
||||
return nil
|
||||
}
|
||||
// lock in write mode since we are now adding the key
|
||||
s.mutex.Lock()
|
||||
if s.objs == nil {
|
||||
s.objs = sets.Set[types.NamespacedName]{}
|
||||
}
|
||||
|
||||
s.objs = s.objs.Insert(objKey)
|
||||
s.mutex.Unlock()
|
||||
|
||||
_, err := s.Reconcile(ctx, reconcile.Request{
|
||||
NamespacedName: objKey,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to reconcile new object %s/%s: %w", objKey.Namespace, objKey.Name, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveResource removes a given resource from the list of resources that will be synced. Safe to call for an already
|
||||
// removed resource.
|
||||
func (s *SecretSyncer) RemoveResource(ctx context.Context, namespace, name string) error {
|
||||
objKey := types.NamespacedName{
|
||||
Namespace: namespace,
|
||||
Name: name,
|
||||
}
|
||||
// if we don't sync this object, there is no need to remove it
|
||||
if !s.isWatching(objKey) {
|
||||
return nil
|
||||
}
|
||||
// remove the synced secret from the host before dropping the key from the watch set
|
||||
if err := retry.OnError(controller.Backoff, func(err error) bool {
|
||||
return err != nil
|
||||
}, func() error {
|
||||
return s.removeHostSecret(ctx, namespace, name)
|
||||
}); err != nil {
|
||||
return fmt.Errorf("unable to remove secret: %w", err)
|
||||
}
|
||||
|
||||
s.mutex.Lock()
|
||||
if s.objs == nil {
|
||||
s.objs = sets.Set[types.NamespacedName]{}
|
||||
}
|
||||
|
||||
s.objs = s.objs.Delete(objKey)
|
||||
s.mutex.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SecretSyncer) removeHostSecret(ctx context.Context, virtualNamespace, virtualName string) error {
|
||||
var vSecret corev1.Secret
|
||||
err := s.VirtualClient.Get(ctx, types.NamespacedName{
|
||||
Namespace: virtualNamespace,
|
||||
Name: virtualName,
|
||||
}, &vSecret)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to get virtual secret %s/%s: %w", virtualNamespace, virtualName, err)
|
||||
}
|
||||
|
||||
translated, err := s.TranslateFunc(&vSecret)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to translate virtual secret: %s/%s: %w", virtualNamespace, virtualName, err)
|
||||
}
|
||||
|
||||
return s.HostClient.Delete(ctx, translated)
|
||||
}
|
||||
@@ -1,138 +0,0 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
"github.com/rancher/k3k/pkg/log"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
)
|
||||
|
||||
const (
|
||||
serviceSyncerController = "service-syncer-controller"
|
||||
maxConcurrentReconciles = 1
|
||||
serviceFinalizerName = "service.k3k.io/finalizer"
|
||||
)
|
||||
|
||||
type ServiceReconciler struct {
|
||||
virtualClient ctrlruntimeclient.Client
|
||||
hostClient ctrlruntimeclient.Client
|
||||
clusterName string
|
||||
clusterNamespace string
|
||||
Scheme *runtime.Scheme
|
||||
HostScheme *runtime.Scheme
|
||||
logger *log.Logger
|
||||
Translator translate.ToHostTranslator
|
||||
}
|
||||
|
||||
// AddServiceSyncer adds the service syncer controller to the virtual cluster's manager
|
||||
func AddServiceSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string, logger *log.Logger) error {
|
||||
translator := translate.ToHostTranslator{
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
}
|
||||
// initialize a new Reconciler
|
||||
reconciler := ServiceReconciler{
|
||||
virtualClient: virtMgr.GetClient(),
|
||||
hostClient: hostMgr.GetClient(),
|
||||
Scheme: virtMgr.GetScheme(),
|
||||
HostScheme: hostMgr.GetScheme(),
|
||||
logger: logger.Named(serviceSyncerController),
|
||||
Translator: translator,
|
||||
clusterName: clusterName,
|
||||
clusterNamespace: clusterNamespace,
|
||||
}
|
||||
|
||||
return ctrl.NewControllerManagedBy(virtMgr).
|
||||
For(&v1.Service{}).
|
||||
WithOptions(controller.Options{
|
||||
MaxConcurrentReconciles: maxConcurrentReconciles,
|
||||
}).
|
||||
Complete(&reconciler)
|
||||
}
|
||||
|
||||
func (s *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
log := s.logger.With("Cluster", s.clusterName, "Service", req.NamespacedName)
|
||||
|
||||
if req.Name == "kubernetes" || req.Name == "kube-dns" {
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
var (
|
||||
virtService v1.Service
|
||||
hostService v1.Service
|
||||
cluster v1alpha1.Cluster
|
||||
)
|
||||
// getting the cluster for setting the controller reference
|
||||
if err := s.hostClient.Get(ctx, types.NamespacedName{Name: s.clusterName, Namespace: s.clusterNamespace}, &cluster); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
if err := s.virtualClient.Get(ctx, req.NamespacedName, &virtService); err != nil {
|
||||
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
syncedService := s.service(&virtService)
|
||||
if err := controllerutil.SetControllerReference(&cluster, syncedService, s.HostScheme); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
// handle deletion
|
||||
if !virtService.DeletionTimestamp.IsZero() {
|
||||
// delete the synced service on the host if it still exists
if err := s.hostClient.Delete(ctx, syncedService); err != nil && !apierrors.IsNotFound(err) {
return reconcile.Result{}, err
}
|
||||
|
||||
// remove the finalizer after cleaning up the synced service
|
||||
if controllerutil.ContainsFinalizer(&virtService, serviceFinalizerName) {
|
||||
controllerutil.RemoveFinalizer(&virtService, serviceFinalizerName)
|
||||
|
||||
if err := s.virtualClient.Update(ctx, &virtService); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
// Add finalizer if it does not exist
|
||||
if !controllerutil.ContainsFinalizer(&virtService, serviceFinalizerName) {
|
||||
controllerutil.AddFinalizer(&virtService, serviceFinalizerName)
|
||||
|
||||
if err := s.virtualClient.Update(ctx, &virtService); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
// create or update the service on host
|
||||
if err := s.hostClient.Get(ctx, types.NamespacedName{Name: syncedService.Name, Namespace: s.clusterNamespace}, &hostService); err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
log.Info("creating the service for the first time on the host cluster")
|
||||
return reconcile.Result{}, s.hostClient.Create(ctx, syncedService)
|
||||
}
|
||||
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
log.Info("updating service on the host cluster")
|
||||
|
||||
return reconcile.Result{}, s.hostClient.Update(ctx, syncedService)
|
||||
}
|
||||
|
||||
func (s *ServiceReconciler) service(obj *v1.Service) *v1.Service {
|
||||
hostService := obj.DeepCopy()
|
||||
s.Translator.TranslateTo(hostService)
|
||||
// don't sync finalizers to the host
|
||||
return hostService
|
||||
}
|
||||
@@ -1,172 +0,0 @@
|
||||
package webhook
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/rancher/k3k/pkg/controller/cluster/agent"
|
||||
"github.com/rancher/k3k/pkg/log"
|
||||
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/utils/ptr"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
)
|
||||
|
||||
const (
|
||||
webhookName = "podmutator.k3k.io"
|
||||
webhookTimeout = int32(10)
|
||||
webhookPort = "9443"
|
||||
webhookPath = "/mutate--v1-pod"
|
||||
FieldpathField = "k3k.io/fieldpath"
|
||||
)
|
||||
|
||||
type webhookHandler struct {
|
||||
client ctrlruntimeclient.Client
|
||||
scheme *runtime.Scheme
|
||||
serviceName string
|
||||
clusterName string
|
||||
clusterNamespace string
|
||||
logger *log.Logger
|
||||
}
|
||||
|
||||
// AddPodMutatorWebhook adds a mutating webhook to the virtual cluster that sets the nodeName of
// created pods to the virtual kubelet node name and removes any status.* field references from
// downward API env fields, recording them as annotations instead.
|
||||
func AddPodMutatorWebhook(ctx context.Context, mgr manager.Manager, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, serviceName string, logger *log.Logger) error {
|
||||
handler := webhookHandler{
|
||||
client: mgr.GetClient(),
|
||||
scheme: mgr.GetScheme(),
|
||||
logger: logger,
|
||||
serviceName: serviceName,
|
||||
clusterName: clusterName,
|
||||
clusterNamespace: clusterNamespace,
|
||||
}
|
||||
|
||||
// create the mutating webhook configuration in the virtual cluster
|
||||
config, err := handler.configuration(ctx, hostClient)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := handler.client.Create(ctx, config); err != nil {
|
||||
if !apierrors.IsAlreadyExists(err) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// register webhook with the manager
|
||||
return ctrl.NewWebhookManagedBy(mgr).For(&v1.Pod{}).WithDefaulter(&handler).Complete()
|
||||
}
|
||||
|
||||
func (w *webhookHandler) Default(ctx context.Context, obj runtime.Object) error {
|
||||
pod, ok := obj.(*v1.Pod)
|
||||
if !ok {
|
||||
return fmt.Errorf("invalid request: object was type %t not cluster", obj)
|
||||
}
|
||||
|
||||
w.logger.Infow("mutator webhook request", "Pod", pod.Name, "Namespace", pod.Namespace)
|
||||
// look for status.* fields in the env
|
||||
if pod.Annotations == nil {
|
||||
pod.Annotations = make(map[string]string)
|
||||
}
|
||||
|
||||
for i, container := range pod.Spec.Containers {
|
||||
for j, env := range container.Env {
|
||||
if env.ValueFrom == nil || env.ValueFrom.FieldRef == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
fieldPath := env.ValueFrom.FieldRef.FieldPath
|
||||
if strings.Contains(fieldPath, "status.") {
|
||||
annotationKey := fmt.Sprintf("%s_%d_%s", FieldpathField, i, env.Name)
|
||||
pod.Annotations[annotationKey] = fieldPath
|
||||
pod.Spec.Containers[i].Env = removeEnv(pod.Spec.Containers[i].Env, j)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *webhookHandler) configuration(ctx context.Context, hostClient ctrlruntimeclient.Client) (*admissionregistrationv1.MutatingWebhookConfiguration, error) {
|
||||
w.logger.Infow("extracting webhook tls from host cluster")
|
||||
|
||||
var (
|
||||
webhookTLSSecret v1.Secret
|
||||
)
|
||||
|
||||
if err := hostClient.Get(ctx, types.NamespacedName{Name: agent.WebhookSecretName(w.clusterName), Namespace: w.clusterNamespace}, &webhookTLSSecret); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
caBundle, ok := webhookTLSSecret.Data["ca.crt"]
|
||||
if !ok {
|
||||
return nil, errors.New("webhook CABundle does not exist in secret")
|
||||
}
|
||||
|
||||
webhookURL := "https://" + w.serviceName + ":" + webhookPort + webhookPath
|
||||
|
||||
return &admissionregistrationv1.MutatingWebhookConfiguration{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: "admissionregistration.k8s.io/v1",
|
||||
Kind: "MutatingWebhookConfiguration",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: webhookName + "-configuration",
|
||||
},
|
||||
Webhooks: []admissionregistrationv1.MutatingWebhook{
|
||||
{
|
||||
Name: webhookName,
|
||||
AdmissionReviewVersions: []string{"v1"},
|
||||
SideEffects: ptr.To(admissionregistrationv1.SideEffectClassNone),
|
||||
TimeoutSeconds: ptr.To(webhookTimeout),
|
||||
ClientConfig: admissionregistrationv1.WebhookClientConfig{
|
||||
URL: ptr.To(webhookURL),
|
||||
CABundle: caBundle,
|
||||
},
|
||||
Rules: []admissionregistrationv1.RuleWithOperations{
|
||||
{
|
||||
Operations: []admissionregistrationv1.OperationType{
|
||||
"CREATE",
|
||||
},
|
||||
Rule: admissionregistrationv1.Rule{
|
||||
APIGroups: []string{""},
|
||||
APIVersions: []string{"v1"},
|
||||
Resources: []string{"pods"},
|
||||
Scope: ptr.To(admissionregistrationv1.NamespacedScope),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func removeEnv(envs []v1.EnvVar, i int) []v1.EnvVar {
|
||||
envs[i] = envs[len(envs)-1]
|
||||
return envs[:len(envs)-1]
|
||||
}
|
||||
|
||||
func ParseFieldPathAnnotationKey(annotationKey string) (int, string, error) {
|
||||
s := strings.SplitN(annotationKey, "_", 3)
|
||||
if len(s) != 3 {
|
||||
return -1, "", errors.New("fieldpath annotation is not set correctly")
|
||||
}
|
||||
|
||||
containerIndex, err := strconv.Atoi(s[1])
|
||||
if err != nil {
|
||||
return -1, "", err
|
||||
}
|
||||
|
||||
envName := s[2]
|
||||
|
||||
return containerIndex, envName, nil
|
||||
}
|
||||
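To illustrate how the annotation written by the webhook and ParseFieldPathAnnotationKey fit together (a sketch only, not part of the original sources): a downward-API env var such as MY_POD_IP with fieldPath status.podIP on container 0 is stored under the key "k3k.io/fieldpath_0_MY_POD_IP" with the field path as its value, and a consumer can walk the pod annotations to recover it.

// logDownwardAPIAnnotations prints every env var that the webhook moved into an annotation.
func logDownwardAPIAnnotations(pod *v1.Pod) error {
	for key, fieldPath := range pod.Annotations {
		if !strings.HasPrefix(key, FieldpathField) {
			continue
		}

		containerIndex, envName, err := ParseFieldPathAnnotationKey(key)
		if err != nil {
			return err
		}

		fmt.Printf("container %d: env %s came from %s\n", containerIndex, envName, fieldPath)
	}

	return nil
}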
@@ -1,428 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
certutil "github.com/rancher/dynamiclistener/cert"
|
||||
k3kkubeletcontroller "github.com/rancher/k3k/k3k-kubelet/controller"
|
||||
k3kwebhook "github.com/rancher/k3k/k3k-kubelet/controller/webhook"
|
||||
"github.com/rancher/k3k/k3k-kubelet/provider"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
"github.com/rancher/k3k/pkg/controller"
|
||||
"github.com/rancher/k3k/pkg/controller/certs"
|
||||
"github.com/rancher/k3k/pkg/controller/cluster/server"
|
||||
"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
|
||||
k3klog "github.com/rancher/k3k/pkg/log"
|
||||
"github.com/virtual-kubelet/virtual-kubelet/log"
|
||||
"github.com/virtual-kubelet/virtual-kubelet/node"
|
||||
"github.com/virtual-kubelet/virtual-kubelet/node/nodeutil"
|
||||
"go.uber.org/zap"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apiserver/pkg/authentication/user"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
||||
"k8s.io/client-go/util/retry"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/cache"
|
||||
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
ctrlserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
|
||||
"sigs.k8s.io/controller-runtime/pkg/webhook"
|
||||
)
|
||||
|
||||
var (
|
||||
baseScheme = runtime.NewScheme()
|
||||
k3kKubeletName = "k3k-kubelet"
|
||||
)
|
||||
|
||||
func init() {
|
||||
_ = clientgoscheme.AddToScheme(baseScheme)
|
||||
_ = v1alpha1.AddToScheme(baseScheme)
|
||||
}
|
||||
|
||||
type kubelet struct {
|
||||
virtualCluster v1alpha1.Cluster
|
||||
|
||||
name string
|
||||
port int
|
||||
hostConfig *rest.Config
|
||||
virtConfig *rest.Config
|
||||
agentIP string
|
||||
dnsIP string
|
||||
hostClient ctrlruntimeclient.Client
|
||||
virtClient kubernetes.Interface
|
||||
hostMgr manager.Manager
|
||||
virtualMgr manager.Manager
|
||||
node *nodeutil.Node
|
||||
logger *k3klog.Logger
|
||||
token string
|
||||
}
|
||||
|
||||
func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet, error) {
|
||||
hostConfig, err := clientcmd.BuildConfigFromFlags("", c.HostConfigPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
hostClient, err := ctrlruntimeclient.New(hostConfig, ctrlruntimeclient.Options{
|
||||
Scheme: baseScheme,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
virtConfig, err := virtRestConfig(ctx, c.VirtualConfigPath, hostClient, c.ClusterName, c.ClusterNamespace, c.Token, logger)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
virtClient, err := kubernetes.NewForConfig(virtConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
hostMgr, err := ctrl.NewManager(hostConfig, manager.Options{
|
||||
Scheme: baseScheme,
|
||||
LeaderElection: true,
|
||||
LeaderElectionNamespace: c.ClusterNamespace,
|
||||
LeaderElectionID: c.ClusterName,
|
||||
Metrics: ctrlserver.Options{
|
||||
BindAddress: ":8083",
|
||||
},
|
||||
Cache: cache.Options{
|
||||
DefaultNamespaces: map[string]cache.Config{
|
||||
c.ClusterNamespace: {},
|
||||
},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.New("unable to create controller-runtime mgr for host cluster: " + err.Error())
|
||||
}
|
||||
|
||||
// virtual client will only use core types (for now), no need to add anything other than the basics
|
||||
virtualScheme := runtime.NewScheme()
|
||||
if err := clientgoscheme.AddToScheme(virtualScheme); err != nil {
|
||||
return nil, errors.New("unable to add client go types to virtual cluster scheme: " + err.Error())
|
||||
}
|
||||
|
||||
webhookServer := webhook.NewServer(webhook.Options{
|
||||
CertDir: "/opt/rancher/k3k-webhook",
|
||||
})
|
||||
|
||||
virtualMgr, err := ctrl.NewManager(virtConfig, manager.Options{
|
||||
Scheme: virtualScheme,
|
||||
WebhookServer: webhookServer,
|
||||
LeaderElection: true,
|
||||
LeaderElectionNamespace: "kube-system",
|
||||
LeaderElectionID: c.ClusterName,
|
||||
Metrics: ctrlserver.Options{
|
||||
BindAddress: ":8084",
|
||||
},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, errors.New("unable to create controller-runtime mgr for virtual cluster: " + err.Error())
|
||||
}
|
||||
|
||||
logger.Info("adding pod mutator webhook")
|
||||
|
||||
if err := k3kwebhook.AddPodMutatorWebhook(ctx, virtualMgr, hostClient, c.ClusterName, c.ClusterNamespace, c.ServiceName, logger); err != nil {
|
||||
return nil, errors.New("unable to add pod mutator webhook for virtual cluster: " + err.Error())
|
||||
}
|
||||
|
||||
logger.Info("adding service syncer controller")
|
||||
|
||||
if err := k3kkubeletcontroller.AddServiceSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace, k3klog.New(false)); err != nil {
|
||||
return nil, errors.New("failed to add service syncer controller: " + err.Error())
|
||||
}
|
||||
|
||||
logger.Info("adding pvc syncer controller")
|
||||
|
||||
if err := k3kkubeletcontroller.AddPVCSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace, k3klog.New(false)); err != nil {
|
||||
return nil, errors.New("failed to add pvc syncer controller: " + err.Error())
|
||||
}
|
||||
|
||||
logger.Info("adding pod pvc controller")
|
||||
|
||||
if err := k3kkubeletcontroller.AddPodPVCController(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace, k3klog.New(false)); err != nil {
|
||||
return nil, errors.New("failed to add pod pvc controller: " + err.Error())
|
||||
}
|
||||
|
||||
clusterIP, err := clusterIP(ctx, c.ServiceName, c.ClusterNamespace, hostClient)
|
||||
if err != nil {
|
||||
return nil, errors.New("failed to extract the clusterIP for the server service: " + err.Error())
|
||||
}
|
||||
|
||||
// get the cluster's DNS IP to be injected into pods
|
||||
var dnsService v1.Service
|
||||
|
||||
dnsName := controller.SafeConcatNameWithPrefix(c.ClusterName, "kube-dns")
|
||||
if err := hostClient.Get(ctx, types.NamespacedName{Name: dnsName, Namespace: c.ClusterNamespace}, &dnsService); err != nil {
|
||||
return nil, errors.New("failed to get the DNS service for the cluster: " + err.Error())
|
||||
}
|
||||
|
||||
var virtualCluster v1alpha1.Cluster
|
||||
if err := hostClient.Get(ctx, types.NamespacedName{Name: c.ClusterName, Namespace: c.ClusterNamespace}, &virtualCluster); err != nil {
|
||||
return nil, errors.New("failed to get virtualCluster spec: " + err.Error())
|
||||
}
|
||||
|
||||
return &kubelet{
|
||||
virtualCluster: virtualCluster,
|
||||
|
||||
name: c.AgentHostname,
|
||||
hostConfig: hostConfig,
|
||||
hostClient: hostClient,
|
||||
virtConfig: virtConfig,
|
||||
virtClient: virtClient,
|
||||
hostMgr: hostMgr,
|
||||
virtualMgr: virtualMgr,
|
||||
agentIP: clusterIP,
|
||||
logger: logger.Named(k3kKubeletName),
|
||||
token: c.Token,
|
||||
dnsIP: dnsService.Spec.ClusterIP,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func clusterIP(ctx context.Context, serviceName, clusterNamespace string, hostClient ctrlruntimeclient.Client) (string, error) {
|
||||
var service v1.Service
|
||||
|
||||
serviceKey := types.NamespacedName{
|
||||
Namespace: clusterNamespace,
|
||||
Name: serviceName,
|
||||
}
|
||||
|
||||
if err := hostClient.Get(ctx, serviceKey, &service); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return service.Spec.ClusterIP, nil
|
||||
}
|
||||
|
||||
func (k *kubelet) registerNode(ctx context.Context, agentIP, srvPort, namespace, name, hostname, serverIP, dnsIP, version string) error {
|
||||
providerFunc := k.newProviderFunc(namespace, name, hostname, agentIP, serverIP, dnsIP, version)
|
||||
nodeOpts := k.nodeOpts(ctx, srvPort, namespace, name, hostname, agentIP)
|
||||
|
||||
var err error
|
||||
|
||||
k.node, err = nodeutil.NewNode(k.name, providerFunc, nodeutil.WithClient(k.virtClient), nodeOpts)
|
||||
if err != nil {
|
||||
return errors.New("unable to start kubelet: " + err.Error())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (k *kubelet) start(ctx context.Context) {
|
||||
// any one of the following 3 tasks (host manager, virtual manager, node) crashing will stop the
|
||||
// program, and all 3 of them block on start, so we start them here in go-routines
|
||||
go func() {
|
||||
err := k.hostMgr.Start(ctx)
|
||||
if err != nil {
|
||||
k.logger.Fatalw("host manager stopped", zap.Error(err))
|
||||
}
|
||||
}()
|
||||
|
||||
go func() {
|
||||
err := k.virtualMgr.Start(ctx)
|
||||
if err != nil {
|
||||
k.logger.Fatalw("virtual manager stopped", zap.Error(err))
|
||||
}
|
||||
}()
|
||||
|
||||
// run the node async so that we can wait for it to be ready in another call
|
||||
|
||||
go func() {
|
||||
ctx = log.WithLogger(ctx, k.logger)
|
||||
if err := k.node.Run(ctx); err != nil {
|
||||
k.logger.Fatalw("node errored when running", zap.Error(err))
|
||||
}
|
||||
}()
|
||||
|
||||
if err := k.node.WaitReady(context.Background(), time.Minute*1); err != nil {
|
||||
k.logger.Fatalw("node was not ready within timeout of 1 minute", zap.Error(err))
|
||||
}
|
||||
|
||||
<-k.node.Done()
|
||||
|
||||
if err := k.node.Err(); err != nil {
|
||||
k.logger.Fatalw("node stopped with an error", zap.Error(err))
|
||||
}
|
||||
|
||||
k.logger.Info("node exited successfully")
|
||||
}
|
||||
|
||||
func (k *kubelet) newProviderFunc(namespace, name, hostname, agentIP, serverIP, dnsIP, version string) nodeutil.NewProviderFunc {
|
||||
return func(pc nodeutil.ProviderConfig) (nodeutil.Provider, node.NodeProvider, error) {
|
||||
utilProvider, err := provider.New(*k.hostConfig, k.hostMgr, k.virtualMgr, k.logger, namespace, name, serverIP, dnsIP)
|
||||
if err != nil {
|
||||
return nil, nil, errors.New("unable to make nodeutil provider: " + err.Error())
|
||||
}
|
||||
|
||||
provider.ConfigureNode(k.logger, pc.Node, hostname, k.port, agentIP, utilProvider.CoreClient, utilProvider.VirtualClient, k.virtualCluster, version)
|
||||
|
||||
return utilProvider, &provider.Node{}, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (k *kubelet) nodeOpts(ctx context.Context, srvPort, namespace, name, hostname, agentIP string) nodeutil.NodeOpt {
|
||||
return func(c *nodeutil.NodeConfig) error {
|
||||
c.HTTPListenAddr = fmt.Sprintf(":%s", srvPort)
|
||||
// set up the routes
|
||||
mux := http.NewServeMux()
|
||||
if err := nodeutil.AttachProviderRoutes(mux)(c); err != nil {
|
||||
return errors.New("unable to attach routes: " + err.Error())
|
||||
}
|
||||
|
||||
c.Handler = mux
|
||||
|
||||
tlsConfig, err := loadTLSConfig(ctx, k.hostClient, name, namespace, k.name, hostname, k.token, agentIP)
|
||||
if err != nil {
|
||||
return errors.New("unable to get tls config: " + err.Error())
|
||||
}
|
||||
|
||||
c.TLSConfig = tlsConfig
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func virtRestConfig(ctx context.Context, virtualConfigPath string, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, token string, logger *k3klog.Logger) (*rest.Config, error) {
|
||||
if virtualConfigPath != "" {
|
||||
return clientcmd.BuildConfigFromFlags("", virtualConfigPath)
|
||||
}
|
||||
// the virtual kubeconfig path is empty, so fetch the k3k cluster kubeconfig instead
|
||||
var cluster v1alpha1.Cluster
|
||||
if err := hostClient.Get(ctx, types.NamespacedName{Namespace: clusterNamespace, Name: clusterName}, &cluster); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
endpoint := server.ServiceName(cluster.Name) + "." + cluster.Namespace
|
||||
|
||||
var b *bootstrap.ControlRuntimeBootstrap
|
||||
|
||||
if err := retry.OnError(controller.Backoff, func(err error) bool {
|
||||
return err != nil
|
||||
}, func() error {
|
||||
var err error
|
||||
b, err = bootstrap.DecodedBootstrap(token, endpoint)
|
||||
logger.Infow("decoded bootstrap", zap.Error(err))
|
||||
return err
|
||||
}); err != nil {
|
||||
return nil, errors.New("unable to decode bootstrap: " + err.Error())
|
||||
}
|
||||
|
||||
adminCert, adminKey, err := certs.CreateClientCertKey(
|
||||
controller.AdminCommonName,
|
||||
[]string{user.SystemPrivilegedGroup},
|
||||
nil, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
|
||||
time.Hour*24*time.Duration(365),
|
||||
b.ClientCA.Content,
|
||||
b.ClientCAKey.Content,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
url := fmt.Sprintf("https://%s:%d", server.ServiceName(cluster.Name), server.ServerPort)
|
||||
|
||||
kubeconfigData, err := kubeconfigBytes(url, []byte(b.ServerCA.Content), adminCert, adminKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return clientcmd.RESTConfigFromKubeConfig(kubeconfigData)
|
||||
}
|
||||
|
||||
func kubeconfigBytes(url string, serverCA, clientCert, clientKey []byte) ([]byte, error) {
|
||||
config := clientcmdapi.NewConfig()
|
||||
|
||||
cluster := clientcmdapi.NewCluster()
|
||||
cluster.CertificateAuthorityData = serverCA
|
||||
cluster.Server = url
|
||||
|
||||
authInfo := clientcmdapi.NewAuthInfo()
|
||||
authInfo.ClientCertificateData = clientCert
|
||||
authInfo.ClientKeyData = clientKey
|
||||
|
||||
context := clientcmdapi.NewContext()
|
||||
context.AuthInfo = "default"
|
||||
context.Cluster = "default"
|
||||
|
||||
config.Clusters["default"] = cluster
|
||||
config.AuthInfos["default"] = authInfo
|
||||
config.Contexts["default"] = context
|
||||
config.CurrentContext = "default"
|
||||
|
||||
return clientcmd.Write(*config)
|
||||
}
|
||||
|
||||
func loadTLSConfig(ctx context.Context, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, nodeName, hostname, token, agentIP string) (*tls.Config, error) {
|
||||
var (
|
||||
cluster v1alpha1.Cluster
|
||||
b *bootstrap.ControlRuntimeBootstrap
|
||||
)
|
||||
|
||||
if err := hostClient.Get(ctx, types.NamespacedName{Name: clusterName, Namespace: clusterNamespace}, &cluster); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
endpoint := fmt.Sprintf("%s.%s", server.ServiceName(cluster.Name), cluster.Namespace)
|
||||
|
||||
if err := retry.OnError(controller.Backoff, func(err error) bool {
|
||||
return err != nil
|
||||
}, func() error {
|
||||
var err error
|
||||
b, err = bootstrap.DecodedBootstrap(token, endpoint)
|
||||
return err
|
||||
}); err != nil {
|
||||
return nil, errors.New("unable to decode bootstrap: " + err.Error())
|
||||
}
|
||||
|
||||
ip := net.ParseIP(agentIP)
|
||||
|
||||
altNames := certutil.AltNames{
|
||||
DNSNames: []string{hostname},
|
||||
IPs: []net.IP{ip},
|
||||
}
|
||||
|
||||
cert, key, err := certs.CreateClientCertKey(nodeName, nil, &altNames, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, 0, b.ServerCA.Content, b.ServerCAKey.Content)
|
||||
if err != nil {
|
||||
return nil, errors.New("unable to get cert and key: " + err.Error())
|
||||
}
|
||||
|
||||
clientCert, err := tls.X509KeyPair(cert, key)
|
||||
if err != nil {
|
||||
return nil, errors.New("unable to get key pair: " + err.Error())
|
||||
}
|
||||
|
||||
// create rootCA CertPool
|
||||
certs, err := certutil.ParseCertsPEM([]byte(b.ServerCA.Content))
|
||||
if err != nil {
|
||||
return nil, errors.New("unable to create ca certs: " + err.Error())
|
||||
}
|
||||
|
||||
if len(certs) < 1 {
|
||||
return nil, errors.New("ca cert is not parsed correctly")
|
||||
}
|
||||
|
||||
pool := x509.NewCertPool()
|
||||
pool.AddCert(certs[0])
|
||||
|
||||
return &tls.Config{
|
||||
RootCAs: pool,
|
||||
Certificates: []tls.Certificate{clientCert},
|
||||
}, nil
|
||||
}
|
||||
@@ -1,138 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
|
||||
"github.com/go-logr/zapr"
|
||||
"github.com/rancher/k3k/pkg/log"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli/v2"
|
||||
"go.uber.org/zap"
|
||||
ctrlruntimelog "sigs.k8s.io/controller-runtime/pkg/log"
|
||||
)
|
||||
|
||||
var (
|
||||
configFile string
|
||||
cfg config
|
||||
logger *log.Logger
|
||||
debug bool
|
||||
)
|
||||
|
||||
func main() {
|
||||
app := cli.NewApp()
|
||||
app.Name = "k3k-kubelet"
|
||||
app.Usage = "virtual kubelet implementation k3k"
|
||||
app.Flags = []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "cluster-name",
|
||||
Usage: "Name of the k3k cluster",
|
||||
Destination: &cfg.ClusterName,
|
||||
EnvVars: []string{"CLUSTER_NAME"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "cluster-namespace",
|
||||
Usage: "Namespace of the k3k cluster",
|
||||
Destination: &cfg.ClusterNamespace,
|
||||
EnvVars: []string{"CLUSTER_NAMESPACE"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "cluster-token",
|
||||
Usage: "K3S token of the k3k cluster",
|
||||
Destination: &cfg.Token,
|
||||
EnvVars: []string{"CLUSTER_TOKEN"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "host-config-path",
|
||||
Usage: "Path to the host kubeconfig; if empty, the virtual kubelet will use the in-cluster config",
|
||||
Destination: &cfg.HostConfigPath,
|
||||
EnvVars: []string{"HOST_KUBECONFIG"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "virtual-config-path",
|
||||
Usage: "Path to the k3k cluster kubeconfig; if empty, the virtual kubelet will create its own config from the k3k cluster",
|
||||
Destination: &cfg.VirtualConfigPath,
|
||||
EnvVars: []string{"CLUSTER_NAME"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "kubelet-port",
|
||||
Usage: "kubelet API port number",
|
||||
Destination: &cfg.KubeletPort,
|
||||
EnvVars: []string{"SERVER_PORT"},
|
||||
Value: "10250",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "service-name",
|
||||
Usage: "The service name deployed by the k3k controller",
|
||||
Destination: &cfg.ServiceName,
|
||||
EnvVars: []string{"SERVICE_NAME"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "agent-hostname",
|
||||
Usage: "Agent Hostname used for TLS SAN for the kubelet server",
|
||||
Destination: &cfg.AgentHostname,
|
||||
EnvVars: []string{"AGENT_HOSTNAME"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "server-ip",
|
||||
Usage: "Server IP used for registering the virtual kubelet to the cluster",
|
||||
Destination: &cfg.ServerIP,
|
||||
EnvVars: []string{"SERVER_IP"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "version",
|
||||
Usage: "Version of kubernetes server",
|
||||
Destination: &cfg.Version,
|
||||
EnvVars: []string{"VERSION"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "config",
|
||||
Usage: "Path to k3k-kubelet config file",
|
||||
Destination: &configFile,
|
||||
EnvVars: []string{"CONFIG_FILE"},
|
||||
Value: "/etc/rancher/k3k/config.yaml",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "debug",
|
||||
Usage: "Enable debug logging",
|
||||
Destination: &debug,
|
||||
EnvVars: []string{"DEBUG"},
|
||||
},
|
||||
}
|
||||
app.Before = func(clx *cli.Context) error {
|
||||
logger = log.New(debug)
|
||||
ctrlruntimelog.SetLogger(zapr.NewLogger(logger.Desugar().WithOptions(zap.AddCallerSkip(1))))
|
||||
|
||||
return nil
|
||||
}
|
||||
app.Action = run
|
||||
|
||||
if err := app.Run(os.Args); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func run(clx *cli.Context) error {
|
||||
ctx := context.Background()
|
||||
|
||||
if err := cfg.parse(configFile); err != nil {
|
||||
logger.Fatalw("failed to parse config file", "path", configFile, zap.Error(err))
|
||||
}
|
||||
|
||||
if err := cfg.validate(); err != nil {
|
||||
logger.Fatalw("failed to validate config", zap.Error(err))
|
||||
}
|
||||
|
||||
k, err := newKubelet(ctx, &cfg, logger)
|
||||
if err != nil {
|
||||
logger.Fatalw("failed to create new virtual kubelet instance", zap.Error(err))
|
||||
}
|
||||
|
||||
if err := k.registerNode(ctx, k.agentIP, cfg.KubeletPort, cfg.ClusterNamespace, cfg.ClusterName, cfg.AgentHostname, cfg.ServerIP, k.dnsIP, cfg.Version); err != nil {
|
||||
logger.Fatalw("failed to register new node", zap.Error(err))
|
||||
}
|
||||
|
||||
k.start(ctx)
|
||||
|
||||
return nil
|
||||
}
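// Illustrative sketch (not part of the original source): run uses a plain
// context.Background(), so shutdown relies on process exit. A signal-aware
// variant using only the standard library would look roughly like:
//
//	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
//	defer stop()
//	// ... parse config, create the kubelet, registerNode ...
//	k.start(ctx)
//
// This assumes the os/signal and syscall imports, which are not declared above.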
|
||||
@@ -1,198 +0,0 @@
|
||||
/*
|
||||
Copyright (c) Microsoft Corporation.
|
||||
Licensed under the Apache 2.0 license.
|
||||
|
||||
See https://github.com/virtual-kubelet/azure-aci/tree/master/pkg/metrics/collectors
|
||||
*/
|
||||
|
||||
package collectors
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
stats "github.com/virtual-kubelet/virtual-kubelet/node/api/statsv1alpha1"
|
||||
compbasemetrics "k8s.io/component-base/metrics"
|
||||
)
|
||||
|
||||
// defining metrics
|
||||
var (
|
||||
nodeCPUUsageDesc = compbasemetrics.NewDesc("node_cpu_usage_seconds_total",
|
||||
"Cumulative cpu time consumed by the node in core-seconds",
|
||||
nil,
|
||||
nil,
|
||||
compbasemetrics.ALPHA,
|
||||
"")
|
||||
|
||||
nodeMemoryUsageDesc = compbasemetrics.NewDesc("node_memory_working_set_bytes",
|
||||
"Current working set of the node in bytes",
|
||||
nil,
|
||||
nil,
|
||||
compbasemetrics.ALPHA,
|
||||
"")
|
||||
|
||||
containerCPUUsageDesc = compbasemetrics.NewDesc("container_cpu_usage_seconds_total",
|
||||
"Cumulative cpu time consumed by the container in core-seconds",
|
||||
[]string{"container", "pod", "namespace"},
|
||||
nil,
|
||||
compbasemetrics.ALPHA,
|
||||
"")
|
||||
|
||||
containerMemoryUsageDesc = compbasemetrics.NewDesc("container_memory_working_set_bytes",
|
||||
"Current working set of the container in bytes",
|
||||
[]string{"container", "pod", "namespace"},
|
||||
nil,
|
||||
compbasemetrics.ALPHA,
|
||||
"")
|
||||
|
||||
podCPUUsageDesc = compbasemetrics.NewDesc("pod_cpu_usage_seconds_total",
|
||||
"Cumulative cpu time consumed by the pod in core-seconds",
|
||||
[]string{"pod", "namespace"},
|
||||
nil,
|
||||
compbasemetrics.ALPHA,
|
||||
"")
|
||||
|
||||
podMemoryUsageDesc = compbasemetrics.NewDesc("pod_memory_working_set_bytes",
|
||||
"Current working set of the pod in bytes",
|
||||
[]string{"pod", "namespace"},
|
||||
nil,
|
||||
compbasemetrics.ALPHA,
|
||||
"")
|
||||
|
||||
resourceScrapeResultDesc = compbasemetrics.NewDesc("scrape_error",
|
||||
"1 if there was an error while getting container metrics, 0 otherwise",
|
||||
nil,
|
||||
nil,
|
||||
compbasemetrics.ALPHA,
|
||||
"")
|
||||
|
||||
containerStartTimeDesc = compbasemetrics.NewDesc("container_start_time_seconds",
|
||||
"Start time of the container since unix epoch in seconds",
|
||||
[]string{"container", "pod", "namespace"},
|
||||
nil,
|
||||
compbasemetrics.ALPHA,
|
||||
"")
|
||||
)
|
||||
|
||||
// NewResourceMetricsCollector returns a metrics.StableCollector which exports resource metrics
|
||||
func NewKubeletResourceMetricsCollector(podStats *stats.Summary) compbasemetrics.StableCollector {
|
||||
return &resourceMetricsCollector{
|
||||
providerPodStats: podStats,
|
||||
}
|
||||
}
|
||||
|
||||
type resourceMetricsCollector struct {
|
||||
compbasemetrics.BaseStableCollector
|
||||
|
||||
providerPodStats *stats.Summary
|
||||
}
|
||||
|
||||
// Check if resourceMetricsCollector implements necessary interface
|
||||
var _ compbasemetrics.StableCollector = &resourceMetricsCollector{}
|
||||
|
||||
// DescribeWithStability implements compbasemetrics.StableCollector
|
||||
func (rc *resourceMetricsCollector) DescribeWithStability(ch chan<- *compbasemetrics.Desc) {
|
||||
ch <- nodeCPUUsageDesc
|
||||
ch <- nodeMemoryUsageDesc
|
||||
ch <- containerStartTimeDesc
|
||||
ch <- containerCPUUsageDesc
|
||||
ch <- containerMemoryUsageDesc
|
||||
ch <- podCPUUsageDesc
|
||||
ch <- podMemoryUsageDesc
|
||||
ch <- resourceScrapeResultDesc
|
||||
}
|
||||
|
||||
// CollectWithStability implements compbasemetrics.StableCollector
|
||||
// Since new containers are frequently created and removed, using the Gauge would
|
||||
// leak metric collectors for containers or pods that no longer exist. Instead, implement
|
||||
// custom collector in a way that only collects metrics for active containers.
|
||||
func (rc *resourceMetricsCollector) CollectWithStability(ch chan<- compbasemetrics.Metric) {
|
||||
var errorCount float64
|
||||
|
||||
defer func() {
|
||||
ch <- compbasemetrics.NewLazyConstMetric(resourceScrapeResultDesc, compbasemetrics.GaugeValue, errorCount)
|
||||
}()
|
||||
|
||||
statsSummary := *rc.providerPodStats
|
||||
rc.collectNodeCPUMetrics(ch, statsSummary.Node)
|
||||
rc.collectNodeMemoryMetrics(ch, statsSummary.Node)
|
||||
|
||||
for _, pod := range statsSummary.Pods {
|
||||
for _, container := range pod.Containers {
|
||||
rc.collectContainerStartTime(ch, pod, container)
|
||||
rc.collectContainerCPUMetrics(ch, pod, container)
|
||||
rc.collectContainerMemoryMetrics(ch, pod, container)
|
||||
}
|
||||
|
||||
rc.collectPodCPUMetrics(ch, pod)
|
||||
rc.collectPodMemoryMetrics(ch, pod)
|
||||
}
|
||||
}
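// Illustrative sketch (not part of the original source): how this collector is
// typically wired into a component-base registry and scraped, mirroring what the
// provider's GetMetricsResource does. The Summary value here is a placeholder.
//
//	summary := &stats.Summary{}
//	registry := compbasemetrics.NewKubeRegistry()
//	registry.CustomMustRegister(NewKubeletResourceMetricsCollector(summary))
//	families, err := registry.Gather()
//	if err != nil {
//		// handle gather error
//	}
//	_ = families // each metric family can then be encoded for a /metrics endpoint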
|
||||
|
||||
// implement collector methods and validate that correct data is used
|
||||
|
||||
func (rc *resourceMetricsCollector) collectNodeCPUMetrics(ch chan<- compbasemetrics.Metric, s stats.NodeStats) {
|
||||
if s.CPU == nil || s.CPU.UsageCoreNanoSeconds == nil {
|
||||
return
|
||||
}
|
||||
|
||||
ch <- compbasemetrics.NewLazyMetricWithTimestamp(s.CPU.Time.Time,
|
||||
compbasemetrics.NewLazyConstMetric(nodeCPUUsageDesc, compbasemetrics.CounterValue, float64(*s.CPU.UsageCoreNanoSeconds)/float64(time.Second)))
|
||||
}
|
||||
|
||||
func (rc *resourceMetricsCollector) collectNodeMemoryMetrics(ch chan<- compbasemetrics.Metric, s stats.NodeStats) {
|
||||
if s.Memory == nil || s.Memory.WorkingSetBytes == nil {
|
||||
return
|
||||
}
|
||||
|
||||
ch <- compbasemetrics.NewLazyMetricWithTimestamp(s.Memory.Time.Time,
|
||||
compbasemetrics.NewLazyConstMetric(nodeMemoryUsageDesc, compbasemetrics.GaugeValue, float64(*s.Memory.WorkingSetBytes)))
|
||||
}
|
||||
|
||||
func (rc *resourceMetricsCollector) collectContainerStartTime(ch chan<- compbasemetrics.Metric, pod stats.PodStats, s stats.ContainerStats) {
|
||||
if s.StartTime.Unix() <= 0 {
|
||||
return
|
||||
}
|
||||
|
||||
ch <- compbasemetrics.NewLazyMetricWithTimestamp(s.StartTime.Time,
|
||||
compbasemetrics.NewLazyConstMetric(containerStartTimeDesc, compbasemetrics.GaugeValue, float64(s.StartTime.UnixNano())/float64(time.Second), s.Name, pod.PodRef.Name, pod.PodRef.Namespace))
|
||||
}
|
||||
|
||||
func (rc *resourceMetricsCollector) collectContainerCPUMetrics(ch chan<- compbasemetrics.Metric, pod stats.PodStats, s stats.ContainerStats) {
|
||||
if s.CPU == nil || s.CPU.UsageCoreNanoSeconds == nil {
|
||||
return
|
||||
}
|
||||
|
||||
ch <- compbasemetrics.NewLazyMetricWithTimestamp(s.CPU.Time.Time,
|
||||
compbasemetrics.NewLazyConstMetric(containerCPUUsageDesc, compbasemetrics.CounterValue,
|
||||
float64(*s.CPU.UsageCoreNanoSeconds)/float64(time.Second), s.Name, pod.PodRef.Name, pod.PodRef.Namespace))
|
||||
}
|
||||
|
||||
func (rc *resourceMetricsCollector) collectContainerMemoryMetrics(ch chan<- compbasemetrics.Metric, pod stats.PodStats, s stats.ContainerStats) {
|
||||
if s.Memory == nil || s.Memory.WorkingSetBytes == nil {
|
||||
return
|
||||
}
|
||||
|
||||
ch <- compbasemetrics.NewLazyMetricWithTimestamp(s.Memory.Time.Time,
|
||||
compbasemetrics.NewLazyConstMetric(containerMemoryUsageDesc, compbasemetrics.GaugeValue,
|
||||
float64(*s.Memory.WorkingSetBytes), s.Name, pod.PodRef.Name, pod.PodRef.Namespace))
|
||||
}
|
||||
|
||||
func (rc *resourceMetricsCollector) collectPodCPUMetrics(ch chan<- compbasemetrics.Metric, pod stats.PodStats) {
|
||||
if pod.CPU == nil || pod.CPU.UsageCoreNanoSeconds == nil {
|
||||
return
|
||||
}
|
||||
|
||||
ch <- compbasemetrics.NewLazyMetricWithTimestamp(pod.CPU.Time.Time,
|
||||
compbasemetrics.NewLazyConstMetric(podCPUUsageDesc, compbasemetrics.CounterValue,
|
||||
float64(*pod.CPU.UsageCoreNanoSeconds)/float64(time.Second), pod.PodRef.Name, pod.PodRef.Namespace))
|
||||
}
|
||||
|
||||
func (rc *resourceMetricsCollector) collectPodMemoryMetrics(ch chan<- compbasemetrics.Metric, pod stats.PodStats) {
|
||||
if pod.Memory == nil || pod.Memory.WorkingSetBytes == nil {
|
||||
return
|
||||
}
|
||||
|
||||
ch <- compbasemetrics.NewLazyMetricWithTimestamp(pod.Memory.Time.Time,
|
||||
compbasemetrics.NewLazyConstMetric(podMemoryUsageDesc, compbasemetrics.GaugeValue,
|
||||
float64(*pod.Memory.WorkingSetBytes), pod.PodRef.Name, pod.PodRef.Namespace))
|
||||
}
|
||||
@@ -1,167 +0,0 @@
|
||||
package provider
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
k3klog "github.com/rancher/k3k/pkg/log"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
typedv1 "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
func ConfigureNode(logger *k3klog.Logger, node *v1.Node, hostname string, servicePort int, ip string, coreClient typedv1.CoreV1Interface, virtualClient client.Client, virtualCluster v1alpha1.Cluster, version string) {
|
||||
node.Status.Conditions = nodeConditions()
|
||||
node.Status.DaemonEndpoints.KubeletEndpoint.Port = int32(servicePort)
|
||||
node.Status.Addresses = []v1.NodeAddress{
|
||||
{
|
||||
Type: v1.NodeHostName,
|
||||
Address: hostname,
|
||||
},
|
||||
{
|
||||
Type: v1.NodeInternalIP,
|
||||
Address: ip,
|
||||
},
|
||||
}
|
||||
|
||||
node.Labels["node.kubernetes.io/exclude-from-external-load-balancers"] = "true"
|
||||
node.Labels["kubernetes.io/os"] = "linux"
|
||||
|
||||
// configure versions
|
||||
node.Status.NodeInfo.KubeletVersion = version
|
||||
node.Status.NodeInfo.KubeProxyVersion = version
|
||||
|
||||
updateNodeCapacityInterval := 10 * time.Second
|
||||
ticker := time.NewTicker(updateNodeCapacityInterval)
|
||||
|
||||
go func() {
|
||||
for range ticker.C {
|
||||
if err := updateNodeCapacity(coreClient, virtualClient, node.Name, virtualCluster.Spec.NodeSelector); err != nil {
|
||||
logger.Error("error updating node capacity", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
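// Illustrative sketch (assumption, not in the original): the capacity-update
// goroutine above runs for the life of the process. A context-aware variant
// would stop the ticker when the caller shuts down, assuming a ctx is available:
//
//	go func() {
//		defer ticker.Stop()
//		for {
//			select {
//			case <-ctx.Done():
//				return
//			case <-ticker.C:
//				if err := updateNodeCapacity(coreClient, virtualClient, node.Name, virtualCluster.Spec.NodeSelector); err != nil {
//					logger.Error("error updating node capacity", err)
//				}
//			}
//		}
//	}()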
|
||||
|
||||
// nodeConditions returns the basic conditions which mark the node as ready
|
||||
func nodeConditions() []v1.NodeCondition {
|
||||
return []v1.NodeCondition{
|
||||
{
|
||||
Type: "Ready",
|
||||
Status: v1.ConditionTrue,
|
||||
LastHeartbeatTime: metav1.Now(),
|
||||
LastTransitionTime: metav1.Now(),
|
||||
Reason: "KubeletReady",
|
||||
Message: "kubelet is ready.",
|
||||
},
|
||||
{
|
||||
Type: "OutOfDisk",
|
||||
Status: v1.ConditionFalse,
|
||||
LastHeartbeatTime: metav1.Now(),
|
||||
LastTransitionTime: metav1.Now(),
|
||||
Reason: "KubeletHasSufficientDisk",
|
||||
Message: "kubelet has sufficient disk space available",
|
||||
},
|
||||
{
|
||||
Type: "MemoryPressure",
|
||||
Status: v1.ConditionFalse,
|
||||
LastHeartbeatTime: metav1.Now(),
|
||||
LastTransitionTime: metav1.Now(),
|
||||
Reason: "KubeletHasSufficientMemory",
|
||||
Message: "kubelet has sufficient memory available",
|
||||
},
|
||||
{
|
||||
Type: "DiskPressure",
|
||||
Status: v1.ConditionFalse,
|
||||
LastHeartbeatTime: metav1.Now(),
|
||||
LastTransitionTime: metav1.Now(),
|
||||
Reason: "KubeletHasNoDiskPressure",
|
||||
Message: "kubelet has no disk pressure",
|
||||
},
|
||||
{
|
||||
Type: "NetworkUnavailable",
|
||||
Status: v1.ConditionFalse,
|
||||
LastHeartbeatTime: metav1.Now(),
|
||||
LastTransitionTime: metav1.Now(),
|
||||
Reason: "RouteCreated",
|
||||
Message: "RouteController created a route",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// updateNodeCapacity will update the virtual node capacity (and the allocatable field) with the sum of all the resources in the host nodes.
|
||||
// If the nodeLabels are specified only the matching nodes will be considered.
|
||||
func updateNodeCapacity(coreClient typedv1.CoreV1Interface, virtualClient client.Client, virtualNodeName string, nodeLabels map[string]string) error {
|
||||
ctx := context.Background()
|
||||
|
||||
capacity, allocatable, err := getResourcesFromNodes(ctx, coreClient, nodeLabels)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var virtualNode corev1.Node
|
||||
if err := virtualClient.Get(ctx, types.NamespacedName{Name: virtualNodeName}, &virtualNode); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
virtualNode.Status.Capacity = capacity
|
||||
virtualNode.Status.Allocatable = allocatable
|
||||
|
||||
return virtualClient.Status().Update(ctx, &virtualNode)
|
||||
}
|
||||
|
||||
// getResourcesFromNodes will return a sum of all the resource capacity of the host nodes, and the allocatable resources.
|
||||
// If some node labels are specified only the matching nodes will be considered.
|
||||
func getResourcesFromNodes(ctx context.Context, coreClient typedv1.CoreV1Interface, nodeLabels map[string]string) (v1.ResourceList, v1.ResourceList, error) {
|
||||
listOpts := metav1.ListOptions{}
|
||||
|
||||
if nodeLabels != nil {
|
||||
labelSelector := metav1.LabelSelector{MatchLabels: nodeLabels}
|
||||
listOpts.LabelSelector = labels.Set(labelSelector.MatchLabels).String()
|
||||
}
|
||||
|
||||
nodeList, err := coreClient.Nodes().List(ctx, listOpts)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// sum all
|
||||
virtualCapacityResources := corev1.ResourceList{}
|
||||
virtualAvailableResources := corev1.ResourceList{}
|
||||
|
||||
for _, node := range nodeList.Items {
|
||||
// check if the node is Ready and skip it if it is not
nodeReady := false

for _, condition := range node.Status.Conditions {
if condition.Type != corev1.NodeReady {
continue
}

nodeReady = condition.Status == corev1.ConditionTrue

break
}

if !nodeReady {
continue
}
|
||||
|
||||
// add all the available metrics to the virtual node
|
||||
for resourceName, resourceQuantity := range node.Status.Capacity {
|
||||
virtualResource := virtualCapacityResources[resourceName]
|
||||
|
||||
(&virtualResource).Add(resourceQuantity)
|
||||
virtualCapacityResources[resourceName] = virtualResource
|
||||
}
|
||||
|
||||
for resourceName, resourceQuantity := range node.Status.Allocatable {
|
||||
virtualResource := virtualAvailableResources[resourceName]
|
||||
|
||||
(&virtualResource).Add(resourceQuantity)
|
||||
virtualAvailableResources[resourceName] = virtualResource
|
||||
}
|
||||
}
|
||||
|
||||
return virtualCapacityResources, virtualAvailableResources, nil
|
||||
}
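// Illustrative sketch (not part of the original source): resource.Quantity
// addition as used in the loops above. Quantity.Add mutates its receiver, which
// is why the loop copies the map value before adding and writes it back.
//
//	total := corev1.ResourceList{}
//	for _, q := range []resource.Quantity{resource.MustParse("2"), resource.MustParse("500m")} {
//		sum := total[corev1.ResourceCPU]
//		sum.Add(q)
//		total[corev1.ResourceCPU] = sum
//	}
//	// total[corev1.ResourceCPU] now holds 2500m
//
// This assumes the k8s.io/apimachinery/pkg/api/resource import, which the file
// above does not declare.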
|
||||
@@ -1,22 +0,0 @@
|
||||
package provider
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
// Node implements the node.Provider interface from Virtual Kubelet
|
||||
type Node struct {
|
||||
notifyCallback func(*corev1.Node)
|
||||
}
|
||||
|
||||
// Ping is called to check if the node is healthy; in the current implementation it always is
|
||||
func (n *Node) Ping(context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// NotifyNodeStatus sets the callback function for a node being changed. As of now, no changes are made
|
||||
func (n *Node) NotifyNodeStatus(ctx context.Context, cb func(*corev1.Node)) {
|
||||
n.notifyCallback = cb
|
||||
}
|
||||
@@ -1,962 +0,0 @@
|
||||
package provider
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
"github.com/rancher/k3k/k3k-kubelet/controller"
|
||||
"github.com/rancher/k3k/k3k-kubelet/controller/webhook"
|
||||
"github.com/rancher/k3k/k3k-kubelet/provider/collectors"
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
k3klog "github.com/rancher/k3k/pkg/log"
|
||||
"github.com/virtual-kubelet/virtual-kubelet/node/api"
|
||||
"github.com/virtual-kubelet/virtual-kubelet/node/api/statsv1alpha1"
|
||||
"github.com/virtual-kubelet/virtual-kubelet/node/nodeutil"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/selection"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
cv1 "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
|
||||
"errors"
|
||||
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/portforward"
|
||||
"k8s.io/client-go/tools/remotecommand"
|
||||
"k8s.io/client-go/transport/spdy"
|
||||
compbasemetrics "k8s.io/component-base/metrics"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
)
|
||||
|
||||
// check at compile time if the Provider implements the nodeutil.Provider interface
|
||||
var _ nodeutil.Provider = (*Provider)(nil)
|
||||
|
||||
// Provider implements nodeutil.Provider from Virtual Kubelet.
|
||||
// TODO: Implement NotifyPods and the required usage so that this can be an async provider
|
||||
type Provider struct {
|
||||
Handler controller.ControllerHandler
|
||||
Translator translate.ToHostTranslator
|
||||
HostClient client.Client
|
||||
VirtualClient client.Client
|
||||
ClientConfig rest.Config
|
||||
CoreClient cv1.CoreV1Interface
|
||||
ClusterNamespace string
|
||||
ClusterName string
|
||||
serverIP string
|
||||
dnsIP string
|
||||
logger *k3klog.Logger
|
||||
}
|
||||
|
||||
var (
|
||||
ErrRetryTimeout = errors.New("provider timed out")
|
||||
)
|
||||
|
||||
func New(hostConfig rest.Config, hostMgr, virtualMgr manager.Manager, logger *k3klog.Logger, namespace, name, serverIP, dnsIP string) (*Provider, error) {
|
||||
coreClient, err := cv1.NewForConfig(&hostConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
translator := translate.ToHostTranslator{
|
||||
ClusterName: name,
|
||||
ClusterNamespace: namespace,
|
||||
}
|
||||
|
||||
p := Provider{
|
||||
Handler: controller.ControllerHandler{
|
||||
Mgr: virtualMgr,
|
||||
Scheme: *virtualMgr.GetScheme(),
|
||||
HostClient: hostMgr.GetClient(),
|
||||
VirtualClient: virtualMgr.GetClient(),
|
||||
Translator: translator,
|
||||
Logger: logger,
|
||||
},
|
||||
HostClient: hostMgr.GetClient(),
|
||||
VirtualClient: virtualMgr.GetClient(),
|
||||
Translator: translator,
|
||||
ClientConfig: hostConfig,
|
||||
CoreClient: coreClient,
|
||||
ClusterNamespace: namespace,
|
||||
ClusterName: name,
|
||||
logger: logger,
|
||||
serverIP: serverIP,
|
||||
dnsIP: dnsIP,
|
||||
}
|
||||
|
||||
return &p, nil
|
||||
}
|
||||
|
||||
// GetContainerLogs retrieves the logs of a container by name from the provider.
|
||||
func (p *Provider) GetContainerLogs(ctx context.Context, namespace, podName, containerName string, opts api.ContainerLogOpts) (io.ReadCloser, error) {
|
||||
hostPodName := p.Translator.TranslateName(namespace, podName)
|
||||
options := corev1.PodLogOptions{
|
||||
Container: containerName,
|
||||
Timestamps: opts.Timestamps,
|
||||
Follow: opts.Follow,
|
||||
Previous: opts.Previous,
|
||||
}
|
||||
|
||||
if opts.Tail != 0 {
|
||||
tailLines := int64(opts.Tail)
|
||||
options.TailLines = &tailLines
|
||||
}
|
||||
|
||||
if opts.LimitBytes != 0 {
|
||||
limitBytes := int64(opts.LimitBytes)
|
||||
options.LimitBytes = &limitBytes
|
||||
}
|
||||
|
||||
if opts.SinceSeconds != 0 {
|
||||
sinceSeconds := int64(opts.SinceSeconds)
|
||||
options.SinceSeconds = &sinceSeconds
|
||||
}
|
||||
|
||||
if !opts.SinceTime.IsZero() {
|
||||
sinceTime := metav1.NewTime(opts.SinceTime)
|
||||
options.SinceTime = &sinceTime
|
||||
}
|
||||
|
||||
closer, err := p.CoreClient.Pods(p.ClusterNamespace).GetLogs(hostPodName, &options).Stream(ctx)
|
||||
p.logger.Infof("got error %s when getting logs for %s in %s", err, hostPodName, p.ClusterNamespace)
|
||||
|
||||
return closer, err
|
||||
}
|
||||
|
||||
// RunInContainer executes a command in a container in the pod, copying data
|
||||
// between in/out/err and the container's stdin/stdout/stderr.
|
||||
func (p *Provider) RunInContainer(ctx context.Context, namespace, podName, containerName string, cmd []string, attach api.AttachIO) error {
|
||||
hostPodName := p.Translator.TranslateName(namespace, podName)
|
||||
req := p.CoreClient.RESTClient().Post().
|
||||
Resource("pods").
|
||||
Name(hostPodName).
|
||||
Namespace(p.ClusterNamespace).
|
||||
SubResource("exec")
|
||||
req.VersionedParams(&corev1.PodExecOptions{
|
||||
Container: containerName,
|
||||
Command: cmd,
|
||||
TTY: attach.TTY(),
|
||||
Stdin: attach.Stdin() != nil,
|
||||
Stdout: attach.Stdout() != nil,
|
||||
Stderr: attach.Stderr() != nil,
|
||||
}, scheme.ParameterCodec)
|
||||
|
||||
exec, err := remotecommand.NewSPDYExecutor(&p.ClientConfig, http.MethodPost, req.URL())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return exec.StreamWithContext(ctx, remotecommand.StreamOptions{
|
||||
Stdin: attach.Stdin(),
|
||||
Stdout: attach.Stdout(),
|
||||
Stderr: attach.Stderr(),
|
||||
Tty: attach.TTY(),
|
||||
TerminalSizeQueue: &translatorSizeQueue{
|
||||
resizeChan: attach.Resize(),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// AttachToContainer attaches to the executing process of a container in the pod, copying data
|
||||
// between in/out/err and the container's stdin/stdout/stderr.
|
||||
func (p *Provider) AttachToContainer(ctx context.Context, namespace, podName, containerName string, attach api.AttachIO) error {
|
||||
hostPodName := p.Translator.TranslateName(namespace, podName)
|
||||
req := p.CoreClient.RESTClient().Post().
|
||||
Resource("pods").
|
||||
Name(hostPodName).
|
||||
Namespace(p.ClusterNamespace).
|
||||
SubResource("attach")
|
||||
req.VersionedParams(&corev1.PodAttachOptions{
|
||||
Container: containerName,
|
||||
TTY: attach.TTY(),
|
||||
Stdin: attach.Stdin() != nil,
|
||||
Stdout: attach.Stdout() != nil,
|
||||
Stderr: attach.Stderr() != nil,
|
||||
}, scheme.ParameterCodec)
|
||||
|
||||
exec, err := remotecommand.NewSPDYExecutor(&p.ClientConfig, http.MethodPost, req.URL())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return exec.StreamWithContext(ctx, remotecommand.StreamOptions{
|
||||
Stdin: attach.Stdin(),
|
||||
Stdout: attach.Stdout(),
|
||||
Stderr: attach.Stderr(),
|
||||
Tty: attach.TTY(),
|
||||
TerminalSizeQueue: &translatorSizeQueue{
|
||||
resizeChan: attach.Resize(),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// GetStatsSummary gets the stats for the node, including running pods
|
||||
func (p *Provider) GetStatsSummary(ctx context.Context) (*statsv1alpha1.Summary, error) {
|
||||
p.logger.Debug("GetStatsSummary")
|
||||
|
||||
nodeList := &v1.NodeList{}
|
||||
if err := p.CoreClient.RESTClient().Get().Resource("nodes").Do(ctx).Into(nodeList); err != nil {
|
||||
return nil, fmt.Errorf("unable to get nodes of cluster %s in namespace %s: %w", p.ClusterName, p.ClusterNamespace, err)
|
||||
}
|
||||
|
||||
// fetch the stats from all the nodes
|
||||
var (
|
||||
nodeStats statsv1alpha1.NodeStats
|
||||
allPodsStats []statsv1alpha1.PodStats
|
||||
)
|
||||
|
||||
for _, n := range nodeList.Items {
|
||||
res, err := p.CoreClient.RESTClient().
|
||||
Get().
|
||||
Resource("nodes").
|
||||
Name(n.Name).
|
||||
SubResource("proxy").
|
||||
Suffix("stats/summary").
|
||||
DoRaw(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"unable to get stats of node '%s', from cluster %s in namespace %s: %w",
|
||||
n.Name, p.ClusterName, p.ClusterNamespace, err,
|
||||
)
|
||||
}
|
||||
|
||||
stats := &statsv1alpha1.Summary{}
|
||||
if err := json.Unmarshal(res, stats); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// TODO: we should probably aggregate the node stats from the different nodes of the host,
// or reflect the different nodes from the virtual kubelet.
// For the moment let's just pick one node's stats.
|
||||
nodeStats = stats.Node
|
||||
allPodsStats = append(allPodsStats, stats.Pods...)
|
||||
}
|
||||
|
||||
pods, err := p.GetPods(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
podsNameMap := make(map[string]*v1.Pod)
|
||||
|
||||
for _, pod := range pods {
|
||||
hostPodName := p.Translator.TranslateName(pod.Namespace, pod.Name)
|
||||
podsNameMap[hostPodName] = pod
|
||||
}
|
||||
|
||||
filteredStats := &statsv1alpha1.Summary{
|
||||
Node: nodeStats,
|
||||
Pods: make([]statsv1alpha1.PodStats, 0),
|
||||
}
|
||||
|
||||
for _, podStat := range allPodsStats {
|
||||
// skip pods that are not in the cluster namespace
|
||||
if podStat.PodRef.Namespace != p.ClusterNamespace {
|
||||
continue
|
||||
}
|
||||
|
||||
// rewrite the PodReference to match the data of the virtual cluster
|
||||
if pod, found := podsNameMap[podStat.PodRef.Name]; found {
|
||||
podStat.PodRef = statsv1alpha1.PodReference{
|
||||
Name: pod.Name,
|
||||
Namespace: pod.Namespace,
|
||||
UID: string(pod.UID),
|
||||
}
|
||||
filteredStats.Pods = append(filteredStats.Pods, podStat)
|
||||
}
|
||||
}
|
||||
|
||||
return filteredStats, nil
|
||||
}
|
||||
|
||||
// GetMetricsResource gets the metrics for the node, including running pods
|
||||
func (p *Provider) GetMetricsResource(ctx context.Context) ([]*dto.MetricFamily, error) {
|
||||
statsSummary, err := p.GetStatsSummary(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Join(err, errors.New("error fetching MetricsResource"))
|
||||
}
|
||||
|
||||
registry := compbasemetrics.NewKubeRegistry()
|
||||
registry.CustomMustRegister(collectors.NewKubeletResourceMetricsCollector(statsSummary))
|
||||
|
||||
metricFamily, err := registry.Gather()
|
||||
if err != nil {
|
||||
return nil, errors.Join(err, errors.New("error gathering metrics from collector"))
|
||||
}
|
||||
|
||||
return metricFamily, nil
|
||||
}
|
||||
|
||||
// PortForward forwards a local port to a port on the pod
|
||||
func (p *Provider) PortForward(ctx context.Context, namespace, pod string, port int32, stream io.ReadWriteCloser) error {
|
||||
hostPodName := p.Translator.TranslateName(namespace, pod)
|
||||
req := p.CoreClient.RESTClient().Post().
|
||||
Resource("pods").
|
||||
Name(hostPodName).
|
||||
Namespace(p.ClusterNamespace).
|
||||
SubResource("portforward")
|
||||
|
||||
transport, upgrader, err := spdy.RoundTripperFor(&p.ClientConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, http.MethodPost, req.URL())
|
||||
portAsString := strconv.Itoa(int(port))
|
||||
readyChannel := make(chan struct{})
|
||||
stopChannel := make(chan struct{}, 1)
|
||||
|
||||
// Today this doesn't work properly. When the port forward is supposed to stop, the caller (this provider)
// should send a value on stopChannel so that the PortForward is stopped. However, we only have a ReadWriteCloser
// so more work is needed to detect a close and handle that appropriately.
|
||||
fw, err := portforward.New(dialer, []string{portAsString}, stopChannel, readyChannel, stream, stream)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return fw.ForwardPorts()
|
||||
}
|
||||
|
||||
// CreatePod executes createPod with retry
|
||||
func (p *Provider) CreatePod(ctx context.Context, pod *corev1.Pod) error {
|
||||
return p.withRetry(ctx, p.createPod, pod)
|
||||
}
|
||||
|
||||
// createPod takes a Kubernetes Pod and deploys it within the provider.
|
||||
func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
|
||||
tPod := pod.DeepCopy()
|
||||
p.Translator.TranslateTo(tPod)
|
||||
|
||||
// get Cluster definition
|
||||
clusterKey := types.NamespacedName{
|
||||
Namespace: p.ClusterNamespace,
|
||||
Name: p.ClusterName,
|
||||
}
|
||||
|
||||
var cluster v1alpha1.Cluster
|
||||
|
||||
if err := p.HostClient.Get(ctx, clusterKey, &cluster); err != nil {
|
||||
return fmt.Errorf("unable to get cluster %s in namespace %s: %w", p.ClusterName, p.ClusterNamespace, err)
|
||||
}
|
||||
|
||||
// these values shouldn't be set on create
|
||||
tPod.UID = ""
|
||||
tPod.ResourceVersion = ""
|
||||
|
||||
// the pod was scheduled on the virtual kubelet node, but leaving NodeName set would keep it pending indefinitely on the host
|
||||
tPod.Spec.NodeName = ""
|
||||
|
||||
tPod.Spec.NodeSelector = cluster.Spec.NodeSelector
|
||||
|
||||
// set the hostname for the pod if it is not set
|
||||
if pod.Spec.Hostname == "" {
|
||||
tPod.Spec.Hostname = pod.Name
|
||||
}
|
||||
|
||||
// if the priorityClass for the virtual cluster is set, then override the provided value
|
||||
// Note: the core-dns and local-path-provisioner pod are scheduled by k3s with the
|
||||
// 'system-cluster-critical' and 'system-node-critical' default priority classes.
|
||||
if cluster.Spec.PriorityClass != "" {
|
||||
tPod.Spec.PriorityClassName = cluster.Spec.PriorityClass
|
||||
tPod.Spec.Priority = nil
|
||||
}
|
||||
|
||||
// fieldpath annotations
|
||||
if err := p.configureFieldPathEnv(pod, tPod); err != nil {
|
||||
return fmt.Errorf("unable to fetch fieldpath annotations for pod %s/%s: %w", pod.Namespace, pod.Name, err)
|
||||
}
|
||||
// volumes will often refer to resources in the virtual cluster, but instead need to refer to the sync'd
|
||||
// host cluster version
|
||||
if err := p.transformVolumes(ctx, pod.Namespace, tPod.Spec.Volumes); err != nil {
|
||||
return fmt.Errorf("unable to sync volumes for pod %s/%s: %w", pod.Namespace, pod.Name, err)
|
||||
}
|
||||
// sync the serviceaccount token to the host cluster
|
||||
if err := p.transformTokens(ctx, pod, tPod); err != nil {
|
||||
return fmt.Errorf("unable to transform tokens for pod %s/%s: %w", pod.Namespace, pod.Name, err)
|
||||
}
|
||||
|
||||
// inject networking information to the pod including the virtual cluster controlplane endpoint
|
||||
configureNetworking(tPod, pod.Name, pod.Namespace, p.serverIP, p.dnsIP)
|
||||
|
||||
p.logger.Infow("creating pod",
|
||||
"host_namespace", tPod.Namespace, "host_name", tPod.Name,
|
||||
"virtual_namespace", pod.Namespace, "virtual_name", pod.Name,
|
||||
)
|
||||
|
||||
// set ownerReference to the cluster object
|
||||
if err := controllerutil.SetControllerReference(&cluster, tPod, p.HostClient.Scheme()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return p.HostClient.Create(ctx, tPod)
|
||||
}
|
||||
|
||||
// withRetry retries passed function with interval and timeout
|
||||
func (p *Provider) withRetry(ctx context.Context, f func(context.Context, *v1.Pod) error, pod *v1.Pod) error {
|
||||
const (
|
||||
interval = 2 * time.Second
|
||||
timeout = 10 * time.Second
|
||||
)
|
||||
|
||||
var allErrors error
|
||||
|
||||
// retryFn will retry until the operation succeeds, or the timeout occurs
|
||||
retryFn := func(ctx context.Context) (bool, error) {
|
||||
if lastErr := f(ctx, pod); lastErr != nil {
|
||||
// log that the retry failed?
|
||||
allErrors = errors.Join(allErrors, lastErr)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
if err := wait.PollUntilContextTimeout(ctx, interval, timeout, true, retryFn); err != nil {
|
||||
return errors.Join(allErrors, ErrRetryTimeout)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
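// Illustrative sketch (not part of the original source): the retry semantics used
// above. With immediate=true, wait.PollUntilContextTimeout calls the condition
// right away and then every interval until it returns true, errors, or the
// timeout elapses; on timeout the accumulated per-attempt errors are joined with
// ErrRetryTimeout and returned.
//
//	err := p.withRetry(ctx, p.createPod, pod)
//	if errors.Is(err, ErrRetryTimeout) {
//		// every attempt within the 10s window failed; err also wraps the per-attempt errors
//	}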
|
||||
|
||||
// transformVolumes changes the volumes to the representation in the host cluster. Will return an error
|
||||
// if one/more volumes couldn't be transformed
|
||||
func (p *Provider) transformVolumes(ctx context.Context, podNamespace string, volumes []corev1.Volume) error {
|
||||
for _, volume := range volumes {
|
||||
var optional bool
|
||||
|
||||
if strings.HasPrefix(volume.Name, kubeAPIAccessPrefix) {
|
||||
continue
|
||||
}
|
||||
// note: this needs to handle downward api volumes as well, but more thought is needed on how to do that
|
||||
if volume.ConfigMap != nil {
|
||||
if volume.ConfigMap.Optional != nil {
|
||||
optional = *volume.ConfigMap.Optional
|
||||
}
|
||||
|
||||
if err := p.syncConfigmap(ctx, podNamespace, volume.ConfigMap.Name, optional); err != nil {
|
||||
return fmt.Errorf("unable to sync configmap volume %s: %w", volume.Name, err)
|
||||
}
|
||||
|
||||
volume.ConfigMap.Name = p.Translator.TranslateName(podNamespace, volume.ConfigMap.Name)
|
||||
} else if volume.Secret != nil {
|
||||
if volume.Secret.Optional != nil {
|
||||
optional = *volume.Secret.Optional
|
||||
}
|
||||
|
||||
if err := p.syncSecret(ctx, podNamespace, volume.Secret.SecretName, optional); err != nil {
|
||||
return fmt.Errorf("unable to sync secret volume %s: %w", volume.Name, err)
|
||||
}
|
||||
|
||||
volume.Secret.SecretName = p.Translator.TranslateName(podNamespace, volume.Secret.SecretName)
|
||||
} else if volume.Projected != nil {
|
||||
for _, source := range volume.Projected.Sources {
|
||||
if source.ConfigMap != nil {
|
||||
if source.ConfigMap.Optional != nil {
|
||||
optional = *source.ConfigMap.Optional
|
||||
}
|
||||
|
||||
configMapName := source.ConfigMap.Name
|
||||
if err := p.syncConfigmap(ctx, podNamespace, configMapName, optional); err != nil {
|
||||
return fmt.Errorf("unable to sync projected configmap %s: %w", configMapName, err)
|
||||
}
|
||||
|
||||
source.ConfigMap.Name = p.Translator.TranslateName(podNamespace, configMapName)
|
||||
} else if source.Secret != nil {
|
||||
if source.Secret.Optional != nil {
|
||||
optional = *source.Secret.Optional
|
||||
}
|
||||
|
||||
secretName := source.Secret.Name
|
||||
if err := p.syncSecret(ctx, podNamespace, secretName, optional); err != nil {
|
||||
return fmt.Errorf("unable to sync projected secret %s: %w", secretName, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if volume.PersistentVolumeClaim != nil {
|
||||
volume.PersistentVolumeClaim.ClaimName = p.Translator.TranslateName(podNamespace, volume.PersistentVolumeClaim.ClaimName)
|
||||
} else if volume.DownwardAPI != nil {
|
||||
for _, downwardAPI := range volume.DownwardAPI.Items {
|
||||
if downwardAPI.FieldRef.FieldPath == translate.MetadataNameField {
|
||||
downwardAPI.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNameAnnotation)
|
||||
}
|
||||
|
||||
if downwardAPI.FieldRef.FieldPath == translate.MetadataNamespaceField {
|
||||
downwardAPI.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNamespaceAnnotation)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// syncConfigmap will add the configmap object to the queue of the syncer controller to be synced to the host cluster
|
||||
func (p *Provider) syncConfigmap(ctx context.Context, podNamespace string, configMapName string, optional bool) error {
|
||||
var configMap corev1.ConfigMap
|
||||
|
||||
nsName := types.NamespacedName{
|
||||
Namespace: podNamespace,
|
||||
Name: configMapName,
|
||||
}
|
||||
|
||||
if err := p.VirtualClient.Get(ctx, nsName, &configMap); err != nil {
|
||||
// check if it is an optional configmap
|
||||
if apierrors.IsNotFound(err) && optional {
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("unable to get configmap to sync %s/%s: %w", nsName.Namespace, nsName.Name, err)
|
||||
}
|
||||
|
||||
if err := p.Handler.AddResource(ctx, &configMap); err != nil {
|
||||
return fmt.Errorf("unable to add configmap to sync %s/%s: %w", nsName.Namespace, nsName.Name, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// syncSecret will add the secret object to the queue of the syncer controller to be synced to the host cluster
|
||||
func (p *Provider) syncSecret(ctx context.Context, podNamespace string, secretName string, optional bool) error {
|
||||
p.logger.Infow("Syncing secret", "Name", secretName, "Namespace", podNamespace, "optional", optional)
|
||||
|
||||
var secret corev1.Secret
|
||||
|
||||
nsName := types.NamespacedName{
|
||||
Namespace: podNamespace,
|
||||
Name: secretName,
|
||||
}
|
||||
|
||||
if err := p.VirtualClient.Get(ctx, nsName, &secret); err != nil {
|
||||
if apierrors.IsNotFound(err) && optional {
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("unable to get secret to sync %s/%s: %w", nsName.Namespace, nsName.Name, err)
|
||||
}
|
||||
|
||||
if err := p.Handler.AddResource(ctx, &secret); err != nil {
|
||||
return fmt.Errorf("unable to add secret to sync %s/%s: %w", nsName.Namespace, nsName.Name, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdatePod executes updatePod with retry
|
||||
func (p *Provider) UpdatePod(ctx context.Context, pod *corev1.Pod) error {
|
||||
return p.withRetry(ctx, p.updatePod, pod)
|
||||
}
|
||||
|
||||
func (p *Provider) updatePod(ctx context.Context, pod *v1.Pod) error {
|
||||
p.logger.Debugw("got a request for update pod")
|
||||
|
||||
// Once scheduled, a Pod cannot update fields other than the images of its containers and initContainers, plus a few others
|
||||
// See: https://kubernetes.io/docs/concepts/workloads/pods/#pod-update-and-replacement
|
||||
|
||||
// Update Pod in the virtual cluster
|
||||
|
||||
var currentVirtualPod v1.Pod
|
||||
if err := p.VirtualClient.Get(ctx, client.ObjectKeyFromObject(pod), ¤tVirtualPod); err != nil {
|
||||
return fmt.Errorf("unable to get pod to update from virtual cluster: %w", err)
|
||||
}
|
||||
|
||||
currentVirtualPod.Spec.Containers = updateContainerImages(currentVirtualPod.Spec.Containers, pod.Spec.Containers)
|
||||
currentVirtualPod.Spec.InitContainers = updateContainerImages(currentVirtualPod.Spec.InitContainers, pod.Spec.InitContainers)
|
||||
|
||||
currentVirtualPod.Spec.ActiveDeadlineSeconds = pod.Spec.ActiveDeadlineSeconds
|
||||
currentVirtualPod.Spec.Tolerations = pod.Spec.Tolerations
|
||||
|
||||
// in the virtual cluster we can update also the labels and annotations
|
||||
currentVirtualPod.Annotations = pod.Annotations
|
||||
currentVirtualPod.Labels = pod.Labels
|
||||
|
||||
if err := p.VirtualClient.Update(ctx, ¤tVirtualPod); err != nil {
|
||||
return fmt.Errorf("unable to update pod in the virtual cluster: %w", err)
|
||||
}
|
||||
|
||||
// Update Pod in the host cluster
|
||||
|
||||
hostNamespaceName := types.NamespacedName{
|
||||
Namespace: p.ClusterNamespace,
|
||||
Name: p.Translator.TranslateName(pod.Namespace, pod.Name),
|
||||
}
|
||||
|
||||
var currentHostPod corev1.Pod
|
||||
if err := p.HostClient.Get(ctx, hostNamespaceName, ¤tHostPod); err != nil {
|
||||
return fmt.Errorf("unable to get pod to update from host cluster: %w", err)
|
||||
}
|
||||
|
||||
currentHostPod.Spec.Containers = updateContainerImages(currentHostPod.Spec.Containers, pod.Spec.Containers)
|
||||
currentHostPod.Spec.InitContainers = updateContainerImages(currentHostPod.Spec.InitContainers, pod.Spec.InitContainers)
|
||||
|
||||
// update ActiveDeadlineSeconds and Tolerations
|
||||
currentHostPod.Spec.ActiveDeadlineSeconds = pod.Spec.ActiveDeadlineSeconds
|
||||
currentHostPod.Spec.Tolerations = pod.Spec.Tolerations
|
||||
|
||||
if err := p.HostClient.Update(ctx, ¤tHostPod); err != nil {
|
||||
return fmt.Errorf("unable to update pod in the host cluster: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateContainerImages updates the images of the original containers with the images from the updated containers of the same name
|
||||
func updateContainerImages(original, updated []v1.Container) []v1.Container {
|
||||
newImages := make(map[string]string)
|
||||
|
||||
for _, c := range updated {
|
||||
newImages[c.Name] = c.Image
|
||||
}
|
||||
|
||||
for i, c := range original {
|
||||
if updatedImage, found := newImages[c.Name]; found {
|
||||
original[i].Image = updatedImage
|
||||
}
|
||||
}
|
||||
|
||||
return original
|
||||
}
|
||||
|
||||
// DeletePod executes deletePod with retry
|
||||
func (p *Provider) DeletePod(ctx context.Context, pod *corev1.Pod) error {
|
||||
return p.withRetry(ctx, p.deletePod, pod)
|
||||
}
|
||||
|
||||
// deletePod takes a Kubernetes Pod and deletes it from the provider. Once a pod is deleted, the provider is
|
||||
// expected to call the NotifyPods callback with a terminal pod status where all the containers are in a terminal
|
||||
// state, as well as the pod. DeletePod may be called multiple times for the same pod.
|
||||
func (p *Provider) deletePod(ctx context.Context, pod *corev1.Pod) error {
|
||||
p.logger.Infof("Got request to delete pod %s", pod.Name)
|
||||
hostName := p.Translator.TranslateName(pod.Namespace, pod.Name)
|
||||
|
||||
err := p.CoreClient.Pods(p.ClusterNamespace).Delete(ctx, hostName, metav1.DeleteOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to delete pod %s/%s: %w", pod.Namespace, pod.Name, err)
|
||||
}
|
||||
|
||||
if err = p.pruneUnusedVolumes(ctx, pod); err != nil {
|
||||
// note that we don't return an error here. The pod was successfully deleted, another process
|
||||
// should clean this without affecting the user
|
||||
p.logger.Errorf("failed to prune leftover volumes for %s/%s: %w, resources may be left", pod.Namespace, pod.Name, err)
|
||||
}
|
||||
|
||||
p.logger.Infof("Deleted pod %s", pod.Name)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// pruneUnusedVolumes removes volumes in use by pod that aren't used by any other pods
|
||||
func (p *Provider) pruneUnusedVolumes(ctx context.Context, pod *corev1.Pod) error {
|
||||
rawSecrets, rawConfigMaps := getSecretsAndConfigmaps(pod)
|
||||
// since this pod was removed, initially mark all of the secrets/configmaps it uses as eligible
// for pruning
|
||||
pruneSecrets := sets.Set[string]{}.Insert(rawSecrets...)
|
||||
pruneConfigMap := sets.Set[string]{}.Insert(rawConfigMaps...)
|
||||
|
||||
var pods corev1.PodList
|
||||
// only pods in the same namespace could be using secrets/configmaps that this pod is using
|
||||
err := p.VirtualClient.List(ctx, &pods, &client.ListOptions{
|
||||
Namespace: pod.Namespace,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to list pods: %w", err)
|
||||
}
|
||||
|
||||
for _, vPod := range pods.Items {
|
||||
if vPod.Name == pod.Name {
|
||||
continue
|
||||
}
|
||||
|
||||
secrets, configMaps := getSecretsAndConfigmaps(&vPod)
|
||||
pruneSecrets.Delete(secrets...)
|
||||
pruneConfigMap.Delete(configMaps...)
|
||||
}
|
||||
|
||||
for _, secretName := range pruneSecrets.UnsortedList() {
|
||||
var secret corev1.Secret
|
||||
|
||||
key := types.NamespacedName{
|
||||
Name: secretName,
|
||||
Namespace: pod.Namespace,
|
||||
}
|
||||
|
||||
if err := p.VirtualClient.Get(ctx, key, &secret); err != nil {
|
||||
return fmt.Errorf("unable to get secret %s/%s for pod volume: %w", pod.Namespace, secretName, err)
|
||||
}
|
||||
|
||||
if err = p.Handler.RemoveResource(ctx, &secret); err != nil {
|
||||
return fmt.Errorf("unable to remove secret %s/%s for pod volume: %w", pod.Namespace, secretName, err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, configMapName := range pruneConfigMap.UnsortedList() {
|
||||
var configMap corev1.ConfigMap
|
||||
|
||||
key := types.NamespacedName{
|
||||
Name: configMapName,
|
||||
Namespace: pod.Namespace,
|
||||
}
|
||||
|
||||
if err := p.VirtualClient.Get(ctx, key, &configMap); err != nil {
|
||||
return fmt.Errorf("unable to get configMap %s/%s for pod volume: %w", pod.Namespace, configMapName, err)
|
||||
}
|
||||
|
||||
if err = p.Handler.RemoveResource(ctx, &configMap); err != nil {
|
||||
return fmt.Errorf("unable to remove configMap %s/%s for pod volume: %w", pod.Namespace, configMapName, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetPod retrieves a pod by name from the provider (can be cached).
|
||||
// The Pod returned is expected to be immutable, and may be accessed
|
||||
// concurrently outside of the calling goroutine. Therefore it is recommended
|
||||
// to return a version after DeepCopy.
|
||||
func (p *Provider) GetPod(ctx context.Context, namespace, name string) (*corev1.Pod, error) {
|
||||
p.logger.Debugw("got a request for get pod", "Namespace", namespace, "Name", name)
|
||||
hostNamespaceName := types.NamespacedName{
|
||||
Namespace: p.ClusterNamespace,
|
||||
Name: p.Translator.TranslateName(namespace, name),
|
||||
}
|
||||
|
||||
var pod corev1.Pod
|
||||
|
||||
if err := p.HostClient.Get(ctx, hostNamespaceName, &pod); err != nil {
|
||||
return nil, fmt.Errorf("error when retrieving pod: %w", err)
|
||||
}
|
||||
|
||||
p.Translator.TranslateFrom(&pod)
|
||||
|
||||
return &pod, nil
|
||||
}
|
||||
|
||||
// GetPodStatus retrieves the status of a pod by name from the provider.
|
||||
// The PodStatus returned is expected to be immutable, and may be accessed
|
||||
// concurrently outside of the calling goroutine. Therefore it is recommended
|
||||
// to return a version after DeepCopy.
|
||||
func (p *Provider) GetPodStatus(ctx context.Context, namespace, name string) (*corev1.PodStatus, error) {
|
||||
p.logger.Debugw("got a request for pod status", "Namespace", namespace, "Name", name)
|
||||
|
||||
pod, err := p.GetPod(ctx, namespace, name)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to get pod for status: %w", err)
|
||||
}
|
||||
|
||||
p.logger.Debugw("got pod status", "Namespace", namespace, "Name", name, "Status", pod.Status)
|
||||
|
||||
return pod.Status.DeepCopy(), nil
|
||||
}
|
||||
|
||||
// GetPods retrieves a list of all pods running on the provider (can be cached).
|
||||
// The Pods returned are expected to be immutable, and may be accessed
|
||||
// concurrently outside of the calling goroutine. Therefore it is recommended
|
||||
// to return a version after DeepCopy.
|
||||
func (p *Provider) GetPods(ctx context.Context) ([]*corev1.Pod, error) {
|
||||
selector := labels.NewSelector()
|
||||
|
||||
requirement, err := labels.NewRequirement(translate.ClusterNameLabel, selection.Equals, []string{p.ClusterName})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to create label selector: %w", err)
|
||||
}
|
||||
|
||||
selector = selector.Add(*requirement)
|
||||
|
||||
var podList corev1.PodList
|
||||
err = p.HostClient.List(ctx, &podList, &client.ListOptions{LabelSelector: selector})
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to list pods: %w", err)
|
||||
}
|
||||
|
||||
retPods := []*corev1.Pod{}
|
||||
|
||||
for _, pod := range podList.DeepCopy().Items {
|
||||
p.Translator.TranslateFrom(&pod)
|
||||
retPods = append(retPods, &pod)
|
||||
}
|
||||
|
||||
return retPods, nil
|
||||
}
|
||||
|
||||
// configureNetworking will inject network information to each pod to connect them to the
|
||||
// virtual cluster api server, as well as configure DNS information to connect them to the
|
||||
// synced coredns on the host cluster.
|
||||
func configureNetworking(pod *corev1.Pod, podName, podNamespace, serverIP, dnsIP string) {
|
||||
// inject serverIP to hostalias for the pod
|
||||
pod.Spec.HostAliases = append(pod.Spec.HostAliases, corev1.HostAlias{
|
||||
IP: serverIP,
|
||||
Hostnames: []string{
|
||||
"kubernetes",
|
||||
"kubernetes.default",
|
||||
"kubernetes.default.svc",
|
||||
"kubernetes.default.svc.cluster",
|
||||
"kubernetes.default.svc.cluster.local",
|
||||
},
|
||||
})
|
||||
|
||||
// injecting cluster DNS IP to the pods except for coredns pod
|
||||
if !strings.HasPrefix(podName, "coredns") {
|
||||
pod.Spec.DNSPolicy = corev1.DNSNone
|
||||
pod.Spec.DNSConfig = &corev1.PodDNSConfig{
|
||||
Nameservers: []string{
|
||||
dnsIP,
|
||||
},
|
||||
Searches: []string{
|
||||
podNamespace + ".svc.cluster.local",
|
||||
"svc.cluster.local",
|
||||
"cluster.local",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
updatedEnvVars := []corev1.EnvVar{
|
||||
{Name: "KUBERNETES_PORT", Value: "tcp://" + serverIP + ":6443"},
|
||||
{Name: "KUBERNETES_SERVICE_HOST", Value: serverIP},
|
||||
{Name: "KUBERNETES_SERVICE_PORT", Value: "6443"},
|
||||
{Name: "KUBERNETES_SERVICE_PORT_HTTPS", Value: "6443"},
|
||||
{Name: "KUBERNETES_PORT_443_TCP", Value: "tcp://" + serverIP + ":6443"},
|
||||
{Name: "KUBERNETES_PORT_443_TCP_ADDR", Value: serverIP},
|
||||
{Name: "KUBERNETES_PORT_443_TCP_PORT", Value: "6443"},
|
||||
}
|
||||
|
||||
// inject networking information to the pod's environment variables
|
||||
for i := range pod.Spec.Containers {
|
||||
pod.Spec.Containers[i].Env = overrideEnvVars(pod.Spec.Containers[i].Env, updatedEnvVars)
|
||||
}
|
||||
|
||||
// handle init containers as well
|
||||
for i := range pod.Spec.InitContainers {
|
||||
pod.Spec.InitContainers[i].Env = overrideEnvVars(pod.Spec.InitContainers[i].Env, updatedEnvVars)
|
||||
}
|
||||
}
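// Illustrative sketch (not part of the original source): the effect of
// configureNetworking on a minimal pod, using placeholder IPs.
//
//	pod := &corev1.Pod{Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "app"}}}}
//	configureNetworking(pod, "app-0", "default", "10.0.0.10", "10.43.0.10")
//	// pod.Spec.HostAliases[0].IP == "10.0.0.10" (kubernetes.* names resolve to the server IP)
//	// pod.Spec.DNSConfig.Nameservers[0] == "10.43.0.10" (synced CoreDNS on the host)
//	// pod.Spec.Containers[0].Env now carries the KUBERNETES_* variables pointing at port 6443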
|
||||
|
||||
// overrideEnvVars will override the orig environment variables if found in the updated list
|
||||
func overrideEnvVars(orig, updated []corev1.EnvVar) []corev1.EnvVar {
|
||||
if len(updated) == 0 {
|
||||
return orig
|
||||
}
|
||||
|
||||
// create map for single lookup
|
||||
updatedEnvVarMap := make(map[string]corev1.EnvVar)
|
||||
for _, updatedEnvVar := range updated {
|
||||
updatedEnvVarMap[updatedEnvVar.Name] = updatedEnvVar
|
||||
}
|
||||
|
||||
for i, origEnvVar := range orig {
|
||||
if updatedEnvVar, found := updatedEnvVarMap[origEnvVar.Name]; found {
|
||||
orig[i] = updatedEnvVar
|
||||
}
|
||||
}
|
||||
|
||||
return orig
|
||||
}
|
||||
|
||||
// getSecretsAndConfigmaps retrieves a list of all secrets/configmaps that are in use by a given pod. Useful
|
||||
// for removing/seeing which virtual cluster resources need to be in the host cluster.
|
||||
func getSecretsAndConfigmaps(pod *corev1.Pod) ([]string, []string) {
|
||||
var (
|
||||
secrets []string
|
||||
configMaps []string
|
||||
)
|
||||
|
||||
for _, volume := range pod.Spec.Volumes {
|
||||
if volume.Secret != nil {
|
||||
secrets = append(secrets, volume.Secret.SecretName)
|
||||
} else if volume.ConfigMap != nil {
|
||||
configMaps = append(configMaps, volume.ConfigMap.Name)
|
||||
} else if volume.Projected != nil {
|
||||
for _, source := range volume.Projected.Sources {
|
||||
if source.ConfigMap != nil {
|
||||
configMaps = append(configMaps, source.ConfigMap.Name)
|
||||
} else if source.Secret != nil {
|
||||
secrets = append(secrets, source.Secret.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return secrets, configMaps
|
||||
}
|
||||
|
||||
// configureFieldPathEnv will retrieve all annotations created by the pod mutator webhook
// to assign env fieldPaths to pods. It will also make sure to change the metadata.name and metadata.namespace
// fieldRefs to the assigned annotations.
|
||||
func (p *Provider) configureFieldPathEnv(pod, tPod *v1.Pod) error {
|
||||
// override metadata.name and metadata.namespace with pod annotations
|
||||
for i, container := range pod.Spec.InitContainers {
|
||||
for j, envVar := range container.Env {
|
||||
if envVar.ValueFrom == nil || envVar.ValueFrom.FieldRef == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
fieldPath := envVar.ValueFrom.FieldRef.FieldPath
|
||||
|
||||
if fieldPath == translate.MetadataNameField {
|
||||
envVar.ValueFrom.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNameAnnotation)
|
||||
pod.Spec.InitContainers[i].Env[j] = envVar
|
||||
}
|
||||
|
||||
if fieldPath == translate.MetadataNamespaceField {
|
||||
envVar.ValueFrom.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNamespaceAnnotation)
|
||||
pod.Spec.InitContainers[i].Env[j] = envVar
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for i, container := range pod.Spec.Containers {
|
||||
for j, envVar := range container.Env {
|
||||
if envVar.ValueFrom == nil || envVar.ValueFrom.FieldRef == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
fieldPath := envVar.ValueFrom.FieldRef.FieldPath
|
||||
if fieldPath == translate.MetadataNameField {
|
||||
envVar.ValueFrom.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNameAnnotation)
|
||||
pod.Spec.Containers[i].Env[j] = envVar
|
||||
}
|
||||
|
||||
if fieldPath == translate.MetadataNamespaceField {
|
||||
envVar.ValueFrom.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNamespaceAnnotation)
|
||||
pod.Spec.Containers[i].Env[j] = envVar
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for name, value := range pod.Annotations {
|
||||
if strings.Contains(name, webhook.FieldpathField) {
|
||||
containerIndex, envName, err := webhook.ParseFieldPathAnnotationKey(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// re-adding these envs to the pod
|
||||
tPod.Spec.Containers[containerIndex].Env = append(tPod.Spec.Containers[containerIndex].Env, v1.EnvVar{
|
||||
Name: envName,
|
||||
ValueFrom: &v1.EnvVarSource{
|
||||
FieldRef: &v1.ObjectFieldSelector{
|
||||
FieldPath: value,
|
||||
},
|
||||
},
|
||||
})
|
||||
// removing the annotation from the pod
|
||||
delete(tPod.Annotations, name)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,71 +0,0 @@
|
||||
package provider
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
func Test_overrideEnvVars(t *testing.T) {
|
||||
type args struct {
|
||||
orig []corev1.EnvVar
|
||||
new []corev1.EnvVar
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want []corev1.EnvVar
|
||||
}{
|
||||
{
|
||||
name: "orig and new are empty",
|
||||
args: args{
|
||||
orig: []v1.EnvVar{},
|
||||
new: []v1.EnvVar{},
|
||||
},
|
||||
want: []v1.EnvVar{},
|
||||
},
|
||||
{
|
||||
name: "only orig is empty",
|
||||
args: args{
|
||||
orig: []v1.EnvVar{},
|
||||
new: []v1.EnvVar{{Name: "FOO", Value: "new_val"}},
|
||||
},
|
||||
want: []v1.EnvVar{},
|
||||
},
|
||||
{
|
||||
name: "orig has a matching element",
|
||||
args: args{
|
||||
orig: []v1.EnvVar{{Name: "FOO", Value: "old_val"}},
|
||||
new: []v1.EnvVar{{Name: "FOO", Value: "new_val"}},
|
||||
},
|
||||
want: []v1.EnvVar{{Name: "FOO", Value: "new_val"}},
|
||||
},
|
||||
{
|
||||
name: "orig have multiple elements",
|
||||
args: args{
|
||||
orig: []v1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "old_val_1"}},
|
||||
new: []v1.EnvVar{{Name: "FOO_1", Value: "new_val_1"}},
|
||||
},
|
||||
want: []v1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "new_val_1"}},
|
||||
},
|
||||
{
|
||||
name: "orig and new have multiple elements and some not matching",
|
||||
args: args{
|
||||
orig: []v1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "old_val_1"}},
|
||||
new: []v1.EnvVar{{Name: "FOO_1", Value: "new_val_1"}, {Name: "FOO_2", Value: "val_1"}},
|
||||
},
|
||||
want: []v1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "new_val_1"}},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := overrideEnvVars(tt.args.orig, tt.args.new); !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("overrideEnvVars() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,157 +0,0 @@
|
||||
package provider
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
k3kcontroller "github.com/rancher/k3k/pkg/controller"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/utils/ptr"
|
||||
)
|
||||
|
||||
const (
|
||||
kubeAPIAccessPrefix = "kube-api-access"
|
||||
serviceAccountTokenMountPath = "/var/run/secrets/kubernetes.io/serviceaccount"
|
||||
)
|
||||
|
||||
// transformTokens copies the serviceaccount token used by the pod's serviceaccount to a secret on the host cluster and mounts it
// to look like the serviceaccount token
|
||||
func (p *Provider) transformTokens(ctx context.Context, pod, tPod *corev1.Pod) error {
|
||||
p.logger.Infow("transforming token", "Pod", pod.Name, "Namespace", pod.Namespace, "serviceAccountName", pod.Spec.ServiceAccountName)
|
||||
|
||||
// skip this process if the kube-api-access volume is already removed from the pod;
// this is needed in case users already add their own custom tokens, as in Rancher imported clusters
|
||||
if !isKubeAccessVolumeFound(pod) {
|
||||
return nil
|
||||
}
|
||||
|
||||
virtualSecretName := k3kcontroller.SafeConcatNameWithPrefix(pod.Spec.ServiceAccountName, "token")
|
||||
|
||||
virtualSecret := virtualSecret(virtualSecretName, pod.Namespace, pod.Spec.ServiceAccountName)
|
||||
if err := p.VirtualClient.Create(ctx, virtualSecret); err != nil {
|
||||
if !apierrors.IsAlreadyExists(err) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// extracting the tokens data from the secret we just created
|
||||
virtualSecretKey := types.NamespacedName{
|
||||
Name: virtualSecret.Name,
|
||||
Namespace: virtualSecret.Namespace,
|
||||
}
|
||||
if err := p.VirtualClient.Get(ctx, virtualSecretKey, virtualSecret); err != nil {
|
||||
return err
|
||||
}
|
||||
// To avoid race conditions we need to check if the secret's data has been populated
|
||||
// including the token, ca.crt and namespace
|
||||
if len(virtualSecret.Data) < 3 {
|
||||
return fmt.Errorf("token secret %s/%s data is empty", virtualSecret.Namespace, virtualSecret.Name)
|
||||
}
|
||||
|
||||
hostSecret := virtualSecret.DeepCopy()
|
||||
hostSecret.Type = ""
|
||||
hostSecret.Annotations = make(map[string]string)
|
||||
|
||||
p.Translator.TranslateTo(hostSecret)
|
||||
|
||||
if err := p.HostClient.Create(ctx, hostSecret); err != nil {
|
||||
if !apierrors.IsAlreadyExists(err) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
p.translateToken(tPod, hostSecret.Name)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func virtualSecret(name, namespace, serviceAccountName string) *corev1.Secret {
|
||||
return &corev1.Secret{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Secret",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
Annotations: map[string]string{
|
||||
corev1.ServiceAccountNameKey: serviceAccountName,
|
||||
},
|
||||
},
|
||||
Type: corev1.SecretTypeServiceAccountToken,
|
||||
}
|
||||
}
|
||||
|
||||
// translateToken will remove the serviceaccount from the pod and replace the kube-api-access volume
|
||||
// with a custom token volume and mount it to all containers within the pod
|
||||
func (p *Provider) translateToken(pod *corev1.Pod, hostSecretName string) {
|
||||
pod.Spec.ServiceAccountName = ""
|
||||
pod.Spec.DeprecatedServiceAccount = ""
|
||||
pod.Spec.AutomountServiceAccountToken = ptr.To(false)
|
||||
removeKubeAccessVolume(pod)
|
||||
addKubeAccessVolume(pod, hostSecretName)
|
||||
}
|
||||
|
||||
func isKubeAccessVolumeFound(pod *corev1.Pod) bool {
|
||||
for _, volume := range pod.Spec.Volumes {
|
||||
if strings.HasPrefix(volume.Name, kubeAPIAccessPrefix) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func removeKubeAccessVolume(pod *corev1.Pod) {
|
||||
for i, volume := range pod.Spec.Volumes {
|
||||
if strings.HasPrefix(volume.Name, kubeAPIAccessPrefix) {
|
||||
pod.Spec.Volumes = append(pod.Spec.Volumes[:i], pod.Spec.Volumes[i+1:]...)
|
||||
}
|
||||
}
|
||||
// init containers
|
||||
for i, container := range pod.Spec.InitContainers {
|
||||
for j, mountPath := range container.VolumeMounts {
|
||||
if strings.HasPrefix(mountPath.Name, kubeAPIAccessPrefix) {
|
||||
pod.Spec.InitContainers[i].VolumeMounts = append(pod.Spec.InitContainers[i].VolumeMounts[:j], pod.Spec.InitContainers[i].VolumeMounts[j+1:]...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for i, container := range pod.Spec.Containers {
|
||||
for j, mountPath := range container.VolumeMounts {
|
||||
if strings.HasPrefix(mountPath.Name, kubeAPIAccessPrefix) {
|
||||
pod.Spec.Containers[i].VolumeMounts = append(pod.Spec.Containers[i].VolumeMounts[:j], pod.Spec.Containers[i].VolumeMounts[j+1:]...)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func addKubeAccessVolume(pod *corev1.Pod, hostSecretName string) {
|
||||
var tokenVolumeName = k3kcontroller.SafeConcatNameWithPrefix(kubeAPIAccessPrefix)
|
||||
pod.Spec.Volumes = append(pod.Spec.Volumes, corev1.Volume{
|
||||
Name: tokenVolumeName,
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
Secret: &corev1.SecretVolumeSource{
|
||||
SecretName: hostSecretName,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
for i := range pod.Spec.InitContainers {
|
||||
pod.Spec.InitContainers[i].VolumeMounts = append(pod.Spec.InitContainers[i].VolumeMounts, corev1.VolumeMount{
|
||||
Name: tokenVolumeName,
|
||||
MountPath: serviceAccountTokenMountPath,
|
||||
})
|
||||
}
|
||||
|
||||
for i := range pod.Spec.Containers {
|
||||
pod.Spec.Containers[i].VolumeMounts = append(pod.Spec.Containers[i].VolumeMounts, corev1.VolumeMount{
|
||||
Name: tokenVolumeName,
|
||||
MountPath: serviceAccountTokenMountPath,
|
||||
})
|
||||
}
|
||||
}
|
@@ -1,25 +0,0 @@
package provider

import (
	"github.com/virtual-kubelet/virtual-kubelet/node/api"
	"k8s.io/client-go/tools/remotecommand"
)

// translatorSizeQueue feeds the size events from the WebSocket
// resizeChan into the SPDY client input. Implements TerminalSizeQueue
// interface.
type translatorSizeQueue struct {
	resizeChan <-chan api.TermSize
}

func (t *translatorSizeQueue) Next() *remotecommand.TerminalSize {
	size, ok := <-t.resizeChan
	if !ok {
		return nil
	}

	return &remotecommand.TerminalSize{
		Width:  size.Width,
		Height: size.Height,
	}
}
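As a brief illustration of where such a queue plugs in, the sketch below passes it to a client-go SPDY executor; the executor, context, and io streams are placeholders for this example (it also assumes the standard context and io imports) and are not taken from this diff.

// Illustrative wiring of translatorSizeQueue into an exec stream.
// exec, ctx and the io streams are placeholders for this sketch.
func streamWithResize(ctx context.Context, exec remotecommand.Executor, in io.Reader, out, errOut io.Writer, resizeChan <-chan api.TermSize) error {
	return exec.StreamWithContext(ctx, remotecommand.StreamOptions{
		Stdin:             in,
		Stdout:            out,
		Stderr:            errOut,
		Tty:               true,
		TerminalSizeQueue: &translatorSizeQueue{resizeChan: resizeChan}, // Next() is called on every resize event
	})
}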
@@ -1,114 +0,0 @@
package translate

import (
	"encoding/hex"
	"fmt"

	"github.com/rancher/k3k/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

const (
	// ClusterNameLabel is the key for the label that contains the name of the virtual cluster
	// this resource was made in
	ClusterNameLabel = "k3k.io/clusterName"
	// ResourceNameAnnotation is the key for the annotation that contains the original name of this
	// resource in the virtual cluster
	ResourceNameAnnotation = "k3k.io/name"
	// ResourceNamespaceAnnotation is the key for the annotation that contains the original namespace of this
	// resource in the virtual cluster
	ResourceNamespaceAnnotation = "k3k.io/namespace"
	// MetadataNameField is the downward API field for the object's name
	MetadataNameField = "metadata.name"
	// MetadataNamespaceField is the downward API field for the object's namespace
	MetadataNamespaceField = "metadata.namespace"
)

type ToHostTranslator struct {
	// ClusterName is the name of the virtual cluster whose resources we are
	// translating to a host cluster
	ClusterName string
	// ClusterNamespace is the namespace of the virtual cluster whose resources
	// we are translating to a host cluster
	ClusterNamespace string
}

// TranslateTo translates a virtual cluster object to a host cluster object. This should only be used for
// static resources such as configmaps/secrets, and not for things like pods (which can reference other
// objects). Note that this won't set host-cluster values (like resource version), so when updating you
// may need to fetch the existing object and merge the two before using this.
func (t *ToHostTranslator) TranslateTo(obj client.Object) {
	// owning objects may be in the virtual cluster, but may not be in the host cluster
	obj.SetOwnerReferences(nil)

	// add some annotations to make it easier to track the source object
	annotations := obj.GetAnnotations()
	if annotations == nil {
		annotations = map[string]string{}
	}

	annotations[ResourceNameAnnotation] = obj.GetName()
	annotations[ResourceNamespaceAnnotation] = obj.GetNamespace()
	obj.SetAnnotations(annotations)

	// add a label to quickly identify objects owned by a given virtual cluster
	labels := obj.GetLabels()
	if labels == nil {
		labels = map[string]string{}
	}

	labels[ClusterNameLabel] = t.ClusterName
	obj.SetLabels(labels)

	// resource version/UID won't match what's in the host cluster.
	obj.SetResourceVersion("")
	obj.SetUID("")

	// set the name and the namespace so that this goes in the proper host namespace
	// and doesn't collide with other resources
	obj.SetName(t.TranslateName(obj.GetNamespace(), obj.GetName()))
	obj.SetNamespace(t.ClusterNamespace)
	obj.SetFinalizers(nil)
}

func (t *ToHostTranslator) TranslateFrom(obj client.Object) {
	// owning objects may be in the virtual cluster, but may not be in the host cluster
	obj.SetOwnerReferences(nil)

	// remove the annotations added to track the original name
	annotations := obj.GetAnnotations()
	// TODO: It's possible that this was erased by a change on the host cluster.
	// In this case, we need to have some sort of fallback or error return
	name := annotations[ResourceNameAnnotation]
	namespace := annotations[ResourceNamespaceAnnotation]

	obj.SetName(name)
	obj.SetNamespace(namespace)
	delete(annotations, ResourceNameAnnotation)
	delete(annotations, ResourceNamespaceAnnotation)
	obj.SetAnnotations(annotations)

	// remove the clusterName tracking label
	labels := obj.GetLabels()
	delete(labels, ClusterNameLabel)
	obj.SetLabels(labels)

	// resource version/UID won't match what's in the virtual cluster.
	obj.SetResourceVersion("")
	obj.SetUID("")
}

// TranslateName returns the name of the resource in the host cluster. It does not update the object with this name.
func (t *ToHostTranslator) TranslateName(namespace string, name string) string {
	// we need to come up with a name which is:
	// - somewhat connectable to the original resource
	// - a valid k8s name
	// - idempotently calculable
	// - unique for this combination of name/namespace/cluster
	namePrefix := fmt.Sprintf("%s-%s-%s", name, namespace, t.ClusterName)
	// use + as a separator since it can't be in an object name
	nameKey := fmt.Sprintf("%s+%s+%s", name, namespace, t.ClusterName)
	// it's possible that the suffix will be in the name, so we use hex to make it valid for k8s
	nameSuffix := hex.EncodeToString([]byte(nameKey))

	return controller.SafeConcatName(namePrefix, nameSuffix)
}
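To make the naming scheme in TranslateName concrete, the walk-through below shows the intermediate values for a hypothetical resource; the final shortening behaviour of controller.SafeConcatName is assumed from the comments above rather than verified against the controller package.

// Hypothetical example of host-side name translation.
t := &ToHostTranslator{ClusterName: "dev", ClusterNamespace: "k3k-dev"}

hostName := t.TranslateName("web", "nginx-config")
// namePrefix: "nginx-config-web-dev"
// nameKey:    "nginx-config+web+dev"
// nameSuffix: "6e67696e782d636f6e6669672b7765622b646576" (hex of nameKey)
// hostName:   SafeConcatName(namePrefix, nameSuffix), shortened as needed to
//             remain a valid Kubernetes object name.
_ = hostName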
137	main.go
@@ -3,157 +3,50 @@ package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"flag"
|
||||
|
||||
"github.com/go-logr/zapr"
|
||||
"github.com/rancher/k3k/cli/cmds"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
"github.com/rancher/k3k/pkg/buildinfo"
|
||||
"github.com/rancher/k3k/pkg/controller/cluster"
|
||||
"github.com/rancher/k3k/pkg/controller/clusterset"
|
||||
"github.com/rancher/k3k/pkg/log"
|
||||
"github.com/urfave/cli/v2"
|
||||
"go.uber.org/zap"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
"k8s.io/klog/v2"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
ctrlruntimelog "sigs.k8s.io/controller-runtime/pkg/log"
|
||||
ctrlconfig "sigs.k8s.io/controller-runtime/pkg/client/config"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
)
|
||||
|
||||
var (
|
||||
scheme = runtime.NewScheme()
|
||||
clusterCIDR string
|
||||
sharedAgentImage string
|
||||
sharedAgentImagePullPolicy string
|
||||
kubeconfig string
|
||||
debug bool
|
||||
logger *log.Logger
|
||||
flags = []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "kubeconfig",
|
||||
EnvVars: []string{"KUBECONFIG"},
|
||||
Usage: "Kubeconfig path",
|
||||
Destination: &kubeconfig,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "cluster-cidr",
|
||||
EnvVars: []string{"CLUSTER_CIDR"},
|
||||
Usage: "Cluster CIDR to be added to the networkpolicy of the clustersets",
|
||||
Destination: &clusterCIDR,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "shared-agent-image",
|
||||
EnvVars: []string{"SHARED_AGENT_IMAGE"},
|
||||
Usage: "K3K Virtual Kubelet image",
|
||||
Value: "rancher/k3k:latest",
|
||||
Destination: &sharedAgentImage,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "shared-agent-pull-policy",
|
||||
EnvVars: []string{"SHARED_AGENT_PULL_POLICY"},
|
||||
Usage: "K3K Virtual Kubelet image pull policy must be one of Always, IfNotPresent or Never",
|
||||
Destination: &sharedAgentImagePullPolicy,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "debug",
|
||||
EnvVars: []string{"DEBUG"},
|
||||
Usage: "Debug level logging",
|
||||
Destination: &debug,
|
||||
},
|
||||
}
|
||||
)
|
||||
var Scheme = runtime.NewScheme()
|
||||
|
||||
func init() {
|
||||
_ = clientgoscheme.AddToScheme(scheme)
|
||||
_ = v1alpha1.AddToScheme(scheme)
|
||||
_ = clientgoscheme.AddToScheme(Scheme)
|
||||
_ = v1alpha1.AddToScheme(Scheme)
|
||||
}
|
||||
|
||||
func main() {
|
||||
app := cmds.NewApp()
|
||||
app.Flags = flags
|
||||
app.Action = run
|
||||
app.Version = buildinfo.Version
|
||||
app.Before = func(clx *cli.Context) error {
|
||||
if err := validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
ctrlconfig.RegisterFlags(nil)
|
||||
flag.Parse()
|
||||
|
||||
logger = log.New(debug)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := app.Run(os.Args); err != nil {
|
||||
logger.Fatalw("failed to run k3k controller", zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
func run(clx *cli.Context) error {
|
||||
ctx := context.Background()
|
||||
|
||||
logger.Info("Starting k3k - Version: " + buildinfo.Version)
|
||||
|
||||
kubeconfig := flag.Lookup("kubeconfig").Value.String()
|
||||
restConfig, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create config from kubeconfig file: %v", err)
|
||||
klog.Fatalf("Failed to create config from kubeconfig file: %v", err)
|
||||
}
|
||||
|
||||
mgr, err := ctrl.NewManager(restConfig, manager.Options{
|
||||
Scheme: scheme,
|
||||
Scheme: Scheme,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create new controller runtime manager: %v", err)
|
||||
klog.Fatalf("Failed to create new controller runtime manager: %v", err)
|
||||
}
|
||||
|
||||
ctrlruntimelog.SetLogger(zapr.NewLogger(logger.Desugar().WithOptions(zap.AddCallerSkip(1))))
|
||||
|
||||
logger.Info("adding cluster controller")
|
||||
|
||||
if err := cluster.Add(ctx, mgr, sharedAgentImage, sharedAgentImagePullPolicy); err != nil {
|
||||
return fmt.Errorf("failed to add the new cluster controller: %v", err)
|
||||
}
|
||||
|
||||
logger.Info("adding etcd pod controller")
|
||||
|
||||
if err := cluster.AddPodController(ctx, mgr); err != nil {
|
||||
return fmt.Errorf("failed to add the new cluster controller: %v", err)
|
||||
}
|
||||
|
||||
logger.Info("adding clusterset controller")
|
||||
|
||||
if err := clusterset.Add(ctx, mgr, clusterCIDR); err != nil {
|
||||
return fmt.Errorf("failed to add the clusterset controller: %v", err)
|
||||
}
|
||||
|
||||
if clusterCIDR == "" {
|
||||
logger.Info("adding networkpolicy node controller")
|
||||
|
||||
if err := clusterset.AddNodeController(ctx, mgr); err != nil {
|
||||
return fmt.Errorf("failed to add the clusterset node controller: %v", err)
|
||||
}
|
||||
if err := cluster.Add(ctx, mgr); err != nil {
|
||||
klog.Fatalf("Failed to add the new controller: %v", err)
|
||||
}
|
||||
|
||||
if err := mgr.Start(ctx); err != nil {
|
||||
return fmt.Errorf("failed to start the manager: %v", err)
|
||||
klog.Fatalf("Failed to start the manager: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func validate() error {
|
||||
if sharedAgentImagePullPolicy != "" {
|
||||
if sharedAgentImagePullPolicy != string(v1.PullAlways) &&
|
||||
sharedAgentImagePullPolicy != string(v1.PullIfNotPresent) &&
|
||||
sharedAgentImagePullPolicy != string(v1.PullNever) {
|
||||
return errors.New("invalid value for shared agent image policy")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
6	manifest-runtime.tmpl	Normal file
@@ -0,0 +1,6 @@
image: rancher/k3k:{{replace "+" "-" build.tag}}
manifests:
  - image: rancher/k3k:{{replace "+" "-" build.tag}}-amd64
    platform:
      architecture: amd64
      os: linux
16	ops/boilerplate.go.txt	Executable file
@@ -0,0 +1,16 @@
/*
Copyright YEAR Rancher Labs, Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
28	ops/build	Executable file
@@ -0,0 +1,28 @@
#!/bin/bash
set -ex

source $(dirname $0)/version

cd $(dirname $0)/..

mkdir -p bin deploy

if [ "$(uname)" = "Linux" ]; then
    OTHER_LINKFLAGS="-extldflags -static -s"
fi

LINKFLAGS="-X github.com/rancher/k3k.Version=$VERSION"
LINKFLAGS="-X github.com/rancher/k3k.GitCommit=$COMMIT $LINKFLAGS"
CGO_ENABLED=0 go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3k
if [ "$CROSS" = "true" ] && [ "$ARCH" = "amd64" ]; then
    GOOS=darwin go build -ldflags "$LINKFLAGS" -o bin/k3k-darwin
    GOOS=windows go build -ldflags "$LINKFLAGS" -o bin/k3k-windows
fi

# build k3kcli
CGO_ENABLED=0 go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3kcli ./cli
if [ "$CROSS" = "true" ] && [ "$ARCH" = "amd64" ]; then
    GOOS=darwin go build -ldflags "$LINKFLAGS" -o bin/k3kcli-darwin ./cli
    GOOS=windows go build -ldflags "$LINKFLAGS" -o bin/k3kcli-windows ./cli
fi
10	ops/ci	Executable file
@@ -0,0 +1,10 @@
#!/bin/bash
set -e

cd $(dirname $0)

./build
./test
./validate
./validate-ci
./package
8	ops/default	Executable file
@@ -0,0 +1,8 @@
#!/bin/bash
set -e

cd $(dirname $0)

./build
./test
./package
11	ops/entry	Executable file
@@ -0,0 +1,11 @@
#!/bin/bash
set -e

mkdir -p bin dist
if [ -e ./ops/$1 ]; then
    ./ops/"$@"
else
    exec "$@"
fi

chown -R $DAPPER_UID:$DAPPER_GID .
19	ops/package	Executable file
@@ -0,0 +1,19 @@
#!/bin/bash
set -e

source $(dirname $0)/version

cd $(dirname $0)/..

mkdir -p dist/artifacts
cp bin/k3k dist/artifacts/k3k${SUFFIX}
cp bin/k3kcli dist/artifacts/k3kcli${SUFFIX}

IMAGE=${REPO}/k3k:${TAG}
DOCKERFILE=package/Dockerfile
if [ -e ${DOCKERFILE}.${ARCH} ]; then
    DOCKERFILE=${DOCKERFILE}.${ARCH}
fi

docker build -f ${DOCKERFILE} -t ${IMAGE} .
echo Built ${IMAGE}
10	ops/package-chart	Executable file
@@ -0,0 +1,10 @@
#!/bin/bash
set -ex

source $(dirname $0)/version

cd $(dirname $0)/..

mkdir -p deploy/

cr package --package-path deploy/ charts/k3k
3	ops/release	Executable file
@@ -0,0 +1,3 @@
#!/bin/bash

exec $(dirname $0)/ci
32	ops/release-chart	Executable file
@@ -0,0 +1,32 @@
#!/bin/bash
set -ex

source $(dirname $0)/version

cd $(dirname $0)/..

git fetch --tags
CHART_TAG=chart-$(grep "version: " charts/k3k/Chart.yaml | awk '{print $2}')
if [ $(git tag -l "$CHART_TAG") ]; then
    echo "tag already exists"
    exit 1
fi

# release the chart with artifacts
cr upload --token ${GITHUB_TOKEN} \
    --release-name-template "chart-{{ .Version }}" \
    --package-path ./deploy/ \
    --git-repo k3k \
    --skip-existing \
    -o rancher

# update the index.yaml
cr index --token ${GITHUB_TOKEN} \
    --release-name-template "chart-{{ .Version }}" \
    --package-path ./deploy/ \
    --index-path index.yaml \
    --git-repo k3k \
    -o rancher \
    --push
7	ops/test	Executable file
@@ -0,0 +1,7 @@
#!/bin/bash
set -e

cd $(dirname $0)/..

echo Running tests
go test -cover -tags=test ./...
21	ops/validate	Executable file
@@ -0,0 +1,21 @@
#!/bin/bash
set -e

cd $(dirname $0)/..

echo Running validation

PACKAGES="$(go list ./...)"

if ! command -v golangci-lint; then
    echo Skipping validation: no golangci-lint available
    exit
fi

echo Running: golangci-lint
golangci-lint run

echo Running: go fmt
test -z "$(go fmt ${PACKAGES} | tee /dev/stderr)"
15	ops/validate-ci	Executable file
@@ -0,0 +1,15 @@
#!/bin/bash
set -e

cd $(dirname $0)/..

go generate

source ./ops/version

if [ -n "$DIRTY" ]; then
    echo Git is dirty
    git status
    git diff
    exit 1
fi
27	ops/version	Executable file
@@ -0,0 +1,27 @@
#!/bin/bash

if [ -n "$(git status --porcelain --untracked-files=no)" ]; then
    DIRTY="-dirty"
fi

COMMIT=$(git rev-parse --short HEAD)
GIT_TAG=${DRONE_TAG:-$(git tag -l --contains HEAD | head -n 1)}

if [[ -z "$DIRTY" && -n "$GIT_TAG" ]]; then
    VERSION=$GIT_TAG
else
    VERSION="${COMMIT}${DIRTY}"
fi

if [ -z "$ARCH" ]; then
    ARCH=$(go env GOHOSTARCH)
fi

SUFFIX="-${ARCH}"

TAG=${TAG:-${VERSION}${SUFFIX}}
REPO=${REPO:-rancher}

if echo $TAG | grep -q dirty; then
    TAG=dev
fi
4	package/Dockerfile	Normal file
@@ -0,0 +1,4 @@
FROM alpine
COPY bin/k3k /usr/bin/
COPY bin/k3kcli /usr/bin/
CMD ["k3k"]
@@ -1,9 +0,0 @@
FROM alpine

ARG BIN_K3K=bin/k3k
ARG BIN_K3KCLI=bin/k3kcli

COPY ${BIN_K3K} /usr/bin/
COPY ${BIN_K3KCLI} /usr/bin/

CMD ["k3k"]
@@ -1,8 +0,0 @@
# TODO: switch this to BCI-micro or scratch. Left as base right now so that debugging can be done a bit more easily
FROM registry.suse.com/bci/bci-base:15.6

ARG BIN_K3K_KUBELET=bin/k3k-kubelet

COPY ${BIN_K3K_KUBELET} /usr/bin/

ENTRYPOINT ["/usr/bin/k3k-kubelet"]
@@ -7,10 +7,11 @@ import (
	"k8s.io/apimachinery/pkg/runtime/schema"
)

var SchemeGroupVersion = schema.GroupVersion{Group: k3k.GroupName, Version: "v1alpha1"}

var (
	SchemeGroupVersion = schema.GroupVersion{Group: k3k.GroupName, Version: "v1alpha1"}
	SchemBuilder       = runtime.NewSchemeBuilder(addKnownTypes)
	AddToScheme        = SchemBuilder.AddToScheme
)

func Resource(resource string) schema.GroupResource {
@@ -20,11 +21,7 @@ func Resource(resource string) schema.GroupResource {
func addKnownTypes(s *runtime.Scheme) error {
	s.AddKnownTypes(SchemeGroupVersion,
		&Cluster{},
		&ClusterList{},
		&ClusterSet{},
		&ClusterSetList{},
	)
		&ClusterList{})
	metav1.AddToGroupVersion(s, SchemeGroupVersion)

	return nil
}
@@ -1,306 +1,45 @@
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// +genclient
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +kubebuilder:object:root=true
|
||||
// +kubebuilder:storageversion
|
||||
// +kubebuilder:subresource:status
|
||||
|
||||
// Cluster defines a virtual Kubernetes cluster managed by k3k.
|
||||
// It specifies the desired state of a virtual cluster, including version, node configuration, and networking.
|
||||
// k3k uses this to provision and manage these virtual clusters.
|
||||
type Cluster struct {
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
||||
// Spec defines the desired state of the Cluster.
|
||||
//
|
||||
// +kubebuilder:default={}
|
||||
// +optional
|
||||
Spec ClusterSpec `json:"spec"`
|
||||
|
||||
// Status reflects the observed state of the Cluster.
|
||||
//
|
||||
// +optional
|
||||
Status ClusterStatus `json:"status,omitempty"`
|
||||
Spec ClusterSpec `json:"spec"`
|
||||
Status ClusterStatus `json:"status"`
|
||||
}
|
||||
|
||||
// ClusterSpec defines the desired state of a virtual Kubernetes cluster.
|
||||
type ClusterSpec struct {
|
||||
// Version is the K3s version to use for the virtual nodes.
|
||||
// It should follow the K3s versioning convention (e.g., v1.28.2-k3s1).
|
||||
// If not specified, the Kubernetes version of the host node will be used.
|
||||
//
|
||||
// +optional
|
||||
Version string `json:"version"`
|
||||
Name string `json:"name"`
|
||||
Version string `json:"version"`
|
||||
Servers *int32 `json:"servers"`
|
||||
Agents *int32 `json:"agents"`
|
||||
Token string `json:"token"`
|
||||
ClusterCIDR string `json:"clusterCIDR,omitempty"`
|
||||
ServiceCIDR string `json:"serviceCIDR,omitempty"`
|
||||
ClusterDNS string `json:"clusterDNS,omitempty"`
|
||||
ServerArgs []string `json:"serverArgs,omitempty"`
|
||||
AgentArgs []string `json:"agentArgs,omitempty"`
|
||||
TLSSANs []string `json:"tlsSANs,omitempty"`
|
||||
Addons []Addon `json:"addons,omitempty"`
|
||||
|
||||
// Mode specifies the cluster provisioning mode: "shared" or "virtual".
|
||||
// Defaults to "shared". This field is immutable.
|
||||
//
|
||||
// +kubebuilder:default="shared"
|
||||
// +kubebuilder:validation:Enum=shared;virtual
|
||||
// +kubebuilder:validation:XValidation:message="mode is immutable",rule="self == oldSelf"
|
||||
// +optional
|
||||
Mode ClusterMode `json:"mode,omitempty"`
|
||||
|
||||
// Servers specifies the number of K3s pods to run in server (control plane) mode.
|
||||
// Must be at least 1. Defaults to 1.
|
||||
//
|
||||
// +kubebuilder:validation:XValidation:message="cluster must have at least one server",rule="self >= 1"
|
||||
// +kubebuilder:default=1
|
||||
// +optional
|
||||
Servers *int32 `json:"servers"`
|
||||
|
||||
// Agents specifies the number of K3s pods to run in agent (worker) mode.
|
||||
// Must be 0 or greater. Defaults to 0.
|
||||
// This field is ignored in "shared" mode.
|
||||
//
|
||||
// +kubebuilder:default=0
|
||||
// +kubebuilder:validation:XValidation:message="invalid value for agents",rule="self >= 0"
|
||||
// +optional
|
||||
Agents *int32 `json:"agents"`
|
||||
|
||||
// ClusterCIDR is the CIDR range for pod IPs.
|
||||
// Defaults to 10.42.0.0/16 in shared mode and 10.52.0.0/16 in virtual mode.
|
||||
// This field is immutable.
|
||||
//
|
||||
// +kubebuilder:validation:XValidation:message="clusterCIDR is immutable",rule="self == oldSelf"
|
||||
// +optional
|
||||
ClusterCIDR string `json:"clusterCIDR,omitempty"`
|
||||
|
||||
// ServiceCIDR is the CIDR range for service IPs.
|
||||
// Defaults to 10.43.0.0/16 in shared mode and 10.53.0.0/16 in virtual mode.
|
||||
// This field is immutable.
|
||||
//
|
||||
// +kubebuilder:validation:XValidation:message="serviceCIDR is immutable",rule="self == oldSelf"
|
||||
// +optional
|
||||
ServiceCIDR string `json:"serviceCIDR,omitempty"`
|
||||
|
||||
// ClusterDNS is the IP address for the CoreDNS service.
|
||||
// Must be within the ServiceCIDR range. Defaults to 10.43.0.10.
|
||||
// This field is immutable.
|
||||
//
|
||||
// +kubebuilder:validation:XValidation:message="clusterDNS is immutable",rule="self == oldSelf"
|
||||
// +optional
|
||||
ClusterDNS string `json:"clusterDNS,omitempty"`
|
||||
|
||||
// Persistence specifies options for persisting etcd data.
|
||||
// Defaults to dynamic persistence, which uses a PersistentVolumeClaim to provide data persistence.
|
||||
// A default StorageClass is required for dynamic persistence.
|
||||
//
|
||||
// +kubebuilder:default={type: "dynamic"}
|
||||
Persistence PersistenceConfig `json:"persistence,omitempty"`
|
||||
|
||||
// Expose specifies options for exposing the API server.
|
||||
// By default, it's only exposed as a ClusterIP.
|
||||
//
|
||||
// +optional
|
||||
Expose *ExposeConfig `json:"expose,omitempty"`
|
||||
|
||||
// NodeSelector specifies node labels to constrain where server/agent pods are scheduled.
|
||||
// In "shared" mode, this also applies to workloads.
|
||||
//
|
||||
// +optional
|
||||
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
|
||||
|
||||
// PriorityClass specifies the priorityClassName for server/agent pods.
|
||||
// In "shared" mode, this also applies to workloads.
|
||||
//
|
||||
// +optional
|
||||
PriorityClass string `json:"priorityClass,omitempty"`
|
||||
|
||||
// TokenSecretRef is a Secret reference containing the token used by worker nodes to join the cluster.
|
||||
// The Secret must have a "token" field in its data.
|
||||
//
|
||||
// +optional
|
||||
TokenSecretRef *v1.SecretReference `json:"tokenSecretRef"`
|
||||
|
||||
// TLSSANs specifies subject alternative names for the K3s server certificate.
|
||||
//
|
||||
// +optional
|
||||
TLSSANs []string `json:"tlsSANs,omitempty"`
|
||||
|
||||
// ServerArgs specifies ordered key-value pairs for K3s server pods.
|
||||
// Example: ["--tls-san=example.com"]
|
||||
//
|
||||
// +optional
|
||||
ServerArgs []string `json:"serverArgs,omitempty"`
|
||||
|
||||
// AgentArgs specifies ordered key-value pairs for K3s agent pods.
|
||||
// Example: ["--node-name=my-agent-node"]
|
||||
//
|
||||
// +optional
|
||||
AgentArgs []string `json:"agentArgs,omitempty"`
|
||||
|
||||
// Addons specifies secrets containing raw YAML to deploy on cluster startup.
|
||||
//
|
||||
// +optional
|
||||
Addons []Addon `json:"addons,omitempty"`
|
||||
|
||||
// ServerLimit specifies resource limits for server nodes.
|
||||
//
|
||||
// +optional
|
||||
ServerLimit v1.ResourceList `json:"serverLimit,omitempty"`
|
||||
|
||||
// WorkerLimit specifies resource limits for agent nodes.
|
||||
//
|
||||
// +optional
|
||||
WorkerLimit v1.ResourceList `json:"workerLimit,omitempty"`
|
||||
Persistence *PersistenceConfig `json:"persistence,omitempty"`
|
||||
Expose *ExposeConfig `json:"expose,omitempty"`
|
||||
}
|
||||
|
||||
// ClusterMode is the possible provisioning mode of a Cluster.
|
||||
//
|
||||
// +kubebuilder:validation:Enum=shared;virtual
|
||||
// +kubebuilder:default="shared"
|
||||
type ClusterMode string
|
||||
|
||||
const (
|
||||
// SharedClusterMode represents a cluster that shares resources with the host node.
|
||||
SharedClusterMode = ClusterMode("shared")
|
||||
|
||||
// VirtualClusterMode represents a cluster that runs in a virtual environment.
|
||||
VirtualClusterMode = ClusterMode("virtual")
|
||||
)
|
||||
|
||||
// PersistenceMode is the storage mode of a Cluster.
|
||||
//
|
||||
// +kubebuilder:default="dynamic"
|
||||
type PersistenceMode string
|
||||
|
||||
const (
|
||||
// EphemeralPersistenceMode represents a cluster with no data persistence.
|
||||
EphemeralPersistenceMode = PersistenceMode("ephemeral")
|
||||
|
||||
// DynamicPersistenceMode represents a cluster with dynamic data persistence using a PVC.
|
||||
DynamicPersistenceMode = PersistenceMode("dynamic")
|
||||
)
|
||||
|
||||
// Addon specifies a Secret containing YAML to be deployed on cluster startup.
|
||||
type Addon struct {
|
||||
// SecretNamespace is the namespace of the Secret.
|
||||
SecretNamespace string `json:"secretNamespace,omitempty"`
|
||||
|
||||
// SecretRef is the name of the Secret.
|
||||
SecretRef string `json:"secretRef,omitempty"`
|
||||
}
|
||||
|
||||
// PersistenceConfig specifies options for persisting etcd data.
|
||||
type PersistenceConfig struct {
|
||||
// Type specifies the persistence mode.
|
||||
//
|
||||
// +kubebuilder:default="dynamic"
|
||||
Type PersistenceMode `json:"type"`
|
||||
|
||||
// StorageClassName is the name of the StorageClass to use for the PVC.
|
||||
// This field is only relevant in "dynamic" mode.
|
||||
//
|
||||
// +optional
|
||||
StorageClassName *string `json:"storageClassName,omitempty"`
|
||||
|
||||
// StorageRequestSize is the requested size for the PVC.
|
||||
// This field is only relevant in "dynamic" mode.
|
||||
//
|
||||
// +optional
|
||||
StorageRequestSize string `json:"storageRequestSize,omitempty"`
|
||||
}
|
||||
|
||||
// ExposeConfig specifies options for exposing the API server.
|
||||
type ExposeConfig struct {
|
||||
// Ingress specifies options for exposing the API server through an Ingress.
|
||||
//
|
||||
// +optional
|
||||
Ingress *IngressConfig `json:"ingress,omitempty"`
|
||||
|
||||
// LoadBalancer specifies options for exposing the API server through a LoadBalancer service.
|
||||
//
|
||||
// +optional
|
||||
LoadBalancer *LoadBalancerConfig `json:"loadbalancer,omitempty"`
|
||||
|
||||
// NodePort specifies options for exposing the API server through NodePort.
|
||||
//
|
||||
// +optional
|
||||
NodePort *NodePortConfig `json:"nodePort,omitempty"`
|
||||
}
|
||||
|
||||
// IngressConfig specifies options for exposing the API server through an Ingress.
|
||||
type IngressConfig struct {
|
||||
// Annotations specifies annotations to add to the Ingress.
|
||||
//
|
||||
// +optional
|
||||
Annotations map[string]string `json:"annotations,omitempty"`
|
||||
|
||||
// IngressClassName specifies the IngressClass to use for the Ingress.
|
||||
//
|
||||
// +optional
|
||||
IngressClassName string `json:"ingressClassName,omitempty"`
|
||||
}
|
||||
|
||||
// LoadBalancerConfig specifies options for exposing the API server through a LoadBalancer service.
|
||||
type LoadBalancerConfig struct{}
|
||||
|
||||
// NodePortConfig specifies options for exposing the API server through NodePort.
|
||||
type NodePortConfig struct {
|
||||
// ServerPort is the port on each node on which the K3s server service is exposed when type is NodePort.
|
||||
// If not specified, a port will be allocated (default: 30000-32767).
|
||||
//
|
||||
// +optional
|
||||
ServerPort *int32 `json:"serverPort,omitempty"`
|
||||
|
||||
// ServicePort is the port on each node on which the K3s service is exposed when type is NodePort.
|
||||
// If not specified, a port will be allocated (default: 30000-32767).
|
||||
//
|
||||
// +optional
|
||||
ServicePort *int32 `json:"servicePort,omitempty"`
|
||||
|
||||
// ETCDPort is the port on each node on which the ETCD service is exposed when type is NodePort.
|
||||
// If not specified, a port will be allocated (default: 30000-32767).
|
||||
//
|
||||
// +optional
|
||||
ETCDPort *int32 `json:"etcdPort,omitempty"`
|
||||
}
|
||||
|
||||
// ClusterStatus reflects the observed state of a Cluster.
|
||||
type ClusterStatus struct {
|
||||
// HostVersion is the Kubernetes version of the host node.
|
||||
//
|
||||
// +optional
|
||||
HostVersion string `json:"hostVersion,omitempty"`
|
||||
|
||||
// ClusterCIDR is the CIDR range for pod IPs.
|
||||
//
|
||||
// +optional
|
||||
ClusterCIDR string `json:"clusterCIDR,omitempty"`
|
||||
|
||||
// ServiceCIDR is the CIDR range for service IPs.
|
||||
//
|
||||
// +optional
|
||||
ServiceCIDR string `json:"serviceCIDR,omitempty"`
|
||||
|
||||
// ClusterDNS is the IP address for the CoreDNS service.
|
||||
//
|
||||
// +optional
|
||||
ClusterDNS string `json:"clusterDNS,omitempty"`
|
||||
|
||||
// TLSSANs specifies subject alternative names for the K3s server certificate.
|
||||
//
|
||||
// +optional
|
||||
TLSSANs []string `json:"tlsSANs,omitempty"`
|
||||
|
||||
// Persistence specifies options for persisting etcd data.
|
||||
//
|
||||
// +optional
|
||||
Persistence PersistenceConfig `json:"persistence,omitempty"`
|
||||
SecretRef string `json:"secretRef,omitempty"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +kubebuilder:object:root=true
|
||||
|
||||
// ClusterList is a list of Cluster resources.
|
||||
type ClusterList struct {
|
||||
metav1.ListMeta `json:"metadata,omitempty"`
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
@@ -308,128 +47,34 @@ type ClusterList struct {
|
||||
Items []Cluster `json:"items"`
|
||||
}
|
||||
|
||||
// +genclient
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +kubebuilder:storageversion
|
||||
// +kubebuilder:subresource:status
|
||||
// +kubebuilder:object:root=true
|
||||
// +kubebuilder:validation:XValidation:rule="self.metadata.name == \"default\"",message="Name must match 'default'"
|
||||
// +kubebuilder:printcolumn:JSONPath=".spec.displayName",name=Display Name,type=string
|
||||
// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",name=Age,type=date
|
||||
|
||||
// ClusterSet represents a group of virtual Kubernetes clusters managed by k3k.
|
||||
// It allows defining common configurations and constraints for the clusters within the set.
|
||||
type ClusterSet struct {
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
||||
// Spec defines the desired state of the ClusterSet.
|
||||
//
|
||||
// +kubebuilder:default={}
|
||||
Spec ClusterSetSpec `json:"spec"`
|
||||
|
||||
// Status reflects the observed state of the ClusterSet.
|
||||
//
|
||||
// +optional
|
||||
Status ClusterSetStatus `json:"status,omitempty"`
|
||||
type PersistenceConfig struct {
|
||||
// Type can be ephermal, static, dynamic
|
||||
Type string `json:"type"`
|
||||
StorageClassName string `json:"storageClassName,omitempty"`
|
||||
StorageRequestSize string `json:"storageRequestSize,omitempty"`
|
||||
}
|
||||
|
||||
// ClusterSetSpec defines the desired state of a ClusterSet.
|
||||
type ClusterSetSpec struct {
|
||||
|
||||
// DisplayName is the human-readable name for the set.
|
||||
//
|
||||
// +optional
|
||||
DisplayName string `json:"displayName,omitempty"`
|
||||
|
||||
// Quota specifies the resource limits for clusters within a clusterset.
|
||||
//
|
||||
// +optional
|
||||
Quota *v1.ResourceQuotaSpec `json:"quota,omitempty"`
|
||||
|
||||
// Limit specifies the LimitRange that will be applied to all pods within the ClusterSet
|
||||
// to set defaults and constraints (min/max)
|
||||
//
|
||||
// +optional
|
||||
Limit *v1.LimitRangeSpec `json:"limit,omitempty"`
|
||||
|
||||
// DefaultNodeSelector specifies the node selector that applies to all clusters (server + agent) in the set.
|
||||
//
|
||||
// +optional
|
||||
DefaultNodeSelector map[string]string `json:"defaultNodeSelector,omitempty"`
|
||||
|
||||
// DefaultPriorityClass specifies the priorityClassName applied to all pods of all clusters in the set.
|
||||
//
|
||||
// +optional
|
||||
DefaultPriorityClass string `json:"defaultPriorityClass,omitempty"`
|
||||
|
||||
// AllowedModeTypes specifies the allowed cluster provisioning modes. Defaults to [shared].
|
||||
//
|
||||
// +kubebuilder:default={shared}
|
||||
// +kubebuilder:validation:XValidation:message="mode is immutable",rule="self == oldSelf"
|
||||
// +kubebuilder:validation:MinItems=1
|
||||
// +optional
|
||||
AllowedModeTypes []ClusterMode `json:"allowedModeTypes,omitempty"`
|
||||
|
||||
// DisableNetworkPolicy indicates whether to disable the creation of a default network policy for cluster isolation.
|
||||
//
|
||||
// +optional
|
||||
DisableNetworkPolicy bool `json:"disableNetworkPolicy,omitempty"`
|
||||
|
||||
// PodSecurityAdmissionLevel specifies the pod security admission level applied to the pods in the namespace.
|
||||
//
|
||||
// +optional
|
||||
PodSecurityAdmissionLevel *PodSecurityAdmissionLevel `json:"podSecurityAdmissionLevel,omitempty"`
|
||||
type ExposeConfig struct {
|
||||
Ingress *IngressConfig `json:"ingress"`
|
||||
LoadBalancer *LoadBalancerConfig `json:"loadbalancer"`
|
||||
NodePort *NodePortConfig `json:"nodePort"`
|
||||
}
|
||||
|
||||
// PodSecurityAdmissionLevel is the policy level applied to the pods in the namespace.
|
||||
//
|
||||
// +kubebuilder:validation:Enum=privileged;baseline;restricted
|
||||
type PodSecurityAdmissionLevel string
|
||||
|
||||
const (
|
||||
// PrivilegedPodSecurityAdmissionLevel allows all pods to be admitted.
|
||||
PrivilegedPodSecurityAdmissionLevel = PodSecurityAdmissionLevel("privileged")
|
||||
|
||||
// BaselinePodSecurityAdmissionLevel enforces a baseline level of security restrictions.
|
||||
BaselinePodSecurityAdmissionLevel = PodSecurityAdmissionLevel("baseline")
|
||||
|
||||
// RestrictedPodSecurityAdmissionLevel enforces stricter security restrictions.
|
||||
RestrictedPodSecurityAdmissionLevel = PodSecurityAdmissionLevel("restricted")
|
||||
)
|
||||
|
||||
// ClusterSetStatus reflects the observed state of a ClusterSet.
|
||||
type ClusterSetStatus struct {
|
||||
// ObservedGeneration was the generation at the time the status was updated.
|
||||
//
|
||||
// +optional
|
||||
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
|
||||
|
||||
// LastUpdate is the timestamp when the status was last updated.
|
||||
//
|
||||
// +optional
|
||||
LastUpdate string `json:"lastUpdateTime,omitempty"`
|
||||
|
||||
// Summary is a summary of the status.
|
||||
//
|
||||
// +optional
|
||||
Summary string `json:"summary,omitempty"`
|
||||
|
||||
// Conditions are the individual conditions for the cluster set.
|
||||
//
|
||||
// +optional
|
||||
// +patchMergeKey=type
|
||||
// +patchStrategy=merge
|
||||
Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
|
type IngressConfig struct {
	Enabled          bool   `json:"enabled"`
	IngressClassName string `json:"ingressClassName"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:object:root=true

// ClusterSetList is a list of ClusterSet resources.
type ClusterSetList struct {
	metav1.ListMeta `json:"metadata,omitempty"`
	metav1.TypeMeta `json:",inline"`

	Items []ClusterSet `json:"items"`
type LoadBalancerConfig struct {
	Enabled bool `json:"enabled"`
}

type NodePortConfig struct {
	Enabled bool `json:"enabled"`
}

type ClusterStatus struct {
	ClusterCIDR string `json:"clusterCIDR,omitempty"`
	ServiceCIDR string `json:"serviceCIDR,omitempty"`
	ClusterDNS  string `json:"clusterDNS,omitempty"`
}
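To make the field documentation above concrete, here is a minimal sketch of a Cluster built from the documented spec fields, following the newer (annotated) side of this diff and reusing the metav1 / v1 / ptr import aliases used in these files; the name, namespace, and token secret are placeholder values, and defaulted fields are spelled out rather than relied upon.

// Sketch of a Cluster using the documented spec fields; values are placeholders.
cluster := &v1alpha1.Cluster{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "example",
		Namespace: "k3k-example",
	},
	Spec: v1alpha1.ClusterSpec{
		Version: "v1.28.2-k3s1",             // host Kubernetes version is used when empty
		Mode:    v1alpha1.SharedClusterMode, // "shared" is also the default
		Servers: ptr.To(int32(1)),           // must be at least 1
		Agents:  ptr.To(int32(0)),           // ignored in shared mode
		Persistence: v1alpha1.PersistenceConfig{
			Type: v1alpha1.DynamicPersistenceMode, // requires a default StorageClass
		},
		TLSSANs: []string{"example.com"},
		TokenSecretRef: &v1.SecretReference{
			Name:      "example-token", // the Secret must carry a "token" key
			Namespace: "k3k-example",
		},
	},
}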
@@ -6,8 +6,6 @@
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
@@ -33,7 +31,7 @@ func (in *Cluster) DeepCopyInto(out *Cluster) {
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.Spec.DeepCopyInto(&out.Spec)
|
||||
in.Status.DeepCopyInto(&out.Status)
|
||||
out.Status = in.Status
|
||||
return
|
||||
}
|
||||
|
||||
@@ -88,133 +86,6 @@ func (in *ClusterList) DeepCopyObject() runtime.Object {
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ClusterSet) DeepCopyInto(out *ClusterSet) {
|
||||
*out = *in
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.Spec.DeepCopyInto(&out.Spec)
|
||||
in.Status.DeepCopyInto(&out.Status)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSet.
|
||||
func (in *ClusterSet) DeepCopy() *ClusterSet {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ClusterSet)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *ClusterSet) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ClusterSetList) DeepCopyInto(out *ClusterSetList) {
|
||||
*out = *in
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
out.TypeMeta = in.TypeMeta
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]ClusterSet, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSetList.
|
||||
func (in *ClusterSetList) DeepCopy() *ClusterSetList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ClusterSetList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *ClusterSetList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ClusterSetSpec) DeepCopyInto(out *ClusterSetSpec) {
|
||||
*out = *in
|
||||
if in.Quota != nil {
|
||||
in, out := &in.Quota, &out.Quota
|
||||
*out = new(v1.ResourceQuotaSpec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.Limit != nil {
|
||||
in, out := &in.Limit, &out.Limit
|
||||
*out = new(v1.LimitRangeSpec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.DefaultNodeSelector != nil {
|
||||
in, out := &in.DefaultNodeSelector, &out.DefaultNodeSelector
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
if in.AllowedModeTypes != nil {
|
||||
in, out := &in.AllowedModeTypes, &out.AllowedModeTypes
|
||||
*out = make([]ClusterMode, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.PodSecurityAdmissionLevel != nil {
|
||||
in, out := &in.PodSecurityAdmissionLevel, &out.PodSecurityAdmissionLevel
|
||||
*out = new(PodSecurityAdmissionLevel)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSetSpec.
|
||||
func (in *ClusterSetSpec) DeepCopy() *ClusterSetSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ClusterSetSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ClusterSetStatus) DeepCopyInto(out *ClusterSetStatus) {
|
||||
*out = *in
|
||||
if in.Conditions != nil {
|
||||
in, out := &in.Conditions, &out.Conditions
|
||||
*out = make([]metav1.Condition, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSetStatus.
|
||||
func (in *ClusterSetStatus) DeepCopy() *ClusterSetStatus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ClusterSetStatus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
|
||||
*out = *in
|
||||
@@ -228,29 +99,6 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
|
||||
*out = new(int32)
|
||||
**out = **in
|
||||
}
|
||||
in.Persistence.DeepCopyInto(&out.Persistence)
|
||||
if in.Expose != nil {
|
||||
in, out := &in.Expose, &out.Expose
|
||||
*out = new(ExposeConfig)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.NodeSelector != nil {
|
||||
in, out := &in.NodeSelector, &out.NodeSelector
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
if in.TokenSecretRef != nil {
|
||||
in, out := &in.TokenSecretRef, &out.TokenSecretRef
|
||||
*out = new(v1.SecretReference)
|
||||
**out = **in
|
||||
}
|
||||
if in.TLSSANs != nil {
|
||||
in, out := &in.TLSSANs, &out.TLSSANs
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.ServerArgs != nil {
|
||||
in, out := &in.ServerArgs, &out.ServerArgs
|
||||
*out = make([]string, len(*in))
|
||||
@@ -261,24 +109,25 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.TLSSANs != nil {
|
||||
in, out := &in.TLSSANs, &out.TLSSANs
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Addons != nil {
|
||||
in, out := &in.Addons, &out.Addons
|
||||
*out = make([]Addon, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.ServerLimit != nil {
|
||||
in, out := &in.ServerLimit, &out.ServerLimit
|
||||
*out = make(v1.ResourceList, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val.DeepCopy()
|
||||
}
|
||||
if in.Persistence != nil {
|
||||
in, out := &in.Persistence, &out.Persistence
|
||||
*out = new(PersistenceConfig)
|
||||
**out = **in
|
||||
}
|
||||
if in.WorkerLimit != nil {
|
||||
in, out := &in.WorkerLimit, &out.WorkerLimit
|
||||
*out = make(v1.ResourceList, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val.DeepCopy()
|
||||
}
|
||||
if in.Expose != nil {
|
||||
in, out := &in.Expose, &out.Expose
|
||||
*out = new(ExposeConfig)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -296,12 +145,6 @@ func (in *ClusterSpec) DeepCopy() *ClusterSpec {
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) {
|
||||
*out = *in
|
||||
if in.TLSSANs != nil {
|
||||
in, out := &in.TLSSANs, &out.TLSSANs
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
in.Persistence.DeepCopyInto(&out.Persistence)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -321,7 +164,7 @@ func (in *ExposeConfig) DeepCopyInto(out *ExposeConfig) {
|
||||
if in.Ingress != nil {
|
||||
in, out := &in.Ingress, &out.Ingress
|
||||
*out = new(IngressConfig)
|
||||
(*in).DeepCopyInto(*out)
|
||||
**out = **in
|
||||
}
|
||||
if in.LoadBalancer != nil {
|
||||
in, out := &in.LoadBalancer, &out.LoadBalancer
|
||||
@@ -331,7 +174,7 @@ func (in *ExposeConfig) DeepCopyInto(out *ExposeConfig) {
|
||||
if in.NodePort != nil {
|
||||
in, out := &in.NodePort, &out.NodePort
|
||||
*out = new(NodePortConfig)
|
||||
(*in).DeepCopyInto(*out)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -349,13 +192,6 @@ func (in *ExposeConfig) DeepCopy() *ExposeConfig {
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *IngressConfig) DeepCopyInto(out *IngressConfig) {
|
||||
*out = *in
|
||||
if in.Annotations != nil {
|
||||
in, out := &in.Annotations, &out.Annotations
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -388,21 +224,6 @@ func (in *LoadBalancerConfig) DeepCopy() *LoadBalancerConfig {
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *NodePortConfig) DeepCopyInto(out *NodePortConfig) {
|
||||
*out = *in
|
||||
if in.ServerPort != nil {
|
||||
in, out := &in.ServerPort, &out.ServerPort
|
||||
*out = new(int32)
|
||||
**out = **in
|
||||
}
|
||||
if in.ServicePort != nil {
|
||||
in, out := &in.ServicePort, &out.ServicePort
|
||||
*out = new(int32)
|
||||
**out = **in
|
||||
}
|
||||
if in.ETCDPort != nil {
|
||||
in, out := &in.ETCDPort, &out.ETCDPort
|
||||
*out = new(int32)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -419,11 +240,6 @@ func (in *NodePortConfig) DeepCopy() *NodePortConfig {
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PersistenceConfig) DeepCopyInto(out *PersistenceConfig) {
|
||||
*out = *in
|
||||
if in.StorageClassName != nil {
|
||||
in, out := &in.StorageClassName, &out.StorageClassName
|
||||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -1,3 +0,0 @@
package buildinfo

var Version = "dev"
@@ -1,74 +0,0 @@
package certs

import (
	"crypto"
	"crypto/x509"
	"fmt"
	"net"
	"time"

	certutil "github.com/rancher/dynamiclistener/cert"
)

func CreateClientCertKey(commonName string, organization []string, altNames *certutil.AltNames, extKeyUsage []x509.ExtKeyUsage, expiresAt time.Duration, caCert, caKey string) ([]byte, []byte, error) {
	caKeyPEM, err := certutil.ParsePrivateKeyPEM([]byte(caKey))
	if err != nil {
		return nil, nil, err
	}

	caCertPEM, err := certutil.ParseCertsPEM([]byte(caCert))
	if err != nil {
		return nil, nil, err
	}

	b, err := generateKey()
	if err != nil {
		return nil, nil, err
	}

	key, err := certutil.ParsePrivateKeyPEM(b)
	if err != nil {
		return nil, nil, err
	}

	cfg := certutil.Config{
		CommonName:   commonName,
		Organization: organization,
		Usages:       extKeyUsage,
		ExpiresAt:    expiresAt,
	}
	if altNames != nil {
		cfg.AltNames = *altNames
	}

	cert, err := certutil.NewSignedCert(cfg, key.(crypto.Signer), caCertPEM[0], caKeyPEM.(crypto.Signer))
	if err != nil {
		return nil, nil, err
	}

	return append(certutil.EncodeCertPEM(cert), certutil.EncodeCertPEM(caCertPEM[0])...), b, nil
}

func generateKey() (data []byte, err error) {
	generatedData, err := certutil.MakeEllipticPrivateKeyPEM()
	if err != nil {
		return nil, fmt.Errorf("error generating key: %v", err)
	}

	return generatedData, nil
}

func AddSANs(sans []string) certutil.AltNames {
	var altNames certutil.AltNames

	for _, san := range sans {
		ip := net.ParseIP(san)
		if ip == nil {
			altNames.DNSNames = append(altNames.DNSNames, san)
		} else {
			altNames.IPs = append(altNames.IPs, ip)
		}
	}

	return altNames
}
|
||||
package agent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
"github.com/rancher/k3k/pkg/controller"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"github.com/rancher/k3k/pkg/controller/util"
|
||||
apps "k8s.io/api/apps/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/utils/pointer"
|
||||
)
|
||||
|
||||
const (
|
||||
configName = "agent-config"
|
||||
)
|
||||
const agentName = "k3k-agent"
|
||||
|
||||
type ResourceEnsurer interface {
|
||||
EnsureResources(context.Context) error
|
||||
}
|
||||
|
||||
type Config struct {
|
||||
type Agent struct {
|
||||
cluster *v1alpha1.Cluster
|
||||
client ctrlruntimeclient.Client
|
||||
scheme *runtime.Scheme
|
||||
}
|
||||
|
||||
func NewConfig(cluster *v1alpha1.Cluster, client ctrlruntimeclient.Client, scheme *runtime.Scheme) *Config {
|
||||
return &Config{
|
||||
func New(cluster *v1alpha1.Cluster) *Agent {
|
||||
return &Agent{
|
||||
cluster: cluster,
|
||||
client: client,
|
||||
scheme: scheme,
|
||||
}
|
||||
}
|
||||
|
||||
func configSecretName(clusterName string) string {
|
||||
return controller.SafeConcatNameWithPrefix(clusterName, configName)
|
||||
func (a *Agent) Deploy() *apps.Deployment {
|
||||
image := util.K3SImage(a.cluster)
|
||||
|
||||
const name = "k3k-agent"
|
||||
|
||||
return &apps.Deployment{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Deployment",
|
||||
APIVersion: "apps/v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: a.cluster.Name + "-" + name,
|
||||
Namespace: util.ClusterNamespace(a.cluster),
|
||||
},
|
||||
Spec: apps.DeploymentSpec{
|
||||
Replicas: a.cluster.Spec.Agents,
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"cluster": a.cluster.Name,
|
||||
"type": "agent",
|
||||
},
|
||||
},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
"cluster": a.cluster.Name,
|
||||
"type": "agent",
|
||||
},
|
||||
},
|
||||
Spec: a.podSpec(image, name, a.cluster.Spec.AgentArgs, false),
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func ensureObject(ctx context.Context, cfg *Config, obj ctrlruntimeclient.Object) error {
|
||||
log := ctrl.LoggerFrom(ctx)
|
||||
func (a *Agent) StatefulAgent(cluster *v1alpha1.Cluster) *apps.StatefulSet {
|
||||
image := util.K3SImage(cluster)
|
||||
|
||||
key := ctrlruntimeclient.ObjectKeyFromObject(obj)
|
||||
|
||||
log.Info(fmt.Sprintf("ensuring %T", obj), "key", key)
|
||||
|
||||
if err := controllerutil.SetControllerReference(cfg.cluster, obj, cfg.scheme); err != nil {
|
||||
return err
|
||||
return &apps.StatefulSet{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Statefulset",
|
||||
APIVersion: "apps/v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: cluster.Name + "-" + agentName,
|
||||
Namespace: util.ClusterNamespace(cluster),
|
||||
},
|
||||
Spec: apps.StatefulSetSpec{
|
||||
ServiceName: cluster.Name + "-" + agentName + "-headless",
|
||||
Replicas: cluster.Spec.Agents,
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"cluster": cluster.Name,
|
||||
"type": "agent",
|
||||
},
|
||||
},
|
||||
VolumeClaimTemplates: []v1.PersistentVolumeClaim{
|
||||
{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "PersistentVolumeClaim",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "varlibrancherk3s",
|
||||
Namespace: util.ClusterNamespace(cluster),
|
||||
},
|
||||
Spec: v1.PersistentVolumeClaimSpec{
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
|
||||
StorageClassName: &cluster.Spec.Persistence.StorageClassName,
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
"storage": resource.MustParse(cluster.Spec.Persistence.StorageRequestSize),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "PersistentVolumeClaim",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "varlibkubelet",
|
||||
Namespace: util.ClusterNamespace(cluster),
|
||||
},
|
||||
Spec: v1.PersistentVolumeClaimSpec{
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
"storage": resource.MustParse(cluster.Spec.Persistence.StorageRequestSize),
|
||||
},
|
||||
},
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
|
||||
StorageClassName: &cluster.Spec.Persistence.StorageClassName,
|
||||
},
|
||||
},
|
||||
},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
"cluster": cluster.Name,
|
||||
"type": "agent",
|
||||
},
|
||||
},
|
||||
Spec: a.podSpec(image, agentName, cluster.Spec.AgentArgs, true),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
if err := cfg.client.Create(ctx, obj); err != nil {
|
||||
if apierrors.IsAlreadyExists(err) {
|
||||
return cfg.client.Update(ctx, obj)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *Agent) podSpec(image, name string, args []string, statefulSet bool) v1.PodSpec {
|
||||
args = append([]string{"agent", "--config", "/opt/rancher/k3s/config.yaml"}, args...)
|
||||
podSpec := v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: "config",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Secret: &v1.SecretVolumeSource{
|
||||
SecretName: name + "-config",
|
||||
Items: []v1.KeyToPath{
|
||||
{
|
||||
Key: "config.yaml",
|
||||
Path: "config.yaml",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "run",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "varrun",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "varlibcni",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "varlog",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{},
|
||||
},
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: name,
|
||||
Image: image,
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
Privileged: pointer.Bool(true),
|
||||
},
|
||||
Command: []string{
|
||||
"/bin/k3s",
|
||||
},
|
||||
Args: args,
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: "config",
|
||||
MountPath: "/opt/rancher/k3s/",
|
||||
ReadOnly: false,
|
||||
},
|
||||
{
|
||||
Name: "run",
|
||||
MountPath: "/run",
|
||||
ReadOnly: false,
|
||||
},
|
||||
{
|
||||
Name: "varrun",
|
||||
MountPath: "/var/run",
|
||||
ReadOnly: false,
|
||||
},
|
||||
{
|
||||
Name: "varlibcni",
|
||||
MountPath: "/var/lib/cni",
|
||||
ReadOnly: false,
|
||||
},
|
||||
{
|
||||
Name: "varlibkubelet",
|
||||
MountPath: "/var/lib/kubelet",
|
||||
ReadOnly: false,
|
||||
},
|
||||
{
|
||||
Name: "varlibrancherk3s",
|
||||
MountPath: "/var/lib/rancher/k3s",
|
||||
ReadOnly: false,
|
||||
},
|
||||
{
|
||||
Name: "varlog",
|
||||
MountPath: "/var/log",
|
||||
ReadOnly: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
if !statefulSet {
|
||||
podSpec.Volumes = append(podSpec.Volumes, v1.Volume{
|
||||
Name: "varlibkubelet",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{},
|
||||
},
|
||||
}, v1.Volume{
|
||||
|
||||
Name: "varlibrancherk3s",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{},
|
||||
},
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
return podSpec
|
||||
}
|
||||
|
||||
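After this change the Agent type only builds Kubernetes objects; the caller owns the client and decides how to apply them. A minimal sketch of that wiring, assuming a controller-runtime client supplied by the cluster reconciler and the package path used elsewhere in this diff (the function name below is illustrative only):

package example

import (
	"context"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	"github.com/rancher/k3k/pkg/controller/cluster/agent"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
)

// ensureAgentDeployment is illustrative only; cl and cluster are assumed to be
// provided by the cluster reconciler that consumes this package.
func ensureAgentDeployment(ctx context.Context, cl ctrlruntimeclient.Client, cluster *v1alpha1.Cluster) error {
	// New only captures the cluster; Deploy builds the agent Deployment.
	a := agent.New(cluster)

	deploy := a.Deploy()
	if err := cl.Create(ctx, deploy); err != nil && !apierrors.IsAlreadyExists(err) {
		return err
	}

	return nil
}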
30
pkg/controller/cluster/agent/service.go
Normal file
@@ -0,0 +1,30 @@
package agent

import (
	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	"github.com/rancher/k3k/pkg/controller/util"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func (a *Agent) StatefulAgentService(cluster *v1alpha1.Cluster) *v1.Service {
	return &v1.Service{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Service",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      cluster.Name + "-" + agentName + "-headless",
			Namespace: util.ClusterNamespace(cluster),
		},
		Spec: v1.ServiceSpec{
			Type:      v1.ServiceTypeClusterIP,
			ClusterIP: v1.ClusterIPNone,
			Selector: map[string]string{
				"cluster": cluster.Name,
				"type":    "agent",
			},
			Ports: []v1.ServicePort{},
		},
	}
}

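StatefulAgentService pairs with StatefulAgent above: the headless Service name must match the StatefulSet's ServiceName so each agent pod gets a stable DNS record. A sketch of that pairing, again assuming a reconciler-supplied client (the function name is illustrative only):

package example

import (
	"context"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	"github.com/rancher/k3k/pkg/controller/cluster/agent"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
)

// ensureStatefulAgent is illustrative only: it shows the intended pairing of
// the agent StatefulSet with its headless Service.
func ensureStatefulAgent(ctx context.Context, cl ctrlruntimeclient.Client, cluster *v1alpha1.Cluster) error {
	a := agent.New(cluster)

	sts := a.StatefulAgent(cluster)
	svc := a.StatefulAgentService(cluster)

	// sts.Spec.ServiceName and svc.Name are both "<cluster>-k3k-agent-headless",
	// which is what gives each agent pod a stable network identity.
	for _, obj := range []ctrlruntimeclient.Object{svc, sts} {
		if err := cl.Create(ctx, obj); err != nil && !apierrors.IsAlreadyExists(err) {
			return err
		}
	}

	return nil
}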
@@ -1,474 +0,0 @@
package agent

import (
	"context"
	"crypto"
	"crypto/x509"
	"errors"
	"fmt"
	"time"

	certutil "github.com/rancher/dynamiclistener/cert"
	"github.com/rancher/k3k/k3k-kubelet/translate"
	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	"github.com/rancher/k3k/pkg/controller"
	"github.com/rancher/k3k/pkg/controller/certs"
	apps "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
)

const (
	sharedKubeletConfigPath = "/opt/rancher/k3k/config.yaml"
	SharedNodeAgentName     = "kubelet"
	SharedNodeMode          = "shared"
)

type SharedAgent struct {
	*Config
	serviceIP       string
	image           string
	imagePullPolicy string
	token           string
}

func NewSharedAgent(config *Config, serviceIP, image, imagePullPolicy, token string) *SharedAgent {
	return &SharedAgent{
		Config:          config,
		serviceIP:       serviceIP,
		image:           image,
		imagePullPolicy: imagePullPolicy,
		token:           token,
	}
}

func (s *SharedAgent) Name() string {
	return controller.SafeConcatNameWithPrefix(s.cluster.Name, SharedNodeAgentName)
}

func (s *SharedAgent) EnsureResources(ctx context.Context) error {
	if err := errors.Join(
		s.config(ctx),
		s.serviceAccount(ctx),
		s.role(ctx),
		s.roleBinding(ctx),
		s.service(ctx),
		s.daemonset(ctx),
		s.dnsService(ctx),
		s.webhookTLS(ctx),
	); err != nil {
		return fmt.Errorf("failed to ensure some resources: %w", err)
	}

	return nil
}

func (s *SharedAgent) ensureObject(ctx context.Context, obj ctrlruntimeclient.Object) error {
	return ensureObject(ctx, s.Config, obj)
}

func (s *SharedAgent) config(ctx context.Context) error {
	config := sharedAgentData(s.cluster, s.Name(), s.token, s.serviceIP)

	configSecret := &v1.Secret{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Secret",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      configSecretName(s.cluster.Name),
			Namespace: s.cluster.Namespace,
		},
		Data: map[string][]byte{
			"config.yaml": []byte(config),
		},
	}

	return s.ensureObject(ctx, configSecret)
}

func sharedAgentData(cluster *v1alpha1.Cluster, serviceName, token, ip string) string {
	version := cluster.Spec.Version
	if cluster.Spec.Version == "" {
		version = cluster.Status.HostVersion
	}

	return fmt.Sprintf(`clusterName: %s
clusterNamespace: %s
serverIP: %s
serviceName: %s
token: %s
version: %s`,
		cluster.Name, cluster.Namespace, ip, serviceName, token, version)
}

func (s *SharedAgent) daemonset(ctx context.Context) error {
	labels := map[string]string{
		"cluster": s.cluster.Name,
		"type":    "agent",
		"mode":    "shared",
	}

	deploy := &apps.DaemonSet{
		TypeMeta: metav1.TypeMeta{
			Kind:       "DaemonSet",
			APIVersion: "apps/v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      s.Name(),
			Namespace: s.cluster.Namespace,
			Labels:    labels,
		},
		Spec: apps.DaemonSetSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: labels,
			},
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: labels,
				},
				Spec: s.podSpec(),
			},
		},
	}

	return s.ensureObject(ctx, deploy)
}

func (s *SharedAgent) podSpec() v1.PodSpec {
	return v1.PodSpec{
		ServiceAccountName: s.Name(),
		NodeSelector:       s.cluster.Spec.NodeSelector,
		Volumes: []v1.Volume{
			{
				Name: "config",
				VolumeSource: v1.VolumeSource{
					Secret: &v1.SecretVolumeSource{
						SecretName: configSecretName(s.cluster.Name),
						Items: []v1.KeyToPath{
							{
								Key:  "config.yaml",
								Path: "config.yaml",
							},
						},
					},
				},
			},
			{
				Name: "webhook-certs",
				VolumeSource: v1.VolumeSource{
					Secret: &v1.SecretVolumeSource{
						SecretName: WebhookSecretName(s.cluster.Name),
						Items: []v1.KeyToPath{
							{
								Key:  "tls.crt",
								Path: "tls.crt",
							},
							{
								Key:  "tls.key",
								Path: "tls.key",
							},
							{
								Key:  "ca.crt",
								Path: "ca.crt",
							},
						},
					},
				},
			},
		},
		Containers: []v1.Container{
			{
				Name:            s.Name(),
				Image:           s.image,
				ImagePullPolicy: v1.PullPolicy(s.imagePullPolicy),
				Resources: v1.ResourceRequirements{
					Limits: v1.ResourceList{},
				},
				Args: []string{
					"--config",
					sharedKubeletConfigPath,
				},
				Env: []v1.EnvVar{
					{
						Name: "AGENT_HOSTNAME",
						ValueFrom: &v1.EnvVarSource{
							FieldRef: &v1.ObjectFieldSelector{
								APIVersion: "v1",
								FieldPath:  "spec.nodeName",
							},
						},
					},
				},
				VolumeMounts: []v1.VolumeMount{
					{
						Name:      "config",
						MountPath: "/opt/rancher/k3k/",
						ReadOnly:  false,
					},
					{
						Name:      "webhook-certs",
						MountPath: "/opt/rancher/k3k-webhook",
						ReadOnly:  false,
					},
				},
				Ports: []v1.ContainerPort{
					{
						Name:          "webhook-port",
						Protocol:      v1.ProtocolTCP,
						ContainerPort: 9443,
					},
				},
			},
		},
	}
}

func (s *SharedAgent) service(ctx context.Context) error {
	svc := &v1.Service{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Service",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      s.Name(),
			Namespace: s.cluster.Namespace,
		},
		Spec: v1.ServiceSpec{
			Type: v1.ServiceTypeClusterIP,
			Selector: map[string]string{
				"cluster": s.cluster.Name,
				"type":    "agent",
				"mode":    "shared",
			},
			Ports: []v1.ServicePort{
				{
					Name:     "k3s-kubelet-port",
					Protocol: v1.ProtocolTCP,
					Port:     10250,
				},
				{
					Name:       "webhook-server",
					Protocol:   v1.ProtocolTCP,
					Port:       9443,
					TargetPort: intstr.FromInt32(9443),
				},
			},
		},
	}

	return s.ensureObject(ctx, svc)
}

func (s *SharedAgent) dnsService(ctx context.Context) error {
	dnsServiceName := controller.SafeConcatNameWithPrefix(s.cluster.Name, "kube-dns")

	svc := &v1.Service{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Service",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      dnsServiceName,
			Namespace: s.cluster.Namespace,
		},
		Spec: v1.ServiceSpec{
			Type: v1.ServiceTypeClusterIP,
			Selector: map[string]string{
				translate.ClusterNameLabel: s.cluster.Name,
				"k8s-app":                  "kube-dns",
			},
			Ports: []v1.ServicePort{
				{
					Name:       "dns",
					Protocol:   v1.ProtocolUDP,
					Port:       53,
					TargetPort: intstr.FromInt32(53),
				},
				{
					Name:       "dns-tcp",
					Protocol:   v1.ProtocolTCP,
					Port:       53,
					TargetPort: intstr.FromInt32(53),
				},
				{
					Name:       "metrics",
					Protocol:   v1.ProtocolTCP,
					Port:       9153,
					TargetPort: intstr.FromInt32(9153),
				},
			},
		},
	}

	return s.ensureObject(ctx, svc)
}

func (s *SharedAgent) serviceAccount(ctx context.Context) error {
	svcAccount := &v1.ServiceAccount{
		TypeMeta: metav1.TypeMeta{
			Kind:       "ServiceAccount",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      s.Name(),
			Namespace: s.cluster.Namespace,
		},
	}

	return s.ensureObject(ctx, svcAccount)
}

func (s *SharedAgent) role(ctx context.Context) error {
	role := &rbacv1.Role{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Role",
			APIVersion: "rbac.authorization.k8s.io/v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      s.Name(),
			Namespace: s.cluster.Namespace,
		},
		Rules: []rbacv1.PolicyRule{
			{
				APIGroups: []string{""},
				Resources: []string{"persistentvolumeclaims", "pods", "pods/log", "pods/exec", "secrets", "configmaps", "services"},
				Verbs:     []string{"*"},
			},
			{
				APIGroups: []string{"k3k.io"},
				Resources: []string{"clusters"},
				Verbs:     []string{"get", "watch", "list"},
			},
			{
				APIGroups: []string{"coordination.k8s.io"},
				Resources: []string{"leases"},
				Verbs:     []string{"*"},
			},
		},
	}

	return s.ensureObject(ctx, role)
}

func (s *SharedAgent) roleBinding(ctx context.Context) error {
	roleBinding := &rbacv1.RoleBinding{
		TypeMeta: metav1.TypeMeta{
			Kind:       "RoleBinding",
			APIVersion: "rbac.authorization.k8s.io/v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      s.Name(),
			Namespace: s.cluster.Namespace,
		},
		RoleRef: rbacv1.RoleRef{
			APIGroup: "rbac.authorization.k8s.io",
			Kind:     "Role",
			Name:     s.Name(),
		},
		Subjects: []rbacv1.Subject{
			{
				Kind:      "ServiceAccount",
				Name:      s.Name(),
				Namespace: s.cluster.Namespace,
			},
		},
	}

	return s.ensureObject(ctx, roleBinding)
}

func (s *SharedAgent) webhookTLS(ctx context.Context) error {
	webhookSecret := &v1.Secret{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Secret",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      WebhookSecretName(s.cluster.Name),
			Namespace: s.cluster.Namespace,
		},
	}

	key := ctrlruntimeclient.ObjectKeyFromObject(webhookSecret)
	if err := s.client.Get(ctx, key, webhookSecret); err != nil {
		if !apierrors.IsNotFound(err) {
			return err
		}

		caPrivateKeyPEM, caCertPEM, err := newWebhookSelfSignedCACerts()
		if err != nil {
			return err
		}

		altNames := []string{s.Name(), s.cluster.Name}

		webhookCert, webhookKey, err := newWebhookCerts(s.Name(), altNames, caPrivateKeyPEM, caCertPEM)
		if err != nil {
			return err
		}

		webhookSecret.Data = map[string][]byte{
			"tls.crt": webhookCert,
			"tls.key": webhookKey,
			"ca.crt":  caCertPEM,
			"ca.key":  caPrivateKeyPEM,
		}

		return s.ensureObject(ctx, webhookSecret)
	}

	// if the webhook secret is found we can skip creating it,
	// but we should still check its validity
	return nil
}

func newWebhookSelfSignedCACerts() ([]byte, []byte, error) {
	// generate CA CERT/KEY
	caPrivateKeyPEM, err := certutil.MakeEllipticPrivateKeyPEM()
	if err != nil {
		return nil, nil, err
	}

	caPrivateKey, err := certutil.ParsePrivateKeyPEM(caPrivateKeyPEM)
	if err != nil {
		return nil, nil, err
	}

	cfg := certutil.Config{
		CommonName: fmt.Sprintf("k3k-webhook-ca@%d", time.Now().Unix()),
	}

	caCert, err := certutil.NewSelfSignedCACert(cfg, caPrivateKey.(crypto.Signer))
	if err != nil {
		return nil, nil, err
	}

	caCertPEM := certutil.EncodeCertPEM(caCert)

	return caPrivateKeyPEM, caCertPEM, nil
}

func newWebhookCerts(commonName string, subAltNames []string, caPrivateKey, caCert []byte) ([]byte, []byte, error) {
	// generate webhook cert bundle
	altNames := certs.AddSANs(subAltNames)
	oneYearExpiration := time.Until(time.Now().AddDate(1, 0, 0))

	return certs.CreateClientCertKey(
		commonName,
		nil,
		&altNames,
		[]x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		oneYearExpiration,
		string(caCert),
		string(caPrivateKey),
	)
}

func WebhookSecretName(clusterName string) string {
	return controller.SafeConcatNameWithPrefix(clusterName, "webhook")
}

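Before its removal, the shared-mode agent bundled everything (config secret, RBAC, services, DaemonSet, webhook TLS) behind a single EnsureResources call. A sketch of how it was driven, assuming the reconciler supplies the Config, service IP, image, pull policy and token (the wrapper function name is illustrative only):

package example

import (
	"context"
	"fmt"

	"github.com/rancher/k3k/pkg/controller/cluster/agent"
)

// ensureSharedAgent is illustrative only; cfg, serviceIP, image, pullPolicy
// and token are assumed to be supplied by the cluster reconciler.
func ensureSharedAgent(ctx context.Context, cfg *agent.Config, serviceIP, image, pullPolicy, token string) error {
	shared := agent.NewSharedAgent(cfg, serviceIP, image, pullPolicy, token)

	// EnsureResources creates or updates the config secret, RBAC objects,
	// services, DaemonSet and webhook TLS secret in one pass.
	if err := shared.EnsureResources(ctx); err != nil {
		return fmt.Errorf("failed to set up shared agent: %w", err)
	}

	return nil
}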
Some files were not shown because too many files have changed in this diff.