Compare commits

..

54 Commits

Author SHA1 Message Date
Hussein Galal
3879912b57 Move to Github Action (#105)
* Move to Github Action

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Move to Github Action

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix code generation

* Add release and chart workflows

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add release and chart workflows

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add release and chart workflows

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add release and chart workflows

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* test release and charts

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* test release and charts

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* test release and charts

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* test release and charts

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* test release and charts

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix GHA migration

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix GHA migration

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-05-21 00:00:47 +03:00
Hussein Galal
0d6bf4922a Fix code generation (#104)
* Fix code generation

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* update go

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-05-14 01:05:13 +03:00
Cuong Nguyen Duc
57c24f6f3c Correct file name (#95) 2024-03-18 08:46:59 +02:00
Hussein Galal
fe23607b71 Update chart to 0.1.4-r1 (#98)
* Update chart to 0.1.4-r1

* Update image to v0.2.1
2024-03-15 02:10:33 +02:00
Hussein Galal
caa0537d5e Renaming binaries and fix typo (#97) 2024-03-15 01:39:18 +02:00
Hussein Galal
0cad65e4fe Fix for readiness probe (#96)
* Fix for readiness probe

* update code generator code
2024-03-15 01:04:52 +02:00
Hussein Galal
cc914cf870 Update chart (#91)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-02-15 23:59:59 +02:00
Hussein Galal
ba35d12124 Cluster spec update (#90)
* Remove unused functions

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* enable cluster server and agent update

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-01-25 06:37:59 +02:00
Hussein Galal
6fc22df6bc Cluster type validations (#89)
* Cluster type validations

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Cluster type validations

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-01-12 23:09:30 +02:00
Hussein Galal
c92f722122 Add delete subcommand (#88)
* Add delete subcommand

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add delete subcommand

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-01-11 02:36:12 +02:00
Hussein Galal
5e141fe98e Add kubeconfig subcommand (#87)
* Add kubeconfig subcommand

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add kubeconfig subcommand

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add kubeconfig subcommand

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add kubeconfig subcommand

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-01-11 00:57:46 +02:00
Hussein Galal
4b2308e709 Update chart to v0.1.2-r1 (#82)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-01-06 07:38:54 +02:00
Hussein Galal
3cdcb04e1a Add validation for system cluster name for both controller and cli (#81)
* Add validation for system cluster name for both controller and cli

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add validation for system cluster name for both controller and cli

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add validation for system cluster name for both controller and cli

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-01-06 02:15:20 +02:00
Hussein Galal
fedfa109b5 Fix append to empty slice (#80)
* Fix append to empty slice

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix initialization of addresses slice

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-01-04 01:49:48 +02:00
Hussein Galal
99d043f2ee fix chart releases (#79)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-01-03 02:55:09 +02:00
Hussein Galal
57ed675a7f fix chart releases (#78)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-01-03 02:49:05 +02:00
Hussein Galal
7c9060c394 fix chart release (#77)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-01-03 02:37:08 +02:00
Hussein Galal
a104aacf5f Add github config mail and username for pushing k3k release (#76)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-01-03 02:24:46 +02:00
Hussein Galal
6346b06eb3 Add github config mail and username for pushing k3k release (#75)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-01-03 02:08:10 +02:00
Hussein Galal
6fd745f268 Fix chart release (#74)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-01-03 01:53:26 +02:00
Hussein Galal
1258fb6d58 Upgrade chart and fix manifest (#73)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-01-03 00:03:08 +02:00
Matt Trachier
130dbb0a33 Fix: upgrade go in go.mod (#64)
Signed-off-by: matttrach <matttrach@gmail.com>
2023-12-13 00:21:26 +02:00
Hussein Galal
67c8cac611 [controller] HA stabilization and fix rejoining ephermal nodes (#68)
* Remove etcd member if server pod gets removed

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* make sure to add finalizer to server pod only in HA mode

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix recursion bug and add new fields to cluster status

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixing comments

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixing comments

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2023-12-13 00:21:02 +02:00
Hussein Galal
dd618e580a use statefulsets for servers (#67)
* use statefulsets for servers

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* remove unused code

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix comments

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2023-11-28 23:51:50 +02:00
Paulo Gomes
dc2f410c17 build: Align drone base images (#66)
Align the base images used in drone with the images used across the
ecosystem.
2023-11-28 19:01:37 +02:00
Hussein Galal
a620f6c66f Fix kubeconfig extract in cli (#65)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2023-11-28 19:00:10 +02:00
Hussein Galal
3c283ce178 Add readiness probe and fix readme (#63)
* Add readiness probe and fix readme

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* typos and fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2023-09-05 22:24:27 +03:00
Brian Downs
0dd234b2d5 Add Addon Feature (#61)
* resolve conflicts and other changes

Signed-off-by: Brian Downs <brian.downs@gmail.com>

* updates

Signed-off-by: Brian Downs <brian.downs@gmail.com>

* fix remaining conflict

Signed-off-by: Brian Downs <brian.downs@gmail.com>

* add back cluster and service cidr

Signed-off-by: Brian Downs <brian.downs@gmail.com>

---------

Signed-off-by: Brian Downs <brian.downs@gmail.com>
2023-08-31 01:34:35 +03:00
Johnatas
986216f9cd Feat improve k3kcli os support (#62)
* Allowing multiple OSs

* add docs for windows

* Improve docs

* add manifests

* fix readme

* Update README.md

* Improve macos docs

* Change base build to go v1.20.7 and add freebsd
2023-08-31 01:27:48 +03:00
Waleed Malik
717808b03b Fix helm chart installation steps in documentation (#56)
Signed-off-by: Waleed Malik <ahmedwaleedmalik@gmail.com>
2023-08-08 23:33:53 +03:00
Hussein Galal
9dbd0bef44 Add cluster persistence with statefulsets (#55)
* Add cluster persistence with statefulsets

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix comments

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2023-08-08 23:23:55 +03:00
Brian Downs
def1746f1f Merge pull request #59 from briandowns/updates
remove some unused code and additional updates
2023-08-01 14:08:51 -04:00
Brian Downs
d32ce24d31 remove some unused code and additional updates
Signed-off-by: Brian Downs <brian.downs@gmail.com>
2023-08-01 10:58:18 -07:00
Brian Downs
46965eb692 Merge pull request #58 from briandowns/update_go
update go version
2023-07-27 10:11:28 -07:00
Brian Downs
5bed1bd6ee update go version
Signed-off-by: Brian Downs <brian.downs@gmail.com>
2023-07-27 10:06:59 -07:00
Hussein Galal
8968fe1d62 Fix docker image tag (#52)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2023-07-04 00:03:28 +03:00
Brad Davidson
84d3f768c6 Run k3s as pid 1 (#50)
Signed-off-by: Brad Davidson <brad.davidson@rancher.com>
2023-06-27 02:43:01 +03:00
Hussein Galal
decf24cb2a K3k chart (#51)
* Add release chart drone action

* fix release charts

* Add deploy dir to Dapper

* Add remove build step from drone k3k-chart

* Fix repo and org name

* fix ci

* add index.yaml
2023-06-24 00:27:05 +03:00
Hussein Galal
861078fa85 Remove github workflow and add drone (#49) 2023-06-21 00:43:16 +03:00
Hussein Galal
da5ddb27b5 Use env variables in CI action (#48) 2023-06-20 21:27:35 +03:00
Hussein Galal
e1576343a8 Fix action ci (#47) 2023-06-20 21:02:41 +03:00
Hussein Galal
ff256a324b fix action (#46)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2023-06-20 20:57:20 +03:00
Hussein Galal
6318fc29bf use custom action (#45)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2023-06-20 20:43:39 +03:00
Hussein Galal
d9eafbb1d2 Use ibuildthecloud/github-release in github action (#44)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2023-06-20 20:21:33 +03:00
Hussein Galal
da3ba1b5ff Fix release ci (#43) 2023-06-20 19:27:40 +03:00
Hussein Galal
176deae781 Update the chart (#38)
* Update chart to v0.1.1-k3k2

* update image tag
2023-06-14 03:18:44 +03:00
Brian Downs
7ec204683f Merge pull request #39 from briandowns/secure_build
update to perform secure build and possible arm64 support
2023-06-13 17:18:35 -07:00
Brian Downs
fac92fb21a update to perform secure build and possible arm64 support
Signed-off-by: Brian Downs <brian.downs@gmail.com>
2023-06-13 17:16:51 -07:00
Brian Downs
fb40f65c75 Merge pull request #37 from briandowns/update_package_refs 2023-06-13 17:00:42 -07:00
Brian Downs
b2e969f6df update package refs
Signed-off-by: Brian Downs <brian.downs@gmail.com>
2023-06-13 16:45:46 -07:00
Hussein Galal
43d7779dfa Export k3k cluster kubeconfig in k3kcli (#36)
* Export k3k cluster kubeconfig in k3kcli

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Update readme and logs

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2023-06-13 19:48:57 +03:00
Hussein Galal
ea1e7e486f Revert CIDR pool allocation and fix delete (#35)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2023-03-28 23:45:57 +02:00
Hussein Galal
7bcc312b4b move crds to the helm chart (#34)
* Fixes to the controller and cli

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Move crds to the helm chart

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix statically configured cluster

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2023-03-23 21:33:52 +02:00
Hussein Galal
dde877e285 Fixes to the controller and cli (#33)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2023-02-03 18:00:07 +02:00
58 changed files with 2389 additions and 1347 deletions

22
.github/workflows/build.yml vendored Normal file
View File

@@ -0,0 +1,22 @@
on:
push:
branches:
- master
pull_request:
name: Build
permissions:
contents: read
jobs:
build-cross-arch:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Cross Arch Build
run: |
make ci
env:
CROSS: "true"

View File

@@ -1,32 +1,30 @@
name: Chart Release
on:
push:
branches:
- main
tags:
- "chart-*"
name: Chart
permissions:
contents: write
id-token: write
jobs:
release:
permissions:
contents: write
chart-release:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Checkout code
uses: actions/checkout@v4
- name: Configure Git
run: |
git config user.name "$GITHUB_ACTOR"
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
- name: Package Chart
run: |
make package-chart;
- name: Install Helm
uses: azure/setup-helm@v3
- name: Run chart-releaser
uses: helm/chart-releaser-action@v1.5.0
with:
charts_dir: charts
env:
CR_TOKEN: "${{ secrets.TOKEN }}"
- name: Release Chart
uses: softprops/action-gh-release@v2
with:
files: |
deploy/*
- name: Index Chart
run: |
make index-chart

View File

@@ -1,42 +1,52 @@
name: K3K Release
on:
push:
tags:
- "v*"
push:
tags:
- "v*"
name: Release
permissions:
contents: write
id-token: write
jobs:
release:
permissions:
contents: write
release-cross-arch:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Build K3K
uses: addnab/docker-run-action@v3
with:
registry: docker.io
image: rancher/dapper:v0.5.5
options: -v ${{ github.workspace }}:/work -v /var/run/docker.sock:/var/run/docker.sock
run: |
cd /work && dapper ci
- name: Publish Binaries
uses: SierraSoftworks/gh-releases@v1.0.7
with:
token: ${{ secrets.TOKEN }}
overwrite: 'true'
files: |
${{ github.workspace }}/bin/k3k
${{ github.workspace }}/bin/k3kcli
- name: Docker Hub Login
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push
uses: docker/build-push-action@v4
with:
push: true
tags: husseingalal/k3k:${{ github.ref_name }}
file: ./package/Dockerfile
context: .
- name: Checkout code
uses: actions/checkout@v4
- name: Cross Arch Build
run: |
make ci
env:
CROSS: "true"
- name: "Read secrets"
uses: rancher-eio/read-vault-secrets@main
with:
secrets: |
secret/data/github/repo/${{ github.repository }}/dockerhub/${{ github.repository_owner }}/credentials username | DOCKER_USERNAME ;
secret/data/github/repo/${{ github.repository }}/dockerhub/${{ github.repository_owner }}/credentials password | DOCKER_PASSWORD ;
- name: release binaries
uses: softprops/action-gh-release@v2
with:
files: |
bin/*
- name: Login to Container Registry
uses: docker/login-action@v3
with:
username: ${{ env.DOCKER_USERNAME }}
password: ${{ env.DOCKER_PASSWORD }}
- name: Build container image
uses: docker/build-push-action@v5
with:
context: .
push: true
tags: rancher/k3k:${{ github.ref_name }}
file: package/Dockerfile
platforms: linux/amd64

3
.gitignore vendored
View File

@@ -4,4 +4,5 @@
/dist
*.swp
.idea
.vscode/
__debug*

View File

@@ -1,21 +1,24 @@
ARG GOLANG=golang:1.19.5-alpine3.17
ARG GOLANG=rancher/hardened-build-base:v1.22.2b1
FROM ${GOLANG}
ARG DAPPER_HOST_ARCH
ENV ARCH $DAPPER_HOST_ARCH
RUN apk -U add bash git gcc musl-dev docker vim less file curl wget ca-certificates
RUN apk -U add \bash git gcc musl-dev docker vim less file curl wget ca-certificates
RUN if [ "${ARCH}" == "amd64" ]; then \
curl -sL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s v1.15.0; \
fi
RUN curl -sL https://github.com/helm/chart-releaser/releases/download/v1.5.0/chart-releaser_1.5.0_linux_${ARCH}.tar.gz | tar -xz cr \
&& mv cr /bin/
ENV GO111MODULE on
ENV DAPPER_ENV REPO TAG DRONE_TAG CROSS
ENV DAPPER_SOURCE /go/src/github.com/galal-hussein/k3k/
ENV DAPPER_OUTPUT ./bin ./dist
ENV DAPPER_ENV REPO TAG DRONE_TAG CROSS GITHUB_TOKEN
ENV DAPPER_SOURCE /go/src/github.com/rancher/k3k/
ENV DAPPER_OUTPUT ./bin ./dist ./deploy
ENV DAPPER_DOCKER_SOCKET true
ENV HOME ${DAPPER_SOURCE}
WORKDIR ${DAPPER_SOURCE}
ENTRYPOINT ["./scripts/entry"]
ENTRYPOINT ["./ops/entry"]
CMD ["ci"]

View File

@@ -1,5 +1,4 @@
TARGETS := $(shell ls scripts)
TARGETS := $(shell ls ops)
.dapper:
@echo Downloading dapper
@curl -sL https://releases.rancher.com/dapper/latest/dapper-$$(uname -s)-$$(uname -m) > .dapper.tmp
@@ -12,4 +11,4 @@ $(TARGETS): .dapper
.DEFAULT_GOAL := default
.PHONY: $(TARGETS)
.PHONY: $(TARGETS)

100
README.md
View File

@@ -2,47 +2,121 @@
A Kubernetes in Kubernetes tool, k3k provides a way to run multiple embedded isolated k3s clusters on your kubernetes cluster.
## Why?
## Example
![alt text](https://github.com/galal-hussein/k3k/blob/main/hack/becausewecan.jpg?raw=true)
An example on creating a k3k cluster on an RKE2 host using k3kcli
[![asciicast](https://asciinema.org/a/eYlc3dsL2pfP2B50i3Ea8MJJp.svg)](https://asciinema.org/a/eYlc3dsL2pfP2B50i3Ea8MJJp)
## Usage
## Architecture
K3K consists of a controller and a cli tool, the controller can be deployed via a helm chart and the cli can be downloaded from the releases page.
### Deploy Controller
### Controller
The K3K controller will watch a CRD called `clusters.k3k.io`. Once found, the controller will create a separate namespace and it will create a K3S cluster as specified in the spec of the object.
Each server and agent is created as a separate pod that runs in the new namespace.
### CLI
The CLI provides a quick and easy way to create K3K clusters using simple flags, and automatically exposes the K3K clusters so it's accessible via a kubeconfig.
## Features
### Isolation
Each cluster runs in a separate namespace that can be isolated via network policies and RBAC rules, clusters also run in a separate network namespace with flannel as the backend CNI. Finally, each cluster has a separate datastore which can be persisted.
In addition, k3k offers a persistence feature that can help users to persist their datastore, using dynamic storage class volumes.
### Portability and Customization
The "Cluster" object is considered the template of the cluster that you can re-use to spin up multiple clusters in a matter of seconds.
K3K clusters use K3S internally and leverage all options that can be passed to K3S. Each cluster is exposed to the host cluster via NodePort, LoadBalancers, and Ingresses.
| | Separate Namespace (for each tenant) | K3K | vcluster | Separate Cluster (for each tenant) |
|-----------------------|---------------------------------------|------------------------------|-----------------|------------------------------------|
| Isolation | Very weak | Very strong | strong | Very strong |
| Access for tenants    | Very restricted                       | Built-in k8s RBAC / Rancher  | Vcluster admin  | Cluster admin                      |
| Cost | Very cheap | Very cheap | cheap | expensive |
| Overhead | Very low | Very low | Very low | Very high |
| Networking | Shared | Separate | shared | separate |
| Cluster Configuration | | Very easy | Very hard | |
## Usage
### Deploy K3K Controller
[Helm](https://helm.sh) must be installed to use the charts. Please refer to
Helm's [documentation](https://helm.sh/docs) to get started.
Once Helm has been set up correctly, add the repo as follows:
helm repo add k3k https://galal-hussein.github.io/k3k
```sh
helm repo add k3k https://rancher.github.io/k3k
```
If you had already added this repo earlier, run `helm repo update` to retrieve
the latest versions of the packages. You can then run `helm search repo
k3k` to see the charts.
k3k --devel` to see the charts.
To install the k3k chart:
helm install my-k3k k3k/k3k
```sh
helm install my-k3k k3k/k3k --devel
```
To uninstall the chart:
helm delete my-k3k
```sh
helm delete my-k3k
```
**NOTE: Since k3k is still under development, the chart is marked as a development chart, this means that you need to add the `--devel` flag to install it.**
### Create a new cluster
To create a new cluster you need to install and run the cli or create a cluster object, to install the cli:
#### For linux and macOS
1 - Download the binary, linux download url:
```
wget https://github.com/galal-hussein/k3k/releases/download/v0.0.0-alpha2/k3k
chmod +x k3k
sudo cp k3k /usr/local/bin
wget https://github.com/rancher/k3k/releases/download/v0.0.0-alpha2/k3kcli
```
macOS download url:
```
wget https://github.com/rancher/k3k/releases/download/v0.0.0-alpha2/k3kcli
```
Then copy to local bin
```
chmod +x k3kcli
sudo cp k3kcli /usr/local/bin
```
#### For Windows
1 - Download the Binary:
Use PowerShell's Invoke-WebRequest cmdlet to download the binary:
```powershell
Invoke-WebRequest -Uri "https://github.com/rancher/k3k/releases/download/v0.0.0-alpha2/k3kcli-windows" -OutFile "k3kcli.exe"
```
2 - Copy the Binary to a Directory in PATH:
To allow running the binary from any command prompt, you can copy it to a directory in your system's PATH. For example, copying it to C:\Users\<YourUsername>\bin (create this directory if it doesn't exist):
```
Copy-Item "k3kcli.exe" "C:\bin"
```
3 - Update Environment Variable (PATH):
If you haven't already added `C:\bin` (or your chosen directory) to your PATH, you can do it through PowerShell:
```
setx PATH "C:\bin;%PATH%"
```
To create a new cluster you can use:
```
```sh
k3k cluster create --name example-cluster --token test
```
```

View File

@@ -2,5 +2,5 @@ apiVersion: v2
name: k3k
description: A Helm chart for K3K
type: application
version: 0.1.1
appVersion: 0.0.0-alpha3
version: 0.1.5-r5
appVersion: 0.2.0

View File

@@ -0,0 +1,134 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clusters.k3k.io
spec:
group: k3k.io
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
properties:
name:
type: string
version:
type: string
servers:
type: integer
x-kubernetes-validations:
- message: cluster must have at least one server
rule: self >= 1
agents:
type: integer
x-kubernetes-validations:
- message: invalid value for agents
rule: self >= 0
token:
type: string
x-kubernetes-validations:
- message: token is immutable
rule: self == oldSelf
clusterCIDR:
type: string
x-kubernetes-validations:
- message: clusterCIDR is immutable
rule: self == oldSelf
serviceCIDR:
type: string
x-kubernetes-validations:
- message: serviceCIDR is immutable
rule: self == oldSelf
clusterDNS:
type: string
x-kubernetes-validations:
- message: clusterDNS is immutable
rule: self == oldSelf
serverArgs:
type: array
items:
type: string
agentArgs:
type: array
items:
type: string
tlsSANs:
type: array
items:
type: string
persistence:
type: object
properties:
type:
type: string
default: "ephermal"
storageClassName:
type: string
storageRequestSize:
type: string
addons:
type: array
items:
type: object
properties:
secretNamespace:
type: string
secretRef:
type: string
expose:
type: object
properties:
ingress:
type: object
properties:
enabled:
type: boolean
ingressClassName:
type: string
loadbalancer:
type: object
properties:
enabled:
type: boolean
nodePort:
type: object
properties:
enabled:
type: boolean
status:
type: object
properties:
overrideClusterCIDR:
type: boolean
clusterCIDR:
type: string
overrideServiceCIDR:
type: boolean
serviceCIDR:
type: string
clusterDNS:
type: string
tlsSANs:
type: array
items:
type: string
persistence:
type: object
properties:
type:
type: string
default: "ephermal"
storageClassName:
type: string
storageRequestSize:
type: string
scope: Cluster
names:
plural: clusters
singular: cluster
kind: Cluster

View File

@@ -2,10 +2,10 @@ replicaCount: 1
namespace: k3k-system
image:
repository: husseingalal/k3k
pullPolicy: IfNotPresent
repository: rancher/k3k
pullPolicy: Always
# Overrides the image tag whose default is the chart appVersion.
tag: "v0.0.0-alpha3"
tag: "v0.2.1"
imagePullSecrets: []
nameOverride: ""
@@ -16,4 +16,4 @@ serviceAccount:
create: true
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
name: ""

View File

@@ -1,27 +1,33 @@
package cluster
import (
"github.com/galal-hussein/k3k/cli/cmds"
"github.com/rancher/k3k/cli/cmds"
"github.com/urfave/cli"
)
var clusterSubcommands = []cli.Command{
var subcommands = []cli.Command{
{
Name: "create",
Usage: "Create new cluster",
SkipFlagParsing: false,
SkipArgReorder: true,
Action: createCluster,
Action: create,
Flags: append(cmds.CommonFlags, clusterCreateFlags...),
},
{
Name: "delete",
Usage: "Delete an existing cluster",
SkipFlagParsing: false,
SkipArgReorder: true,
Action: delete,
Flags: append(cmds.CommonFlags, clusterDeleteFlags...),
},
}
func NewClusterCommand() cli.Command {
cmd := cli.Command{
func NewCommand() cli.Command {
return cli.Command{
Name: "cluster",
Usage: "cluster command",
Subcommands: clusterSubcommands,
Subcommands: subcommands,
}
return cmd
}

View File

@@ -3,21 +3,39 @@ package cluster
import (
"context"
"errors"
"net/url"
"os"
"path/filepath"
"strings"
"time"
"github.com/galal-hussein/k3k/cli/cmds"
"github.com/galal-hussein/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/cli/cmds"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/controller/cluster/server"
"github.com/rancher/k3k/pkg/controller/kubeconfig"
"github.com/rancher/k3k/pkg/controller/util"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/authentication/user"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/client"
)
var (
Scheme = runtime.NewScheme()
Scheme = runtime.NewScheme()
backoff = wait.Backoff{
Steps: 5,
Duration: 20 * time.Second,
Factor: 2,
Jitter: 0.1,
}
)
func init() {
@@ -26,15 +44,17 @@ func init() {
}
var (
name string
token string
clusterCIDR string
serviceCIDR string
servers int64
agents int64
serverArgs cli.StringSlice
agentArgs cli.StringSlice
version string
name string
token string
clusterCIDR string
serviceCIDR string
servers int64
agents int64
serverArgs cli.StringSlice
agentArgs cli.StringSlice
persistenceType string
storageClassName string
version string
clusterCreateFlags = []cli.Flag{
cli.StringFlag{
@@ -68,6 +88,17 @@ var (
Usage: "service CIDR",
Destination: &serviceCIDR,
},
cli.StringFlag{
Name: "persistence-type",
Usage: "Persistence mode for the nodes (ephermal, static, dynamic)",
Value: server.EphermalNodesType,
Destination: &persistenceType,
},
cli.StringFlag{
Name: "storage-class-name",
Usage: "Storage class name for dynamic persistence type",
Destination: &storageClassName,
},
cli.StringSliceFlag{
Name: "server-args",
Usage: "servers extra arguments",
@@ -82,12 +113,12 @@ var (
Name: "version",
Usage: "k3s version",
Destination: &version,
Value: "v1.26.1+k3s1",
Value: "v1.26.1-k3s1",
},
}
)
func createCluster(clx *cli.Context) error {
func create(clx *cli.Context) error {
ctx := context.Background()
if err := validateCreateFlags(clx); err != nil {
return err
@@ -101,10 +132,11 @@ func createCluster(clx *cli.Context) error {
ctrlClient, err := client.New(restConfig, client.Options{
Scheme: Scheme,
})
if err != nil {
return err
}
logrus.Infof("creating a new cluster [%s]", name)
logrus.Infof("Creating a new cluster [%s]", name)
cluster := newCluster(
name,
token,
@@ -116,22 +148,81 @@ func createCluster(clx *cli.Context) error {
agentArgs,
)
return ctrlClient.Create(ctx, cluster)
cluster.Spec.Expose = &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{
Enabled: true,
},
}
// add Host IP address as an extra TLS-SAN to expose the k3k cluster
url, err := url.Parse(restConfig.Host)
if err != nil {
return err
}
host := strings.Split(url.Host, ":")
cluster.Spec.TLSSANs = []string{host[0]}
if err := ctrlClient.Create(ctx, cluster); err != nil {
if apierrors.IsAlreadyExists(err) {
logrus.Infof("Cluster [%s] already exists", name)
} else {
return err
}
}
logrus.Infof("Extracting Kubeconfig for [%s] cluster", name)
cfg := &kubeconfig.KubeConfig{
CN: util.AdminCommonName,
ORG: []string{user.SystemPrivilegedGroup},
ExpiryDate: 0,
}
logrus.Infof("waiting for cluster to be available..")
var kubeconfig []byte
if err := retry.OnError(backoff, apierrors.IsNotFound, func() error {
kubeconfig, err = cfg.Extract(ctx, ctrlClient, cluster, host[0])
if err != nil {
return err
}
return nil
}); err != nil {
return err
}
pwd, err := os.Getwd()
if err != nil {
return err
}
logrus.Infof(`You can start using the cluster with:
export KUBECONFIG=%s
kubectl cluster-info
`, filepath.Join(pwd, cluster.Name+"-kubeconfig.yaml"))
return os.WriteFile(cluster.Name+"-kubeconfig.yaml", kubeconfig, 0644)
}
func validateCreateFlags(clx *cli.Context) error {
if persistenceType != server.EphermalNodesType &&
persistenceType != server.DynamicNodesType {
return errors.New("invalid persistence type")
}
if token == "" {
return errors.New("empty cluster token")
}
if name == "" {
return errors.New("empty cluster name")
}
if name == cluster.ClusterInvalidName {
return errors.New("invalid cluster name")
}
if servers <= 0 {
return errors.New("invalid number of servers")
}
if cmds.Kubeconfig == "" && os.Getenv("KUBECONFIG") == "" {
return errors.New("empty kubeconfig")
}
return nil
}
@@ -145,7 +236,6 @@ func newCluster(name, token string, servers, agents int32, clusterCIDR, serviceC
APIVersion: "k3k.io/v1alpha1",
},
Spec: v1alpha1.ClusterSpec{
Name: name,
Token: token,
Servers: &servers,
Agents: &agents,
@@ -154,6 +244,10 @@ func newCluster(name, token string, servers, agents int32, clusterCIDR, serviceC
ServerArgs: serverArgs,
AgentArgs: agentArgs,
Version: version,
Persistence: &v1alpha1.PersistenceConfig{
Type: persistenceType,
StorageClassName: storageClassName,
},
},
}
}

View File

@@ -1 +1,47 @@
package cluster
import (
"context"
"github.com/rancher/k3k/cli/cmds"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/controller-runtime/pkg/client"
)
var (
clusterDeleteFlags = []cli.Flag{
cli.StringFlag{
Name: "name",
Usage: "name of the cluster",
Destination: &name,
},
}
)
func delete(clx *cli.Context) error {
ctx := context.Background()
restConfig, err := clientcmd.BuildConfigFromFlags("", cmds.Kubeconfig)
if err != nil {
return err
}
ctrlClient, err := client.New(restConfig, client.Options{
Scheme: Scheme,
})
if err != nil {
return err
}
logrus.Infof("deleting [%s] cluster", name)
cluster := v1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
}
return ctrlClient.Delete(ctx, &cluster)
}

View File

@@ -0,0 +1,169 @@
package kubeconfig
import (
"context"
"net/url"
"os"
"path/filepath"
"strings"
"time"
"github.com/rancher/k3k/cli/cmds"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/kubeconfig"
"github.com/rancher/k3k/pkg/controller/util"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/authentication/user"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/client"
)
func init() {
_ = clientgoscheme.AddToScheme(Scheme)
_ = v1alpha1.AddToScheme(Scheme)
}
var (
	// Scheme holds the API types the controller-runtime client can decode;
	// init registers both client-go core types and k3k.io/v1alpha1 into it.
	Scheme = runtime.NewScheme()

	// Flag destinations for the "kubeconfig generate" subcommand.
	name           string
	cn             string
	org            cli.StringSlice
	altNames       cli.StringSlice
	expirationDays int64
	configName     string

	// backoff governs retries while waiting for the cluster's kubeconfig
	// to become extractable: 5 attempts starting at 20s, doubling each time.
	backoff = wait.Backoff{
		Steps:    5,
		Duration: 20 * time.Second,
		Factor:   2,
		Jitter:   0.1,
	}
	generateKubeconfigFlags = []cli.Flag{
		cli.StringFlag{
			Name:        "name",
			Usage:       "cluster name",
			Destination: &name,
		},
		cli.StringFlag{
			Name:        "config-name",
			Usage:       "the name of the generated kubeconfig file",
			Destination: &configName,
		},
		cli.StringFlag{
			Name:        "cn",
			Usage:       "Common name (CN) of the generated certificates for the kubeconfig",
			Destination: &cn,
			Value:       util.AdminCommonName,
		},
		cli.StringSliceFlag{
			Name:  "org",
			Usage: "Organization name (ORG) of the generated certificates for the kubeconfig",
			Value: &org,
		},
		cli.StringSliceFlag{
			Name:  "altNames",
			Usage: "altNames of the generated certificates for the kubeconfig",
			Value: &altNames,
		},
		cli.Int64Flag{
			Name:        "expiration-days",
			Usage:       "Expiration date of the certificates used for the kubeconfig",
			Destination: &expirationDays,
			// Default to one year. The previous default of 356 was an
			// apparent typo for 365 (days in a year).
			Value: 365,
		},
	}
)
// subcommands lists the actions available under "kubeconfig"; currently
// only "generate", which extracts a kubeconfig for a virtual cluster.
var subcommands = []cli.Command{
	{
		Name: "generate",
		Usage: "Generate kubeconfig for clusters",
		SkipFlagParsing: false,
		SkipArgReorder: true,
		Action: generate,
		// Combine the shared flags (e.g. --kubeconfig) with the
		// generate-specific flags defined above.
		Flags: append(cmds.CommonFlags, generateKubeconfigFlags...),
	},
}
// NewCommand returns the top-level "kubeconfig" CLI command with its
// subcommands attached.
func NewCommand() cli.Command {
	cmd := cli.Command{
		Name:  "kubeconfig",
		Usage: "Manage kubeconfig for clusters",
	}
	cmd.Subcommands = subcommands
	return cmd
}
// generate builds a kubeconfig for the virtual cluster selected by the
// --name flag and writes it to a file in the current working directory
// (default: "<cluster>-kubeconfig.yaml"). It waits, with backoff, for the
// cluster's credentials to become available before extracting them.
func generate(clx *cli.Context) error {
	ctx := context.Background()

	restConfig, err := clientcmd.BuildConfigFromFlags("", cmds.Kubeconfig)
	if err != nil {
		return err
	}
	ctrlClient, err := client.New(restConfig, client.Options{
		Scheme: Scheme,
	})
	if err != nil {
		return err
	}

	var cluster v1alpha1.Cluster
	clusterKey := types.NamespacedName{
		Name: name,
	}
	if err := ctrlClient.Get(ctx, clusterKey, &cluster); err != nil {
		return err
	}

	// The virtual cluster is reached through the host cluster's API server
	// address; parse it and strip any ":port" suffix.
	// (Renamed from "url" to avoid shadowing the net/url package.)
	serverURL, err := url.Parse(restConfig.Host)
	if err != nil {
		return err
	}
	hostParts := strings.Split(serverURL.Host, ":")

	certAltNames := kubeconfig.AddSANs(altNames)
	if org == nil {
		// No --org given: default to the privileged system group.
		org = cli.StringSlice{user.SystemPrivilegedGroup}
	}
	cfg := kubeconfig.KubeConfig{
		CN:         cn,
		ORG:        org,
		ExpiryDate: time.Hour * 24 * time.Duration(expirationDays),
		AltNames:   certAltNames,
	}

	logrus.Infof("waiting for cluster to be available..")
	// Retry extraction while the cluster's secrets are still NotFound.
	// (Renamed from "kubeconfig" to avoid shadowing the kubeconfig package.)
	var kubeconfigBytes []byte
	if err := retry.OnError(backoff, apierrors.IsNotFound, func() error {
		var extractErr error
		kubeconfigBytes, extractErr = cfg.Extract(ctx, ctrlClient, &cluster, hostParts[0])
		return extractErr
	}); err != nil {
		return err
	}

	pwd, err := os.Getwd()
	if err != nil {
		return err
	}
	if configName == "" {
		configName = cluster.Name + "-kubeconfig.yaml"
	}
	logrus.Infof(`You can start using the cluster with:
export KUBECONFIG=%s
kubectl cluster-info
`, filepath.Join(pwd, configName))
	return os.WriteFile(configName, kubeconfigBytes, 0644)
}

View File

@@ -3,19 +3,26 @@ package main
import (
"os"
"github.com/galal-hussein/k3k/cli/cmds"
"github.com/galal-hussein/k3k/cli/cmds/cluster"
"github.com/galal-hussein/k3k/pkg/version"
"github.com/rancher/k3k/cli/cmds"
"github.com/rancher/k3k/cli/cmds/cluster"
"github.com/rancher/k3k/cli/cmds/kubeconfig"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
const (
program = "k3k"
version = "dev"
gitCommit = "HEAD"
)
func main() {
app := cmds.NewApp()
app.Commands = []cli.Command{
cluster.NewClusterCommand(),
cluster.NewCommand(),
kubeconfig.NewCommand(),
}
app.Version = version.Version + " (" + version.GitCommit + ")"
app.Version = version + " (" + gitCommit + ")"
if err := app.Run(os.Args); err != nil {
logrus.Fatal(err)

View File

@@ -1,38 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: cidrallocationpools.k3k.io
spec:
group: k3k.io
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
properties:
defaultClusterCIDR:
type: string
status:
type: object
properties:
pool:
type: array
items:
type: object
properties:
clusterName:
type: string
issued:
type: integer
ipNet:
type: string
scope: Cluster
names:
plural: cidrallocationpools
singular: cidrallocationpool
kind: CIDRAllocationPool

View File

@@ -1,70 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clusters.k3k.io
spec:
group: k3k.io
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
properties:
name:
type: string
version:
type: string
servers:
type: integer
agents:
type: integer
token:
type: string
clusterCIDR:
type: string
serviceCIDR:
type: string
clusterDNS:
type: string
serverArgs:
type: array
items:
type: string
agentArgs:
type: array
items:
type: string
expose:
type: object
properties:
ingress:
type: object
properties:
enabled:
type: boolean
ingressClassName:
type: string
loadbalancer:
type: object
properties:
enabled:
type: boolean
status:
type: object
properties:
clusterCIDR:
type: string
serviceCIDR:
type: string
clusterDNS:
type: string
scope: Cluster
names:
plural: clusters
singular: cluster
kind: Cluster

21
go.mod
View File

@@ -1,10 +1,17 @@
module github.com/galal-hussein/k3k
module github.com/rancher/k3k
go 1.19
go 1.20
replace (
go.etcd.io/etcd/api/v3 => github.com/k3s-io/etcd/api/v3 v3.5.9-k3s1
go.etcd.io/etcd/client/v3 => github.com/k3s-io/etcd/client/v3 v3.5.9-k3s1
)
require (
github.com/sirupsen/logrus v1.8.1
github.com/urfave/cli v1.22.12
go.etcd.io/etcd/api/v3 v3.5.9
go.etcd.io/etcd/client/v3 v3.5.5
k8s.io/api v0.26.1
k8s.io/apimachinery v0.26.1
k8s.io/client-go v0.26.1
@@ -14,6 +21,8 @@ require (
require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/coreos/go-semver v0.3.0 // indirect
github.com/coreos/go-systemd/v22 v22.3.2 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/emicklei/go-restful/v3 v3.9.0 // indirect
@@ -39,12 +48,18 @@ require (
github.com/prometheus/procfs v0.8.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.9 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.24.0 // indirect
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect
golang.org/x/sys v0.3.0 // indirect
golang.org/x/term v0.3.0 // indirect
golang.org/x/time v0.3.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect
google.golang.org/grpc v1.49.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apiextensions-apiserver v0.26.0 // indirect
@@ -67,7 +82,7 @@ require (
gopkg.in/yaml.v2 v2.4.0 // indirect
k8s.io/apiserver v0.26.1
k8s.io/klog/v2 v2.80.1
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 // indirect
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448
sigs.k8s.io/controller-runtime v0.14.1
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect

45
go.sum
View File

@@ -39,6 +39,8 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -52,6 +54,15 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
@@ -64,6 +75,8 @@ github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
@@ -71,6 +84,7 @@ github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJ
github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -96,6 +110,7 @@ github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh
github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng=
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
@@ -142,6 +157,7 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -161,6 +177,7 @@ github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
@@ -179,6 +196,10 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/k3s-io/etcd/api/v3 v3.5.9-k3s1 h1:y4ont0HdnS7gtWNTXM8gahpKjAHtctgON/sjVRthlZY=
github.com/k3s-io/etcd/api/v3 v3.5.9-k3s1/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k=
github.com/k3s-io/etcd/client/v3 v3.5.9-k3s1 h1:Knr/8l7Sx92zUyevYO0gIO5P6EEc6ztvRO5EzSnMy+A=
github.com/k3s-io/etcd/client/v3 v3.5.9-k3s1/go.mod h1:i/Eo5LrZ5IKqpbtpPDuaUnDOUv471oDg8cjQaUr2MbA=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -246,6 +267,7 @@ github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
github.com/rancher/dynamiclistener v0.3.5 h1:5TaIHvkDGmZKvc96Huur16zfTKOiLhDtK4S+WV0JA6A=
github.com/rancher/dynamiclistener v0.3.5/go.mod h1:dW/YF6/m2+uEyJ5VtEcd9THxda599HP6N9dSXk81+k0=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
@@ -266,6 +288,7 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
@@ -276,15 +299,21 @@ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/etcd/client/pkg/v3 v3.5.9 h1:oidDC4+YEuSIQbsR94rY9gur91UPL6DnxDCIYd2IGsE=
go.etcd.io/etcd/client/pkg/v3 v3.5.9/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk=
go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -350,6 +379,7 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
@@ -407,8 +437,11 @@ golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -425,6 +458,7 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM=
@@ -529,6 +563,7 @@ google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfG
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
@@ -536,6 +571,8 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 h1:hrbNEivu7Zn1pxvHk6MBrq9iE22woVILTHqexqBxe6I=
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -548,6 +585,11 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw=
google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -560,6 +602,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
@@ -573,6 +617,7 @@ gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

Binary file not shown.

Before

Width:  |  Height:  |  Size: 137 KiB

View File

@@ -4,18 +4,17 @@ set -o errexit
set -o nounset
set -o pipefail
set -x
CODEGEN_GIT_PKG=https://github.com/kubernetes/code-generator.git
git clone --depth 1 ${CODEGEN_GIT_PKG} || true
SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
CODEGEN_PKG=./code-generator
"${CODEGEN_PKG}/generate-groups.sh" \
"deepcopy" \
github.com/galal-hussein/k3k/pkg/generated \
github.com/galal-hussein/k3k/pkg/apis \
"k3k.io:v1alpha1" \
--go-header-file "${SCRIPT_ROOT}"/hack/boilerplate.go.txt \
--output-base "$(dirname "${BASH_SOURCE[0]}")/../../../.."
source ${CODEGEN_PKG}/kube_codegen.sh
kube::codegen::gen_helpers \
--boilerplate "${SCRIPT_ROOT}/hack/boilerplate.go.txt" \
"${SCRIPT_ROOT}/pkg/apis"
rm -rf code-generator

View File

@@ -5,8 +5,8 @@ import (
"context"
"flag"
"github.com/galal-hussein/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/galal-hussein/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/cluster"
"k8s.io/apimachinery/pkg/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/clientcmd"
@@ -16,9 +16,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/manager"
)
var (
Scheme = runtime.NewScheme()
)
var Scheme = runtime.NewScheme()
func init() {
_ = clientgoscheme.AddToScheme(Scheme)

6
manifest-runtime.tmpl Normal file
View File

@@ -0,0 +1,6 @@
image: rancher/k3k:{{replace "+" "-" build.tag}}
manifests:
- image: rancher/k3k:{{replace "+" "-" build.tag}}-amd64
platform:
architecture: amd64
os: linux

35
ops/build Executable file
View File

@@ -0,0 +1,35 @@
#!/bin/bash
# Build the k3k controller and the k3kcli binaries for the host platform,
# plus the full cross-compile matrix when CROSS=true on amd64 builders.
set -ex

# VERSION and COMMIT come from the shared ops/version script.
source $(dirname $0)/version

cd $(dirname $0)/..

mkdir -p bin deploy

# Static linking on Linux so the binaries run in minimal container images.
if [ "$(uname)" = "Linux" ]; then
    OTHER_LINKFLAGS="-extldflags -static -s"
fi
LINKFLAGS="-X github.com/rancher/k3k.Version=$VERSION"
LINKFLAGS="-X github.com/rancher/k3k.GitCommit=$COMMIT $LINKFLAGS"

# build the k3k controller
CGO_ENABLED=0 go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3k
if [ "$CROSS" = "true" ] && [ "$ARCH" = "amd64" ]; then
    CGO_ENABLED=0 GOOS=linux GOARCH=s390x go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3k-s390x
    CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3k-arm64
    GOOS=freebsd GOARCH=arm64 go build -ldflags "$LINKFLAGS" -o bin/k3k-freebsd
    GOOS=darwin GOARCH=amd64 go build -ldflags "$LINKFLAGS" -o bin/k3k-darwin-amd64
    GOOS=darwin GOARCH=arm64 go build -ldflags "$LINKFLAGS" -o bin/k3k-darwin-aarch64
    GOOS=windows GOARCH=amd64 go build -ldflags "$LINKFLAGS" -o bin/k3k-windows
fi

# build k3kcli
CGO_ENABLED=0 go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3kcli ./cli
if [ "$CROSS" = "true" ] && [ "$ARCH" = "amd64" ]; then
    CGO_ENABLED=0 GOOS=linux GOARCH=s390x go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3kcli-s390x ./cli
    CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3kcli-arm64 ./cli
    # Fix: this previously wrote to bin/k3k-freebsd, clobbering the
    # controller's freebsd binary built above; it now follows the
    # k3kcli-* naming used by every other CLI artifact.
    GOOS=freebsd GOARCH=arm64 go build -ldflags "$LINKFLAGS" -o bin/k3kcli-freebsd ./cli
    GOOS=darwin GOARCH=amd64 go build -ldflags "$LINKFLAGS" -o bin/k3kcli-darwin-amd64 ./cli
    GOOS=darwin GOARCH=arm64 go build -ldflags "$LINKFLAGS" -o bin/k3kcli-darwin-aarch64 ./cli
    GOOS=windows GOARCH=amd64 go build -ldflags "$LINKFLAGS" -o bin/k3kcli-windows ./cli
fi

View File

View File

@@ -2,8 +2,8 @@
set -e
mkdir -p bin dist
if [ -e ./scripts/$1 ]; then
./scripts/"$@"
if [ -e ./ops/$1 ]; then
./ops/"$@"
else
exec "$@"
fi

31
ops/index-chart Executable file
View File

@@ -0,0 +1,31 @@
#!/bin/bash
# Regenerate the Helm repository index.yaml for the k3k chart with
# chart-releaser (cr) and push it to the gh-pages branch.
set -ex

source $(dirname $0)/version

cd $(dirname $0)/..

git fetch --tags

# The chart tag is "chart-<chart version>" taken from Chart.yaml.
CHART_TAG=chart-$(grep "version: " charts/k3k/Chart.yaml | awk '{print $2}')

# Bail out if this chart tag was already released.
# Fix: the original tested the undefined lowercase "$version" (always
# empty, so the guard never fired) and left the command substitution
# unquoted; test the computed CHART_TAG with a quoted -n check instead.
if [ -n "$(git tag -l "$CHART_TAG")" ]; then
  echo "tag already exists"
  exit 1
fi

# update the index.yaml
cr index --token ${GITHUB_TOKEN} \
  --release-name-template "chart-{{ .Version }}" \
  --package-path ./deploy/ \
  --index-path index.yaml \
  --git-repo k3k \
  -o rancher

# push to gh-pages
git config --global user.email "hussein.galal.ahmed.11@gmail.com"
git config --global user.name "galal-hussein"
git config --global url.https://${GITHUB_TOKEN}@github.com/.insteadOf https://github.com/

# push index.yaml to gh-pages
# Fix: CHART_TAG already carries the "chart-" prefix, so the old message
# rendered as "add chart-chart-X.Y.Z ..."; drop the duplicate prefix.
git add index.yaml
git commit -m "add ${CHART_TAG} to index.yaml"
git push --force --set-upstream origin HEAD:gh-pages

10
ops/package-chart Executable file
View File

@@ -0,0 +1,10 @@
#!/bin/bash
set -ex
source $(dirname $0)/version
cd $(dirname $0)/..
mkdir -p deploy/
cr package --package-path deploy/ charts/k3k

View File

@@ -5,7 +5,7 @@ cd $(dirname $0)/..
go generate
source ./scripts/version
source ./ops/version
if [ -n "$DIRTY" ]; then
echo Git is dirty

View File

@@ -5,7 +5,7 @@ if [ -n "$(git status --porcelain --untracked-files=no)" ]; then
fi
COMMIT=$(git rev-parse --short HEAD)
GIT_TAG=${DRONE_TAG:-$(git tag -l --contains HEAD | head -n 1)}
GIT_TAG=${TAG:-$(git tag -l --contains HEAD | head -n 1)}
if [[ -z "$DIRTY" && -n "$GIT_TAG" ]]; then
VERSION=$GIT_TAG
@@ -19,9 +19,15 @@ fi
SUFFIX="-${ARCH}"
TAG=${TAG:-${VERSION}${SUFFIX}}
REPO=${REPO:-husseingalal}
if echo $TAG | grep -q dirty; then
if [[ $VERSION = "chart*" ]]; then
TAG=${TAG:-${VERSION}}
else
TAG=${TAG:-${VERSION}${SUFFIX}}
fi
REPO=${REPO:-rancher}
if echo $TAG | grep dirty; then
TAG=dev
fi

View File

@@ -1,7 +1,7 @@
package v1alpha1
import (
k3k "github.com/galal-hussein/k3k/pkg/apis/k3k.io"
k3k "github.com/rancher/k3k/pkg/apis/k3k.io"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
@@ -19,7 +19,9 @@ func Resource(resource string) schema.GroupResource {
}
func addKnownTypes(s *runtime.Scheme) error {
s.AddKnownTypes(SchemeGroupVersion, &Cluster{}, &ClusterList{})
s.AddKnownTypes(SchemeGroupVersion,
&Cluster{},
&ClusterList{})
metav1.AddToGroupVersion(s, SchemeGroupVersion)
return nil
}

View File

@@ -16,19 +16,25 @@ type Cluster struct {
}
type ClusterSpec struct {
Name string `json:"name"`
Version string `json:"version"`
Servers *int32 `json:"servers"`
Agents *int32 `json:"agents"`
Token string `json:"token"`
ClusterCIDR string `json:"clusterCIDR,omitempty"`
ServiceCIDR string `json:"serviceCIDR,omitempty"`
ClusterDNS string `json:"clusterDNS,omitempty"`
Version string `json:"version"`
Servers *int32 `json:"servers"`
Agents *int32 `json:"agents"`
Token string `json:"token"`
ClusterCIDR string `json:"clusterCIDR,omitempty"`
ServiceCIDR string `json:"serviceCIDR,omitempty"`
ClusterDNS string `json:"clusterDNS,omitempty"`
ServerArgs []string `json:"serverArgs,omitempty"`
AgentArgs []string `json:"agentArgs,omitempty"`
TLSSANs []string `json:"tlsSANs,omitempty"`
Addons []Addon `json:"addons,omitempty"`
ServerArgs []string `json:"serverArgs,omitempty"`
AgentArgs []string `json:"agentArgs,omitempty"`
Persistence *PersistenceConfig `json:"persistence,omitempty"`
Expose *ExposeConfig `json:"expose,omitempty"`
}
Expose ExposeConfig `json:"expose,omitempty"`
type Addon struct {
SecretNamespace string `json:"secretNamespace,omitempty"`
SecretRef string `json:"secretRef,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -40,9 +46,17 @@ type ClusterList struct {
Items []Cluster `json:"items"`
}
type PersistenceConfig struct {
// Type can be ephemeral, static, dynamic
Type string `json:"type"`
StorageClassName string `json:"storageClassName,omitempty"`
StorageRequestSize string `json:"storageRequestSize,omitempty"`
}
type ExposeConfig struct {
Ingress *IngressConfig `json:"ingress"`
LoadBalancer *LoadBalancerConfig `json:"loadbalancer"`
NodePort *NodePortConfig `json:"nodePort"`
}
type IngressConfig struct {
@@ -54,41 +68,14 @@ type LoadBalancerConfig struct {
Enabled bool `json:"enabled"`
}
type NodePortConfig struct {
Enabled bool `json:"enabled"`
}
type ClusterStatus struct {
ClusterCIDR string `json:"clusterCIDR,omitempty"`
ServiceCIDR string `json:"serviceCIDR,omitempty"`
ClusterDNS string `json:"clusterDNS,omitempty"`
}
type Allocation struct {
ClusterName string `json:"clusterName"`
Issued int64 `json:"issued"`
IPNet string `json:"ipNet"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type CIDRAllocationPool struct {
metav1.ObjectMeta `json:"metadata,omitempty"`
metav1.TypeMeta `json:",inline"`
Spec CIDRAllocationPoolSpec `json:"spec"`
Status CIDRAllocationPoolStatus `json:"status"`
}
type CIDRAllocationPoolSpec struct {
DefaultClusterCIDR string `json:"defaultClusterCIDR"`
}
type CIDRAllocationPoolStatus struct {
Pool []Allocation `json:"pool"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type CIDRAllocationPoolList struct {
metav1.ListMeta `json:"metadata,omitempty"`
metav1.TypeMeta `json:",inline"`
Items []CIDRAllocationPool `json:"items"`
ClusterCIDR string `json:"clusterCIDR,omitempty"`
ServiceCIDR string `json:"serviceCIDR,omitempty"`
ClusterDNS string `json:"clusterDNS,omitempty"`
TLSSANs []string `json:"tlsSANs,omitempty"`
Persistence *PersistenceConfig `json:"persistence,omitempty"`
}

View File

@@ -10,115 +10,17 @@ import (
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Allocation) DeepCopyInto(out *Allocation) {
func (in *Addon) DeepCopyInto(out *Addon) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Allocation.
func (in *Allocation) DeepCopy() *Allocation {
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Addon.
func (in *Addon) DeepCopy() *Addon {
if in == nil {
return nil
}
out := new(Allocation)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CIDRAllocationPool) DeepCopyInto(out *CIDRAllocationPool) {
*out = *in
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.TypeMeta = in.TypeMeta
out.Spec = in.Spec
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CIDRAllocationPool.
func (in *CIDRAllocationPool) DeepCopy() *CIDRAllocationPool {
if in == nil {
return nil
}
out := new(CIDRAllocationPool)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CIDRAllocationPool) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CIDRAllocationPoolList) DeepCopyInto(out *CIDRAllocationPoolList) {
*out = *in
in.ListMeta.DeepCopyInto(&out.ListMeta)
out.TypeMeta = in.TypeMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CIDRAllocationPool, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CIDRAllocationPoolList.
func (in *CIDRAllocationPoolList) DeepCopy() *CIDRAllocationPoolList {
if in == nil {
return nil
}
out := new(CIDRAllocationPoolList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CIDRAllocationPoolList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CIDRAllocationPoolSpec) DeepCopyInto(out *CIDRAllocationPoolSpec) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CIDRAllocationPoolSpec.
func (in *CIDRAllocationPoolSpec) DeepCopy() *CIDRAllocationPoolSpec {
if in == nil {
return nil
}
out := new(CIDRAllocationPoolSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CIDRAllocationPoolStatus) DeepCopyInto(out *CIDRAllocationPoolStatus) {
*out = *in
if in.Pool != nil {
in, out := &in.Pool, &out.Pool
*out = make([]Allocation, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CIDRAllocationPoolStatus.
func (in *CIDRAllocationPoolStatus) DeepCopy() *CIDRAllocationPoolStatus {
if in == nil {
return nil
}
out := new(CIDRAllocationPoolStatus)
out := new(Addon)
in.DeepCopyInto(out)
return out
}
@@ -129,7 +31,7 @@ func (in *Cluster) DeepCopyInto(out *Cluster) {
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.TypeMeta = in.TypeMeta
in.Spec.DeepCopyInto(&out.Spec)
out.Status = in.Status
in.Status.DeepCopyInto(&out.Status)
return
}
@@ -207,7 +109,26 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
*out = make([]string, len(*in))
copy(*out, *in)
}
in.Expose.DeepCopyInto(&out.Expose)
if in.TLSSANs != nil {
in, out := &in.TLSSANs, &out.TLSSANs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Addons != nil {
in, out := &in.Addons, &out.Addons
*out = make([]Addon, len(*in))
copy(*out, *in)
}
if in.Persistence != nil {
in, out := &in.Persistence, &out.Persistence
*out = new(PersistenceConfig)
**out = **in
}
if in.Expose != nil {
in, out := &in.Expose, &out.Expose
*out = new(ExposeConfig)
(*in).DeepCopyInto(*out)
}
return
}
@@ -224,6 +145,16 @@ func (in *ClusterSpec) DeepCopy() *ClusterSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) {
*out = *in
if in.TLSSANs != nil {
in, out := &in.TLSSANs, &out.TLSSANs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Persistence != nil {
in, out := &in.Persistence, &out.Persistence
*out = new(PersistenceConfig)
**out = **in
}
return
}
@@ -250,6 +181,11 @@ func (in *ExposeConfig) DeepCopyInto(out *ExposeConfig) {
*out = new(LoadBalancerConfig)
**out = **in
}
if in.NodePort != nil {
in, out := &in.NodePort, &out.NodePort
*out = new(NodePortConfig)
**out = **in
}
return
}
@@ -294,3 +230,35 @@ func (in *LoadBalancerConfig) DeepCopy() *LoadBalancerConfig {
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodePortConfig) DeepCopyInto(out *NodePortConfig) {
	// NodePortConfig holds only value fields (a single bool), so a shallow
	// struct assignment is a complete deep copy.
	// NOTE: generated code (see +k8s:deepcopy-gen markers) — do not edit by hand.
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePortConfig.
func (in *NodePortConfig) DeepCopy() *NodePortConfig {
	if in == nil {
		return nil
	}
	out := new(NodePortConfig)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PersistenceConfig) DeepCopyInto(out *PersistenceConfig) {
	// PersistenceConfig holds only string value fields, so a shallow
	// struct assignment is a complete deep copy.
	// NOTE: generated code (see +k8s:deepcopy-gen markers) — do not edit by hand.
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistenceConfig.
func (in *PersistenceConfig) DeepCopy() *PersistenceConfig {
	if in == nil {
		return nil
	}
	out := new(PersistenceConfig)
	in.DeepCopyInto(out)
	return out
}

View File

@@ -1,50 +0,0 @@
package addressallocator
import (
"context"
"github.com/galal-hussein/k3k/pkg/apis/k3k.io/v1alpha1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
)
const (
	// AddressAllocatorController is the name this controller registers under.
	AddressAllocatorController = "address-allocator-controller"
)

// AddressAllocatorReconciler reconciles Cluster objects to allocate
// cluster/service CIDRs (the Reconcile method is currently a no-op stub).
type AddressAllocatorReconciler struct {
	Client client.Client
	Scheme *runtime.Scheme
}
// Add registers the address-allocator controller with the given manager and
// wires it to watch Cluster objects.
func Add(mgr manager.Manager) error {
	r := AddressAllocatorReconciler{
		Client: mgr.GetClient(),
		Scheme: mgr.GetScheme(),
	}

	// Name the local `ctrl` so it does not shadow the controller package.
	ctrl, err := controller.New(AddressAllocatorController, mgr, controller.Options{
		Reconciler:              &r,
		MaxConcurrentReconciles: 1,
	})
	if err != nil {
		return err
	}

	return ctrl.Watch(&source.Kind{Type: &v1alpha1.Cluster{}}, &handler.EnqueueRequestForObject{})
}
// Reconcile will allocate cluster/service cidrs to new clusters
func (r *AddressAllocatorReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
	// No-op stub: always reports success and never requeues.
	return reconcile.Result{}, nil
}

View File

@@ -1,17 +1,29 @@
package agent
import (
"strings"
"github.com/galal-hussein/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/galal-hussein/k3k/pkg/controller/util"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/util"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/pointer"
)
func Agent(cluster *v1alpha1.Cluster) *apps.Deployment {
image := util.K3SImage(cluster)
const agentName = "k3k-agent"
type Agent struct {
cluster *v1alpha1.Cluster
}
func New(cluster *v1alpha1.Cluster) *Agent {
return &Agent{
cluster: cluster,
}
}
func (a *Agent) Deploy() *apps.Deployment {
image := util.K3SImage(a.cluster)
const name = "k3k-agent"
@@ -21,17 +33,91 @@ func Agent(cluster *v1alpha1.Cluster) *apps.Deployment {
APIVersion: "apps/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: cluster.Name + "-" + name,
Namespace: util.ClusterNamespace(cluster),
Name: a.cluster.Name + "-" + name,
Namespace: util.ClusterNamespace(a.cluster),
},
Spec: apps.DeploymentSpec{
Replicas: cluster.Spec.Agents,
Replicas: a.cluster.Spec.Agents,
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"cluster": a.cluster.Name,
"type": "agent",
},
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"cluster": a.cluster.Name,
"type": "agent",
},
},
Spec: a.podSpec(image, name, a.cluster.Spec.AgentArgs, false),
},
},
}
}
func (a *Agent) StatefulAgent(cluster *v1alpha1.Cluster) *apps.StatefulSet {
image := util.K3SImage(cluster)
return &apps.StatefulSet{
TypeMeta: metav1.TypeMeta{
Kind: "Statefulset",
APIVersion: "apps/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: cluster.Name + "-" + agentName,
Namespace: util.ClusterNamespace(cluster),
},
Spec: apps.StatefulSetSpec{
ServiceName: cluster.Name + "-" + agentName + "-headless",
Replicas: cluster.Spec.Agents,
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"cluster": cluster.Name,
"type": "agent",
},
},
VolumeClaimTemplates: []v1.PersistentVolumeClaim{
{
TypeMeta: metav1.TypeMeta{
Kind: "PersistentVolumeClaim",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "varlibrancherk3s",
Namespace: util.ClusterNamespace(cluster),
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
StorageClassName: &cluster.Status.Persistence.StorageClassName,
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"storage": resource.MustParse(cluster.Status.Persistence.StorageRequestSize),
},
},
},
},
{
TypeMeta: metav1.TypeMeta{
Kind: "PersistentVolumeClaim",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "varlibkubelet",
Namespace: util.ClusterNamespace(cluster),
},
Spec: v1.PersistentVolumeClaimSpec{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"storage": resource.MustParse(cluster.Status.Persistence.StorageRequestSize),
},
},
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
StorageClassName: &cluster.Status.Persistence.StorageClassName,
},
},
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
@@ -39,16 +125,15 @@ func Agent(cluster *v1alpha1.Cluster) *apps.Deployment {
"type": "agent",
},
},
Spec: agentPodSpec(image, name, cluster.Spec.AgentArgs),
Spec: a.podSpec(image, agentName, cluster.Spec.AgentArgs, true),
},
},
}
}
func agentPodSpec(image, name string, args []string) v1.PodSpec {
privileged := true
return v1.PodSpec{
func (a *Agent) podSpec(image, name string, args []string, statefulSet bool) v1.PodSpec {
args = append([]string{"agent", "--config", "/opt/rancher/k3s/config.yaml"}, args...)
podSpec := v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "config",
@@ -82,18 +167,6 @@ func agentPodSpec(image, name string, args []string) v1.PodSpec {
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
{
Name: "varlibkubelet",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
{
Name: "varlibrancherk3s",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
{
Name: "varlog",
VolumeSource: v1.VolumeSource{
@@ -106,17 +179,12 @@ func agentPodSpec(image, name string, args []string) v1.PodSpec {
Name: name,
Image: image,
SecurityContext: &v1.SecurityContext{
Privileged: &privileged,
Privileged: pointer.Bool(true),
},
Command: []string{
"/bin/sh",
},
Args: []string{
"-c",
"/bin/k3s agent --config /opt/rancher/k3s/config.yaml " +
strings.Join(args, " ") +
" && true",
"/bin/k3s",
},
Args: args,
VolumeMounts: []v1.VolumeMount{
{
Name: "config",
@@ -157,4 +225,22 @@ func agentPodSpec(image, name string, args []string) v1.PodSpec {
},
},
}
if !statefulSet {
podSpec.Volumes = append(podSpec.Volumes, v1.Volume{
Name: "varlibkubelet",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
}, v1.Volume{
Name: "varlibrancherk3s",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
)
}
return podSpec
}

View File

@@ -0,0 +1,30 @@
package agent
import (
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/util"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// StatefulAgentService returns the headless Service backing the agent
// StatefulSet; its name must match StatefulSetSpec.ServiceName
// ("<cluster>-k3k-agent-headless").
func (a *Agent) StatefulAgentService(cluster *v1alpha1.Cluster) *v1.Service {
	return &v1.Service{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Service",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      cluster.Name + "-" + agentName + "-headless",
			Namespace: util.ClusterNamespace(cluster),
		},
		Spec: v1.ServiceSpec{
			Type:      v1.ServiceTypeClusterIP,
			ClusterIP: v1.ClusterIPNone,
			// BUG FIX: agent pods are labeled {"cluster": <name>, "type": "agent"}
			// (see the agent StatefulSet pod template); the previous selector
			// used "role": "agent" and therefore matched no pods.
			Selector: map[string]string{
				"cluster": cluster.Name,
				"type":    "agent",
			},
			Ports: []v1.ServicePort{},
		},
	}
}

View File

@@ -1,120 +0,0 @@
package cluster
import (
"context"
"fmt"
"net"
"time"
"github.com/galal-hussein/k3k/pkg/apis/k3k.io/v1alpha1"
"k8s.io/apimachinery/pkg/types"
)
const (
	// Names of the CIDRAllocationPool objects this controller reads and
	// updates when issuing cluster and service CIDRs.
	cidrAllocationClusterPoolName = "k3k-cluster-cidr-allocation-pool"
	cidrAllocationServicePoolName = "k3k-service-cidr-allocation-pool"

	// Defaults used when a cluster does not specify its own CIDRs.
	defaultClusterCIDR        = "10.44.0.0/16"
	defaultClusterServiceCIDR = "10.45.0.0/16"
)
// determineOctet determines which octet (1-4) of an IPv4 address varies for
// a subnet with the given number of mask bits mb. Mask sizes above 32 (or
// any value falling through) yield 0.
//
// The original cases carried redundant `mb >= 8 &&` guards that never
// affected the outcome (the switch is evaluated in order); they are dropped
// here, preserving the exact mapping: <=8 -> 1, <=16 -> 2, <=24 -> 3,
// <=32 -> 4, otherwise 0.
func determineOctet(mb int) uint8 {
	switch {
	case mb <= 8:
		return 1
	case mb <= 16:
		return 2
	case mb <= 24:
		return 3
	case mb <= 32:
		return 4
	default:
		return 0
	}
}
// generateSubnets generates all subnets for the given CIDR.
//
// Starting from the subnet *after* the given one, it repeatedly increments
// the varying octet (chosen by determineOctet) while the loop counter is
// below 254, returning each resulting "<ip>/<bits>" string.
//
// NOTE(review): ip is mutated in place each iteration; the strings are
// formatted from the same backing bytes as they change.
// NOTE(review): for a non-IPv4 CIDR, To4() returns nil and indexing it
// would panic — callers presumably pass IPv4 CIDRs only; confirm.
func generateSubnets(cidr string) ([]string, error) {
	_, ipNet, err := net.ParseCIDR(cidr)
	if err != nil {
		return nil, err
	}
	// Number of leading mask bits (e.g. 16 for a /16).
	usedBits, _ := ipNet.Mask.Size()
	octet := determineOctet(usedBits)
	ip := ipNet.IP.To4()
	octetVal := ip[octet-1]
	var subnets []string
	// i runs in lockstep with octetVal purely as the loop bound; the write
	// below advances the varying octet before each subnet is formatted.
	for i := octetVal; i < 254; i++ {
		octetVal++
		ip[octet-1] = octetVal
		subnets = append(subnets, fmt.Sprintf("%s/%d", ip, usedBits))
	}
	return subnets, nil
}
// nextCIDR retrieves the next available CIDR address from the given pool.
//
// It claims the first unassigned pool entry (empty ClusterName and zero
// Issued timestamp) for clusterName, persists the claim with a status
// update, and returns the parsed network.
//
// NOTE(review): if every entry is taken it returns (nil, nil) — callers
// must handle a nil *net.IPNet without an error.
func (c *ClusterReconciler) nextCIDR(ctx context.Context, cidrAllocationPoolName, clusterName string) (*net.IPNet, error) {
	var cidrPool v1alpha1.CIDRAllocationPool
	nn := types.NamespacedName{
		Name: cidrAllocationPoolName,
	}
	if err := c.Client.Get(ctx, nn, &cidrPool); err != nil {
		return nil, err
	}
	var ipNet *net.IPNet
	for i := 0; i < len(cidrPool.Status.Pool); i++ {
		if cidrPool.Status.Pool[i].ClusterName == "" && cidrPool.Status.Pool[i].Issued == 0 {
			// Claim the slot: record the owner and the issue time.
			cidrPool.Status.Pool[i].ClusterName = clusterName
			cidrPool.Status.Pool[i].Issued = time.Now().Unix()
			_, ipn, err := net.ParseCIDR(cidrPool.Status.Pool[i].IPNet)
			if err != nil {
				return nil, err
			}
			// Persist the claim before handing out the network.
			if err := c.Client.Status().Update(ctx, &cidrPool); err != nil {
				return nil, err
			}
			ipNet = ipn
			break
		}
	}
	return ipNet, nil
}
// releaseCIDR updates the given CIDR pool by marking the address as available.
//
// Every pool entry claimed by clusterName is cleared (ClusterName reset,
// Issued zeroed); the pool status is then persisted with a single update.
//
// BUG FIX: the original issued a Status().Update on every loop iteration —
// inside the loop and outside the match check — spamming the API server and
// updating even when nothing had changed. The update now happens once,
// after the loop, and only when an entry was actually released.
func (c *ClusterReconciler) releaseCIDR(ctx context.Context, cidrAllocationPoolName, clusterName string) error {
	var cidrPool v1alpha1.CIDRAllocationPool
	nn := types.NamespacedName{
		Name: cidrAllocationPoolName,
	}
	if err := c.Client.Get(ctx, nn, &cidrPool); err != nil {
		return err
	}
	released := false
	for i := 0; i < len(cidrPool.Status.Pool); i++ {
		if cidrPool.Status.Pool[i].ClusterName == clusterName {
			cidrPool.Status.Pool[i].ClusterName = ""
			cidrPool.Status.Pool[i].Issued = 0
			released = true
		}
	}
	if !released {
		return nil
	}
	return c.Client.Status().Update(ctx, &cidrPool)
}

View File

@@ -0,0 +1,583 @@
package cluster
import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"net/url"
"reflect"
"strings"
"time"
certutil "github.com/rancher/dynamiclistener/cert"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
"github.com/rancher/k3k/pkg/controller/cluster/config"
"github.com/rancher/k3k/pkg/controller/cluster/server"
"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
"github.com/rancher/k3k/pkg/controller/kubeconfig"
"github.com/rancher/k3k/pkg/controller/util"
"github.com/sirupsen/logrus"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
clientv3 "go.etcd.io/etcd/client/v3"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/retry"
"k8s.io/klog"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
)
const (
	// clusterController is the controller name registered with the manager.
	clusterController = "k3k-cluster-controller"
	// clusterFinalizerName gates Cluster deletion so teardown runs first.
	clusterFinalizerName = "cluster.k3k.io/finalizer"
	// etcdPodFinalizerName gates server-pod deletion so the pod can be
	// removed from the etcd member list first (see handleServerPod).
	etcdPodFinalizerName = "etcdpod.k3k.io/finalizer"
	// ClusterInvalidName is a reserved name — presumably rejected by
	// validate(); confirm where it is enforced.
	ClusterInvalidName = "system"

	maxConcurrentReconciles = 1

	// Defaults applied in createCluster when the spec leaves them empty.
	defaultClusterCIDR           = "10.44.0.0/16"
	defaultClusterServiceCIDR    = "10.45.0.0/16"
	defaultStoragePersistentSize = "1G"

	// memberRemovalTimeout bounds the etcd member list/remove calls.
	memberRemovalTimeout = time.Minute * 1
)
// ClusterReconciler reconciles Cluster objects: it manages the per-cluster
// namespace, config secrets, server statefulset, agent deployment, optional
// ingress, and the etcd-pod finalizers used for member cleanup.
type ClusterReconciler struct {
	Client ctrlruntimeclient.Client
	Scheme *runtime.Scheme
}
// Add adds a new controller to the manager
func Add(ctx context.Context, mgr manager.Manager) error {
	r := ClusterReconciler{
		Client: mgr.GetClient(),
		Scheme: mgr.GetScheme(),
	}

	// Build the controller directly; this could be replaced by the newer
	// builder functionality in controller-runtime. `ctrl` avoids shadowing
	// the controller package.
	ctrl, err := controller.New(clusterController, mgr, controller.Options{
		Reconciler:              &r,
		MaxConcurrentReconciles: maxConcurrentReconciles,
	})
	if err != nil {
		return err
	}

	// Reconcile on Cluster changes, and on pods owned by statefulsets so
	// server-pod (etcd) events reach the reconciler as well.
	if err := ctrl.Watch(&source.Kind{Type: &v1alpha1.Cluster{}}, &handler.EnqueueRequestForObject{}); err != nil {
		return err
	}
	return ctrl.Watch(&source.Kind{Type: &v1.Pod{}},
		&handler.EnqueueRequestForOwner{IsController: true, OwnerType: &apps.StatefulSet{}})
}
// Reconcile handles two request shapes:
//
//  1. Namespaced requests (pod events from the watched statefulsets): the
//     cluster name is derived from the namespace, the "role=server" pods in
//     it are listed, and etcd finalizer handling runs for each of them.
//  2. Cluster-scoped requests: ensure the finalizer and namespace and
//     provision the cluster, or unwind finalizers when the Cluster is
//     being deleted.
func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
	var (
		cluster     v1alpha1.Cluster
		podList     v1.PodList
		clusterName string
	)
	if req.Namespace != "" {
		// The namespace is expected to look like "<prefix>-<cluster>".
		// NOTE(review): taking s[1] assumes the cluster name holds no extra
		// '-' segments — confirm against util.ClusterNamespace.
		s := strings.Split(req.Namespace, "-")
		if len(s) <= 1 {
			return reconcile.Result{}, util.LogAndReturnErr("failed to get cluster namespace", nil)
		}
		clusterName = s[1]
		// NOTE(review): this declaration shadows the outer `cluster`. When
		// the Cluster no longer exists (IsNotFound is swallowed) the zero
		// value flows on; handleServerPod uses Name == "" as "deleted".
		var cluster v1alpha1.Cluster
		if err := c.Client.Get(ctx, types.NamespacedName{Name: clusterName}, &cluster); err != nil {
			if !apierrors.IsNotFound(err) {
				return reconcile.Result{}, err
			}
		}
		// Only server pods in this namespace are of interest here.
		matchingLabels := ctrlruntimeclient.MatchingLabels(map[string]string{"role": "server"})
		listOpts := &ctrlruntimeclient.ListOptions{Namespace: req.Namespace}
		matchingLabels.ApplyToList(listOpts)
		if err := c.Client.List(ctx, &podList, listOpts); err != nil {
			return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
		}
		for _, pod := range podList.Items {
			klog.Infof("Handle etcd server pod [%s/%s]", pod.Namespace, pod.Name)
			if err := c.handleServerPod(ctx, cluster, &pod); err != nil {
				return reconcile.Result{}, util.LogAndReturnErr("failed to handle etcd pod", err)
			}
		}
		return reconcile.Result{}, nil
	}
	if err := c.Client.Get(ctx, req.NamespacedName, &cluster); err != nil {
		return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
	}
	if cluster.DeletionTimestamp.IsZero() {
		// Ensure our finalizer is present before any provisioning work.
		if !controllerutil.ContainsFinalizer(&cluster, clusterFinalizerName) {
			controllerutil.AddFinalizer(&cluster, clusterFinalizerName)
			if err := c.Client.Update(ctx, &cluster); err != nil {
				return reconcile.Result{}, util.LogAndReturnErr("failed to add cluster finalizer", err)
			}
		}
		// we create a namespace for each new cluster
		var ns v1.Namespace
		objKey := ctrlruntimeclient.ObjectKey{
			Name: util.ClusterNamespace(&cluster),
		}
		// Only "not found" is tolerated here; createCluster (re)creates it.
		if err := c.Client.Get(ctx, objKey, &ns); err != nil {
			if !apierrors.IsNotFound(err) {
				return reconcile.Result{}, util.LogAndReturnErr("failed to get cluster namespace "+util.ClusterNamespace(&cluster), err)
			}
		}
		klog.Infof("enqueue cluster [%s]", cluster.Name)
		if err := c.createCluster(ctx, &cluster); err != nil {
			return reconcile.Result{}, util.LogAndReturnErr("failed to create cluster", err)
		}
		return reconcile.Result{}, nil
	}
	// Deletion path from here on.
	// remove finalizer from the server pods and update them.
	matchingLabels := ctrlruntimeclient.MatchingLabels(map[string]string{"role": "server"})
	listOpts := &ctrlruntimeclient.ListOptions{Namespace: util.ClusterNamespace(&cluster)}
	matchingLabels.ApplyToList(listOpts)
	if err := c.Client.List(ctx, &podList, listOpts); err != nil {
		return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
	}
	for _, pod := range podList.Items {
		if controllerutil.ContainsFinalizer(&pod, etcdPodFinalizerName) {
			controllerutil.RemoveFinalizer(&pod, etcdPodFinalizerName)
			if err := c.Client.Update(ctx, &pod); err != nil {
				return reconcile.Result{}, util.LogAndReturnErr("failed to remove etcd finalizer", err)
			}
		}
	}
	if controllerutil.ContainsFinalizer(&cluster, clusterFinalizerName) {
		// remove finalizer from the cluster and update it.
		controllerutil.RemoveFinalizer(&cluster, clusterFinalizerName)
		if err := c.Client.Update(ctx, &cluster); err != nil {
			return reconcile.Result{}, util.LogAndReturnErr("failed to remove cluster finalizer", err)
		}
	}
	klog.Infof("deleting cluster [%s]", cluster.Name)
	return reconcile.Result{}, nil
}
// createCluster drives the full provisioning flow for a Cluster: validate,
// default the persistence/CIDR status fields, then create the namespace,
// service, config secrets, server statefulset, agents, optional ingress,
// and bootstrap kubeconfig secret, finally persisting the updated Cluster.
func (c *ClusterReconciler) createCluster(ctx context.Context, cluster *v1alpha1.Cluster) error {
	// Invalid specs are logged and deliberately not retried (nil return).
	if err := c.validate(cluster); err != nil {
		klog.Errorf("invalid change: %v", err)
		return nil
	}
	s := server.New(cluster, c.Client)
	// Mirror the persistence spec into status, defaulting the request size.
	if cluster.Spec.Persistence != nil {
		cluster.Status.Persistence = cluster.Spec.Persistence
		if cluster.Spec.Persistence.StorageRequestSize == "" {
			// default to 1G of request size
			cluster.Status.Persistence.StorageRequestSize = defaultStoragePersistentSize
		}
	}
	if err := c.Client.Update(ctx, cluster); err != nil {
		return util.LogAndReturnErr("failed to update cluster with persistence type", err)
	}
	// create a new namespace for the cluster
	if err := c.createNamespace(ctx, cluster); err != nil {
		return util.LogAndReturnErr("failed to create ns", err)
	}
	// Default the CIDRs in status when the spec leaves them empty.
	cluster.Status.ClusterCIDR = cluster.Spec.ClusterCIDR
	if cluster.Status.ClusterCIDR == "" {
		cluster.Status.ClusterCIDR = defaultClusterCIDR
	}
	cluster.Status.ServiceCIDR = cluster.Spec.ServiceCIDR
	if cluster.Status.ServiceCIDR == "" {
		cluster.Status.ServiceCIDR = defaultClusterServiceCIDR
	}
	klog.Infof("creating cluster service")
	serviceIP, err := c.createClusterService(ctx, cluster, s)
	if err != nil {
		return util.LogAndReturnErr("failed to create cluster service", err)
	}
	if err := c.createClusterConfigs(ctx, cluster, serviceIP); err != nil {
		return util.LogAndReturnErr("failed to create cluster configs", err)
	}
	// creating statefulsets in case the user chose a persistence type other than ephemeral
	if err := c.server(ctx, cluster, s); err != nil {
		return util.LogAndReturnErr("failed to create servers", err)
	}
	if err := c.agent(ctx, cluster); err != nil {
		return util.LogAndReturnErr("failed to create agents", err)
	}
	if cluster.Spec.Expose != nil {
		if cluster.Spec.Expose.Ingress != nil {
			serverIngress, err := s.Ingress(ctx, c.Client)
			if err != nil {
				return util.LogAndReturnErr("failed to create ingress object", err)
			}
			// AlreadyExists is fine — the call is idempotent.
			if err := c.Client.Create(ctx, serverIngress); err != nil {
				if !apierrors.IsAlreadyExists(err) {
					return util.LogAndReturnErr("failed to create server ingress", err)
				}
			}
		}
	}
	// The bootstrap secret carries the data needed to build a kubeconfig.
	bootstrapSecret, err := bootstrap.Generate(ctx, cluster, serviceIP)
	if err != nil {
		return util.LogAndReturnErr("failed to generate new kubeconfig", err)
	}
	if err := c.Client.Create(ctx, bootstrapSecret); err != nil {
		if !apierrors.IsAlreadyExists(err) {
			return util.LogAndReturnErr("failed to create kubeconfig secret", err)
		}
	}
	return c.Client.Update(ctx, cluster)
}
// createNamespace ensures the cluster's dedicated namespace exists, owned
// by the Cluster object so it is cleaned up along with it.
func (c *ClusterReconciler) createNamespace(ctx context.Context, cluster *v1alpha1.Cluster) error {
	ns := v1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name: util.ClusterNamespace(cluster),
		},
	}
	if err := controllerutil.SetControllerReference(cluster, &ns, c.Scheme); err != nil {
		return err
	}

	// An already-existing namespace is fine; anything else is an error.
	if err := c.Client.Create(ctx, &ns); err != nil && !apierrors.IsAlreadyExists(err) {
		return util.LogAndReturnErr("failed to create ns", err)
	}
	return nil
}
// createClusterConfigs creates the three configuration objects the cluster
// pods consume: the init-server config, the regular server config, and the
// agent config secret. Each is owned by the Cluster, and AlreadyExists is
// tolerated so the whole call is idempotent.
func (c *ClusterReconciler) createClusterConfigs(ctx context.Context, cluster *v1alpha1.Cluster, serviceIP string) error {
	// create init node config
	initServerConfig, err := config.Server(cluster, true, serviceIP)
	if err != nil {
		return err
	}
	if err := controllerutil.SetControllerReference(cluster, initServerConfig, c.Scheme); err != nil {
		return err
	}
	if err := c.Client.Create(ctx, initServerConfig); err != nil {
		if !apierrors.IsAlreadyExists(err) {
			return err
		}
	}
	// create servers configuration
	serverConfig, err := config.Server(cluster, false, serviceIP)
	if err != nil {
		return err
	}
	if err := controllerutil.SetControllerReference(cluster, serverConfig, c.Scheme); err != nil {
		return err
	}
	if err := c.Client.Create(ctx, serverConfig); err != nil {
		if !apierrors.IsAlreadyExists(err) {
			return err
		}
	}
	// create agents configuration
	agentsConfig := agentConfig(cluster, serviceIP)
	if err := controllerutil.SetControllerReference(cluster, &agentsConfig, c.Scheme); err != nil {
		return err
	}
	if err := c.Client.Create(ctx, &agentsConfig); err != nil {
		if !apierrors.IsAlreadyExists(err) {
			return err
		}
	}
	return nil
}
// createClusterService creates the cluster's server Service (owned by the
// Cluster, AlreadyExists tolerated) and returns the ClusterIP the API
// server assigned to the fixed "k3k-server-service" object in the cluster
// namespace.
func (c *ClusterReconciler) createClusterService(ctx context.Context, cluster *v1alpha1.Cluster, server *server.Server) (string, error) {
	// create cluster service
	clusterService := server.Service(cluster)
	if err := controllerutil.SetControllerReference(cluster, clusterService, c.Scheme); err != nil {
		return "", err
	}
	if err := c.Client.Create(ctx, clusterService); err != nil {
		if !apierrors.IsAlreadyExists(err) {
			return "", err
		}
	}
	// Re-read the service to obtain the allocated ClusterIP.
	var service v1.Service
	objKey := ctrlruntimeclient.ObjectKey{
		Namespace: util.ClusterNamespace(cluster),
		Name:      "k3k-server-service",
	}
	if err := c.Client.Get(ctx, objKey, &service); err != nil {
		return "", err
	}
	return service.Spec.ClusterIP, nil
}
// server provisions the server side of the cluster: the headless service
// required by the statefulset, then the server statefulset itself.
func (c *ClusterReconciler) server(ctx context.Context, cluster *v1alpha1.Cluster, server *server.Server) error {
	// Headless service backing the server statefulset.
	svc := server.StatefulServerService(cluster)
	if err := controllerutil.SetControllerReference(cluster, svc, c.Scheme); err != nil {
		return err
	}
	if err := c.Client.Create(ctx, svc); err != nil && !apierrors.IsAlreadyExists(err) {
		return err
	}

	ss, err := server.StatefulServer(ctx, cluster)
	if err != nil {
		return err
	}
	if err := controllerutil.SetControllerReference(cluster, ss, c.Scheme); err != nil {
		return err
	}
	return c.ensure(ctx, ss, false)
}
// agent creates (or updates) the agents deployment for the cluster, owned
// by the Cluster object.
func (c *ClusterReconciler) agent(ctx context.Context, cluster *v1alpha1.Cluster) error {
	deploy := agent.New(cluster).Deploy()
	if err := controllerutil.SetControllerReference(cluster, deploy, c.Scheme); err != nil {
		return err
	}
	return c.ensure(ctx, deploy, false)
}
// agentConfig builds the secret holding the k3s agent config.yaml for the
// cluster, pointing agents at the server service IP.
func agentConfig(cluster *v1alpha1.Cluster, serviceIP string) v1.Secret {
	return v1.Secret{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Secret",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      "k3k-agent-config",
			Namespace: util.ClusterNamespace(cluster),
		},
		Data: map[string][]byte{
			"config.yaml": []byte(agentData(serviceIP, cluster.Spec.Token)),
		},
	}
}
// agentData renders the minimal k3s agent configuration: the server URL
// and the cluster join token, one per line.
func agentData(serviceIP, token string) string {
	return fmt.Sprintf("server: https://%s:6443\ntoken: %s", serviceIP, token)
}
// handleServerPod manages the etcd finalizer on a server pod.
//
// Pods without a "role" label are rejected; non-"server" roles are ignored.
// For a pod being deleted it removes the member from the etcd cluster
// (unless the owning Cluster is already gone, detected by Name == "") and
// then drops the finalizer. For a live pod it ensures the finalizer is set.
//
// BUG FIX: the deletion branch previously fell through to the bottom of the
// function, which immediately re-added the finalizer that had just been
// removed — blocking pod deletion. It now returns after the removal, the
// same way the cluster-already-deleted sub-branch does.
func (c *ClusterReconciler) handleServerPod(ctx context.Context, cluster v1alpha1.Cluster, pod *v1.Pod) error {
	if _, ok := pod.Labels["role"]; ok {
		if pod.Labels["role"] != "server" {
			return nil
		}
	} else {
		return errors.New("server pod has no role label")
	}
	// if etcd pod is marked for deletion then we need to remove it from the etcd member list before deletion
	if !pod.DeletionTimestamp.IsZero() {
		// check if cluster is deleted then remove the finalizer from the pod
		if cluster.Name == "" {
			if controllerutil.ContainsFinalizer(pod, etcdPodFinalizerName) {
				controllerutil.RemoveFinalizer(pod, etcdPodFinalizerName)
				if err := c.Client.Update(ctx, pod); err != nil {
					return err
				}
			}
			return nil
		}
		tlsConfig, err := c.getETCDTLS(&cluster)
		if err != nil {
			return err
		}
		// remove server from etcd
		client, err := clientv3.New(clientv3.Config{
			Endpoints: []string{
				"https://k3k-server-service." + pod.Namespace + ":2379",
			},
			TLS: tlsConfig,
		})
		if err != nil {
			return err
		}
		if err := removePeer(ctx, client, pod.Name, pod.Status.PodIP); err != nil {
			return err
		}
		// remove our finalizer from the list and update it.
		if controllerutil.ContainsFinalizer(pod, etcdPodFinalizerName) {
			controllerutil.RemoveFinalizer(pod, etcdPodFinalizerName)
			if err := c.Client.Update(ctx, pod); err != nil {
				return err
			}
		}
		// Do NOT fall through: the block below would re-add the finalizer
		// to a terminating pod and prevent its deletion.
		return nil
	}
	if !controllerutil.ContainsFinalizer(pod, etcdPodFinalizerName) {
		controllerutil.AddFinalizer(pod, etcdPodFinalizerName)
		return c.Client.Update(ctx, pod)
	}
	return nil
}
// removePeer removes a peer from the cluster. The peer name and IP address must both match.
// Members are matched by substring on the member name, then by
// comparing each peer URL's hostname against address. A member that is
// already gone (ErrGRPCMemberNotFound) counts as success. The whole
// operation is bounded by memberRemovalTimeout.
func removePeer(ctx context.Context, client *clientv3.Client, name, address string) error {
	ctx, cancel := context.WithTimeout(ctx, memberRemovalTimeout)
	defer cancel()
	members, err := client.MemberList(ctx)
	if err != nil {
		return err
	}
	for _, member := range members.Members {
		if !strings.Contains(member.Name, name) {
			continue
		}
		for _, peerURL := range member.PeerURLs {
			u, err := url.Parse(peerURL)
			if err != nil {
				return err
			}
			if u.Hostname() == address {
				logrus.Infof("Removing name=%s id=%d address=%s from etcd", member.Name, member.ID, address)
				// Tolerate a member that was already removed.
				_, err := client.MemberRemove(ctx, member.ID)
				if errors.Is(err, rpctypes.ErrGRPCMemberNotFound) {
					return nil
				}
				return err
			}
		}
	}
	return nil
}
// getETCDTLS assembles a TLS client configuration for the virtual
// cluster's etcd: it fetches the decoded bootstrap CA bundle from the
// k3s server, mints an etcd client certificate from the etcd server
// CA, and trusts that CA for the connection.
func (c *ClusterReconciler) getETCDTLS(cluster *v1alpha1.Cluster) (*tls.Config, error) {
	klog.Infof("generating etcd TLS client certificate for cluster [%s]", cluster.Name)
	token := cluster.Spec.Token
	endpoint := "k3k-server-service." + util.ClusterNamespace(cluster)
	var b *bootstrap.ControlRuntimeBootstrap
	// Retry on every error: the server may not be serving bootstrap
	// data yet while the virtual cluster is still starting.
	if err := retry.OnError(retry.DefaultBackoff, func(err error) bool {
		return true
	}, func() error {
		var err error
		b, err = bootstrap.DecodedBootstrap(token, endpoint)
		return err
	}); err != nil {
		return nil, err
	}
	// NOTE(review): the literal 0 argument's meaning (likely an expiry
	// or serial parameter) is not visible here — confirm against
	// kubeconfig.CreateClientCertKey before relying on cert lifetime.
	etcdCert, etcdKey, err := kubeconfig.CreateClientCertKey("etcd-client", nil, nil, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, 0, b.ETCDServerCA.Content, b.ETCDServerCAKey.Content)
	if err != nil {
		return nil, err
	}
	clientCert, err := tls.X509KeyPair(etcdCert, etcdKey)
	if err != nil {
		return nil, err
	}
	// create rootCA CertPool
	cert, err := certutil.ParseCertsPEM([]byte(b.ETCDServerCA.Content))
	if err != nil {
		return nil, err
	}
	pool := x509.NewCertPool()
	pool.AddCert(cert[0])
	return &tls.Config{
		RootCAs:      pool,
		Certificates: []tls.Certificate{clientCert},
	}, nil
}
// validate rejects clusters whose name equals the reserved invalid
// name; such clusters are left untouched by the reconciler.
func (c *ClusterReconciler) validate(cluster *v1alpha1.Cluster) error {
	if cluster.Name != ClusterInvalidName {
		return nil
	}
	return errors.New("invalid cluster name " + cluster.Name + " no action will be taken")
}
// ensure reconciles obj against the live cluster state: create it when
// absent, otherwise update in place — or delete-and-recreate when
// requiresRecreate is set (used for objects with immutable fields such
// as configmaps and secrets).
func (c *ClusterReconciler) ensure(ctx context.Context, obj ctrlruntimeclient.Object, requiresRecreate bool) error {
	exists := true
	existingObject := obj.DeepCopyObject().(ctrlruntimeclient.Object)
	if err := c.Client.Get(ctx, types.NamespacedName{Namespace: obj.GetNamespace(), Name: obj.GetName()}, existingObject); err != nil {
		if !apierrors.IsNotFound(err) {
			return fmt.Errorf("failed to get Object(%T): %w", existingObject, err)
		}
		exists = false
	}
	if !exists {
		// if not exists create object
		if err := c.Client.Create(ctx, obj); err != nil {
			return err
		}
		return nil
	}
	// if exists then apply update or recreate if necessary
	// NOTE(review): DeepEqual compares the desired object against the
	// live one, which carries server-populated fields (resourceVersion,
	// status, defaults) — this short-circuit will rarely trigger;
	// confirm whether a field-aware comparison was intended.
	if reflect.DeepEqual(obj.(metav1.Object), existingObject.(metav1.Object)) {
		return nil
	}
	if !requiresRecreate {
		if err := c.Client.Update(ctx, obj); err != nil {
			return err
		}
	} else {
		// this handles object that needs recreation including configmaps and secrets
		if err := c.Client.Delete(ctx, obj); err != nil {
			return err
		}
		if err := c.Client.Create(ctx, obj); err != nil {
			return err
		}
	}
	return nil
}

View File

@@ -3,14 +3,14 @@ package config
import (
"fmt"
"github.com/galal-hussein/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/galal-hussein/k3k/pkg/controller/util"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/util"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func AgentConfig(cluster *v1alpha1.Cluster, serviceIP string) v1.Secret {
config := agentConfigData(serviceIP, cluster.Spec.Token)
func Agent(cluster *v1alpha1.Cluster, serviceIP string) v1.Secret {
config := agentData(serviceIP, cluster.Spec.Token)
return v1.Secret{
TypeMeta: metav1.TypeMeta{
@@ -27,7 +27,8 @@ func AgentConfig(cluster *v1alpha1.Cluster, serviceIP string) v1.Secret {
}
}
func agentConfigData(serviceIP, token string) string {
func agentData(serviceIP, token string) string {
return fmt.Sprintf(`server: https://%s:6443
token: %s`, serviceIP, token)
token: %s
with-node-id: true`, serviceIP, token)
}

View File

@@ -1,18 +1,24 @@
package config
import (
"github.com/galal-hussein/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/galal-hussein/k3k/pkg/controller/util"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/util"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func ServerConfig(cluster *v1alpha1.Cluster, init bool, serviceIP string) (*v1.Secret, error) {
func Server(cluster *v1alpha1.Cluster, init bool, serviceIP string) (*v1.Secret, error) {
name := "k3k-server-config"
if init {
name = "k3k-init-server-config"
}
cluster.Status.TLSSANs = append(cluster.Spec.TLSSANs,
serviceIP,
"k3k-server-service",
"k3k-server-service."+util.ClusterNamespace(cluster),
)
config := serverConfigData(serviceIP, cluster)
if init {
config = initConfigData(cluster)
@@ -33,7 +39,7 @@ func ServerConfig(cluster *v1alpha1.Cluster, init bool, serviceIP string) (*v1.S
}
func serverConfigData(serviceIP string, cluster *v1alpha1.Cluster) string {
return "cluster-init: true\nserver: https://" + serviceIP + ":6443" + serverOptions(cluster)
return "cluster-init: true\nserver: https://" + serviceIP + ":6443\n" + serverOptions(cluster)
}
func initConfigData(cluster *v1alpha1.Cluster) string {
@@ -47,15 +53,22 @@ func serverOptions(cluster *v1alpha1.Cluster) string {
if cluster.Spec.Token != "" {
opts = "token: " + cluster.Spec.Token + "\n"
}
if cluster.Spec.ClusterCIDR != "" {
opts = opts + "cluster-cidr: " + cluster.Spec.ClusterCIDR + "\n"
if cluster.Status.ClusterCIDR != "" {
opts = opts + "cluster-cidr: " + cluster.Status.ClusterCIDR + "\n"
}
if cluster.Spec.ServiceCIDR != "" {
opts = opts + "service-cidr: " + cluster.Spec.ServiceCIDR + "\n"
if cluster.Status.ServiceCIDR != "" {
opts = opts + "service-cidr: " + cluster.Status.ServiceCIDR + "\n"
}
if cluster.Spec.ClusterDNS != "" {
opts = opts + "cluster-dns: " + cluster.Spec.ClusterDNS + "\n"
}
if len(cluster.Status.TLSSANs) > 0 {
opts = opts + "tls-san:\n"
for _, addr := range cluster.Status.TLSSANs {
opts = opts + "- " + addr + "\n"
}
}
// TODO: Add extra args to the options
return opts
}

View File

@@ -1,363 +0,0 @@
package cluster
import (
"context"
"github.com/galal-hussein/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/galal-hussein/k3k/pkg/controller/cluster/agent"
"github.com/galal-hussein/k3k/pkg/controller/cluster/config"
"github.com/galal-hussein/k3k/pkg/controller/cluster/server"
"github.com/galal-hussein/k3k/pkg/controller/util"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
)
const (
clusterController = "k3k-cluster-controller"
clusterFinalizerName = "cluster.k3k.io/finalizer"
)
// ClusterReconciler reconciles Cluster objects, provisioning the
// per-cluster namespace, configs, service, and workloads through the
// manager-supplied client and scheme.
type ClusterReconciler struct {
	Client client.Client
	Scheme *runtime.Scheme
}
// Add adds a new controller to the manager: it seeds the cluster and
// service CIDR allocation pools (tolerating pools that already exist)
// and registers a watch on Cluster objects.
func Add(ctx context.Context, mgr manager.Manager) error {
	// initialize a new Reconciler
	reconciler := ClusterReconciler{
		Client: mgr.GetClient(),
		Scheme: mgr.GetScheme(),
	}
	// Seed both allocation pools the reconciler hands CIDRs out of.
	if err := createCIDRPool(ctx, reconciler.Client, cidrAllocationClusterPoolName, defaultClusterCIDR); err != nil {
		return err
	}
	if err := createCIDRPool(ctx, reconciler.Client, cidrAllocationServicePoolName, defaultClusterServiceCIDR); err != nil {
		return err
	}
	// create a new controller and add it to the manager
	// this can be replaced by the new builder functionality in controller-runtime
	// (named ctrl so the local does not shadow the "controller" package)
	ctrl, err := controller.New(clusterController, mgr, controller.Options{
		Reconciler:              &reconciler,
		MaxConcurrentReconciles: 1,
	})
	if err != nil {
		return err
	}
	return ctrl.Watch(&source.Kind{Type: &v1alpha1.Cluster{}}, &handler.EnqueueRequestForObject{})
}

// createCIDRPool generates subnets for cidr and creates the named
// CIDRAllocationPool holding them. A pool left over from a previous
// run is treated as success.
func createCIDRPool(ctx context.Context, cl client.Client, name, cidr string) error {
	subnets, err := generateSubnets(cidr)
	if err != nil {
		return err
	}
	allocations := make([]v1alpha1.Allocation, 0, len(subnets))
	for _, s := range subnets {
		allocations = append(allocations, v1alpha1.Allocation{
			IPNet: s,
		})
	}
	pool := v1alpha1.CIDRAllocationPool{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
		},
		Spec: v1alpha1.CIDRAllocationPoolSpec{
			DefaultClusterCIDR: defaultClusterCIDR,
		},
		Status: v1alpha1.CIDRAllocationPoolStatus{
			Pool: allocations,
		},
	}
	// BUGFIX: the original returned nil for any non-conflict error
	// (silently swallowing real failures) and returned the error on a
	// benign "already exists" conflict. Create on an existing object
	// yields AlreadyExists, which is the condition to ignore — matching
	// every other Create call in this controller.
	if err := cl.Create(ctx, &pool); err != nil && !apierrors.IsAlreadyExists(err) {
		return err
	}
	return nil
}
// Reconcile drives a Cluster toward its desired state: live clusters
// get the finalizer plus full provisioning via createCluster; clusters
// being deleted get their CIDR released and the finalizer removed.
func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
	var cluster v1alpha1.Cluster
	if err := c.Client.Get(ctx, req.NamespacedName, &cluster); err != nil {
		return reconcile.Result{}, client.IgnoreNotFound(err)
	}
	if cluster.DeletionTimestamp.IsZero() {
		// Register our finalizer before provisioning so deletion cannot
		// race past the cleanup branch below.
		if !controllerutil.ContainsFinalizer(&cluster, clusterFinalizerName) {
			controllerutil.AddFinalizer(&cluster, clusterFinalizerName)
			if err := c.Client.Update(ctx, &cluster); err != nil {
				return reconcile.Result{}, err
			}
		}
		// we create a namespace for each new cluster
		var ns v1.Namespace
		objKey := client.ObjectKey{
			Name: util.ClusterNamespace(&cluster),
		}
		// Only unexpected lookup errors abort; NotFound is handled by
		// createCluster, which creates the namespace.
		if err := c.Client.Get(ctx, objKey, &ns); err != nil {
			if !apierrors.IsNotFound(err) {
				return reconcile.Result{}, util.WrapErr("failed to get cluster namespace "+util.ClusterNamespace(&cluster), err)
			}
		}
		klog.Infof("enqueue cluster [%s]", cluster.Name)
		return reconcile.Result{}, c.createCluster(ctx, &cluster)
	}
	// Deletion path: release the allocated CIDR, then drop the
	// finalizer so Kubernetes can finish removing the object.
	if controllerutil.ContainsFinalizer(&cluster, clusterFinalizerName) {
		// TODO: handle CIDR deletion
		if err := c.releaseCIDR(ctx, cluster.Status.ClusterCIDR, cluster.Name); err != nil {
			return reconcile.Result{}, err
		}
		// remove our finalizer from the list and update it.
		controllerutil.RemoveFinalizer(&cluster, clusterFinalizerName)
		if err := c.Client.Update(ctx, &cluster); err != nil {
			return reconcile.Result{}, err
		}
	}
	klog.Infof("deleting cluster [%s]", cluster.Name)
	return reconcile.Result{}, nil
}
// createCluster provisions everything a virtual cluster needs:
// namespace, CIDR allocations (when not user-specified), the server
// service, config secrets, server/agent deployments, an optional
// ingress, and the admin kubeconfig secret. It finally persists the
// mutated Cluster (status CIDR fields) back to the API server.
func (c *ClusterReconciler) createCluster(ctx context.Context, cluster *v1alpha1.Cluster) error {
	// create a new namespace for the cluster
	if err := c.createNamespace(ctx, cluster); err != nil {
		return util.WrapErr("failed to create ns", err)
	}
	// Allocate a cluster CIDR only when neither spec nor status set one.
	if cluster.Spec.ClusterCIDR == "" && cluster.Status.ClusterCIDR == "" {
		clusterCIDR, err := c.nextCIDR(ctx, cidrAllocationClusterPoolName, cluster.Name)
		if err != nil {
			return err
		}
		cluster.Status.ClusterCIDR = clusterCIDR.String()
	}
	// Same for the service CIDR.
	if cluster.Spec.ServiceCIDR == "" && cluster.Status.ServiceCIDR == "" {
		serviceCIDR, err := c.nextCIDR(ctx, cidrAllocationServicePoolName, cluster.Name)
		if err != nil {
			return err
		}
		cluster.Status.ServiceCIDR = serviceCIDR.String()
	}
	serviceIP, err := c.createClusterService(ctx, cluster)
	if err != nil {
		return util.WrapErr("failed to create cluster service", err)
	}
	if err := c.createClusterConfigs(ctx, cluster, serviceIP); err != nil {
		return util.WrapErr("failed to create cluster configs", err)
	}
	if err := c.createDeployments(ctx, cluster); err != nil {
		return util.WrapErr("failed to create servers and agents deployment", err)
	}
	if cluster.Spec.Expose.Ingress.Enabled {
		serverIngress, err := server.Ingress(ctx, cluster, c.Client)
		if err != nil {
			return util.WrapErr("failed to create ingress object", err)
		}
		// An ingress left over from a previous reconcile is fine.
		if err := c.Client.Create(ctx, serverIngress); err != nil {
			if !apierrors.IsAlreadyExists(err) {
				return util.WrapErr("failed to create server ingress", err)
			}
		}
	}
	kubeconfigSecret, err := server.GenerateNewKubeConfig(ctx, cluster, serviceIP)
	if err != nil {
		return util.WrapErr("failed to generate new kubeconfig", err)
	}
	if err := c.Client.Create(ctx, kubeconfigSecret); err != nil {
		if !apierrors.IsAlreadyExists(err) {
			return util.WrapErr("failed to create kubeconfig secret", err)
		}
	}
	// NOTE(review): status fields are persisted with a plain Update;
	// confirm the CRD does not use a status subresource, which would
	// require Status().Update instead.
	return c.Client.Update(ctx, cluster)
}
// createNamespace ensures the per-cluster namespace exists and is
// owned by the Cluster resource; an already-existing namespace is
// treated as success.
func (c *ClusterReconciler) createNamespace(ctx context.Context, cluster *v1alpha1.Cluster) error {
	ns := v1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name: util.ClusterNamespace(cluster),
		},
	}
	if err := controllerutil.SetControllerReference(cluster, &ns, c.Scheme); err != nil {
		return err
	}
	if err := c.Client.Create(ctx, &ns); err != nil && !apierrors.IsAlreadyExists(err) {
		return util.WrapErr("failed to create ns", err)
	}
	return nil
}
// createClusterConfigs creates the three configuration secrets the
// cluster workloads mount: the init-server config, the regular server
// config, and the agent config. Existing secrets are left in place.
func (c *ClusterReconciler) createClusterConfigs(ctx context.Context, cluster *v1alpha1.Cluster, serviceIP string) error {
	// ensureOwned stamps ownership on obj and creates it, treating an
	// already-existing object as success.
	ensureOwned := func(obj client.Object) error {
		if err := controllerutil.SetControllerReference(cluster, obj, c.Scheme); err != nil {
			return err
		}
		if err := c.Client.Create(ctx, obj); err != nil && !apierrors.IsAlreadyExists(err) {
			return err
		}
		return nil
	}
	// Init-node configuration, consumed by the first server.
	initServerConfig, err := config.ServerConfig(cluster, true, serviceIP)
	if err != nil {
		return err
	}
	if err := ensureOwned(initServerConfig); err != nil {
		return err
	}
	// Configuration for the remaining servers.
	serverConfig, err := config.ServerConfig(cluster, false, serviceIP)
	if err != nil {
		return err
	}
	if err := ensureOwned(serverConfig); err != nil {
		return err
	}
	// Agent configuration (returned by value, so take its address).
	agentsConfig := config.AgentConfig(cluster, serviceIP)
	return ensureOwned(&agentsConfig)
}
// createClusterService ensures the k3k server Service exists for the
// cluster and returns its assigned ClusterIP.
func (c *ClusterReconciler) createClusterService(ctx context.Context, cluster *v1alpha1.Cluster) (string, error) {
	// create cluster service
	svc := server.Service(cluster)
	if err := controllerutil.SetControllerReference(cluster, svc, c.Scheme); err != nil {
		return "", err
	}
	if err := c.Client.Create(ctx, svc); err != nil && !apierrors.IsAlreadyExists(err) {
		return "", err
	}
	// Read the service back to pick up the server-assigned ClusterIP.
	var created v1.Service
	key := client.ObjectKey{
		Namespace: util.ClusterNamespace(cluster),
		Name:      "k3k-server-service",
	}
	if err := c.Client.Get(ctx, key, &created); err != nil {
		return "", err
	}
	return created.Spec.ClusterIP, nil
}
// createDeployments creates the init-server deployment (always a
// single replica), the deployment for the remaining servers, and the
// agents deployment. Existing deployments are left in place.
func (c *ClusterReconciler) createDeployments(ctx context.Context, cluster *v1alpha1.Cluster) error {
	// ensureOwned stamps ownership on obj and creates it, treating an
	// already-existing object as success.
	ensureOwned := func(obj client.Object) error {
		if err := controllerutil.SetControllerReference(cluster, obj, c.Scheme); err != nil {
			return err
		}
		if err := c.Client.Create(ctx, obj); err != nil && !apierrors.IsAlreadyExists(err) {
			return err
		}
		return nil
	}
	// create deployment for the init server
	// the init deployment must have only 1 replica
	if err := ensureOwned(server.Server(cluster, true)); err != nil {
		return err
	}
	// create deployment for the rest of the servers
	if err := ensureOwned(server.Server(cluster, false)); err != nil {
		return err
	}
	return ensureOwned(agent.Agent(cluster))
}

View File

@@ -0,0 +1,166 @@
package bootstrap
import (
"context"
"crypto/tls"
"encoding/base64"
"encoding/json"
"net/http"
"time"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/util/retry"
)
// ControlRuntimeBootstrap mirrors the CA material returned by the k3s
// /v1-k3s/server-bootstrap endpoint. Field contents arrive
// base64-encoded and are decoded in place by decodeBootstrap.
type ControlRuntimeBootstrap struct {
	ServerCA content `json:"serverCA"`
	// NOTE(review): the tag "server" looks like a typo for
	// "serverCAKey" — it controls which JSON key decodes into this
	// field, so confirm against the actual k3s payload before changing.
	ServerCAKey content `json:"server"`
	ClientCA content
	ClientCAKey content
	ETCDServerCA content
	ETCDServerCAKey content
}

// content is a single bootstrap entry: a timestamp plus the
// (initially base64-encoded) PEM payload.
type content struct {
	Timestamp string
	Content string
}
// Generate generates the bootstrap for the cluster:
// 1- use the server token to get the bootstrap data from k3s
// 2- save the bootstrap data as a secret
// Note: ctx is currently unused by the function body.
func Generate(ctx context.Context, cluster *v1alpha1.Cluster, ip string) (*v1.Secret, error) {
	token := cluster.Spec.Token
	var bootstrap *ControlRuntimeBootstrap
	// Retry on every error: the k3s server may not be serving its
	// bootstrap endpoint yet while the cluster starts up.
	if err := retry.OnError(retry.DefaultBackoff, func(err error) bool {
		return true
	}, func() error {
		var err error
		bootstrap, err = requestBootstrap(token, ip)
		return err
	}); err != nil {
		return nil, err
	}
	// Fields arrive base64-encoded; store them decoded.
	if err := decodeBootstrap(bootstrap); err != nil {
		return nil, err
	}
	bootstrapData, err := json.Marshal(bootstrap)
	if err != nil {
		return nil, err
	}
	return &v1.Secret{
		TypeMeta: metav1.TypeMeta{
			Kind: "Secret",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: cluster.Name + "-bootstrap",
			// NOTE(review): namespace is derived as "k3k-"+name here
			// instead of via a shared helper — confirm it always agrees
			// with the namespace used elsewhere for this cluster.
			Namespace: "k3k-" + cluster.Name,
		},
		Data: map[string][]byte{
			"bootstrap": bootstrapData,
		},
	}, nil
}
// requestBootstrap fetches the bootstrap certificate bundle from the
// k3s server at serverIP, authenticating with HTTP basic auth using
// the server token. Fields in the result remain base64-encoded.
func requestBootstrap(token, serverIP string) (*ControlRuntimeBootstrap, error) {
	url := "https://" + serverIP + ":6443/v1-k3s/server-bootstrap"
	client := http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				// NOTE(review): certificate verification is disabled —
				// presumably because the server cert is self-signed at
				// this point; confirm this endpoint is only reached over
				// the cluster-internal network.
				InsecureSkipVerify: true,
			},
		},
		Timeout: 5 * time.Second,
	}
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Add("Authorization", "Basic "+basicAuth("server", token))
	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	// NOTE(review): resp.StatusCode is not checked; a non-200 reply
	// (e.g. 401) surfaces as a JSON decode error instead of a clear
	// HTTP error.
	var runtimeBootstrap ControlRuntimeBootstrap
	if err := json.NewDecoder(resp.Body).Decode(&runtimeBootstrap); err != nil {
		return nil, err
	}
	return &runtimeBootstrap, nil
}
// basicAuth returns the base64 credential portion of an HTTP Basic
// Authorization header for the given username and password.
func basicAuth(username, password string) string {
	credentials := []byte(username + ":" + password)
	return base64.StdEncoding.EncodeToString(credentials)
}
// decodeBootstrap base64-decodes every certificate/key field of the
// bootstrap bundle in place, stopping at the first malformed field.
// The decode order (client CA, client CA key, server CA, server CA
// key, etcd CA, etcd CA key) matches the original unrolled version.
func decodeBootstrap(bootstrap *ControlRuntimeBootstrap) error {
	// One loop replaces six copy-pasted decode stanzas.
	fields := []*content{
		&bootstrap.ClientCA,
		&bootstrap.ClientCAKey,
		&bootstrap.ServerCA,
		&bootstrap.ServerCAKey,
		&bootstrap.ETCDServerCA,
		&bootstrap.ETCDServerCAKey,
	}
	for _, f := range fields {
		decoded, err := base64.StdEncoding.DecodeString(f.Content)
		if err != nil {
			return err
		}
		f.Content = string(decoded)
	}
	return nil
}
// DecodedBootstrap fetches the bootstrap bundle from the server at ip
// and returns it with all base64-encoded fields decoded in place.
func DecodedBootstrap(token, ip string) (*ControlRuntimeBootstrap, error) {
	b, err := requestBootstrap(token, ip)
	if err != nil {
		return nil, err
	}
	if err = decodeBootstrap(b); err != nil {
		return nil, err
	}
	return b, nil
}

View File

@@ -3,8 +3,7 @@ package server
import (
"context"
"github.com/galal-hussein/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/galal-hussein/k3k/pkg/controller/util"
"github.com/rancher/k3k/pkg/controller/util"
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -16,41 +15,42 @@ const (
nginxSSLPassthroughAnnotation = "nginx.ingress.kubernetes.io/ssl-passthrough"
nginxBackendProtocolAnnotation = "nginx.ingress.kubernetes.io/backend-protocol"
nginxSSLRedirectAnnotation = "nginx.ingress.kubernetes.io/ssl-redirect"
serverPort = 6443
etcdPort = 2379
)
func Ingress(ctx context.Context, cluster *v1alpha1.Cluster, client client.Client) (*networkingv1.Ingress, error) {
func (s *Server) Ingress(ctx context.Context, client client.Client) (*networkingv1.Ingress, error) {
addresses, err := util.Addresses(ctx, client)
if err != nil {
return nil, err
}
ingressRules := ingressRules(cluster, addresses)
ingressRules := s.ingressRules(addresses)
ingress := &networkingv1.Ingress{
TypeMeta: metav1.TypeMeta{
Kind: "Ingress",
APIVersion: "networking.k8s.io/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: cluster.Name + "-server-ingress",
Namespace: util.ClusterNamespace(cluster),
Name: s.cluster.Name + "-server-ingress",
Namespace: util.ClusterNamespace(s.cluster),
},
Spec: networkingv1.IngressSpec{
IngressClassName: &cluster.Spec.Expose.Ingress.IngressClassName,
IngressClassName: &s.cluster.Spec.Expose.Ingress.IngressClassName,
Rules: ingressRules,
},
}
configureIngressOptions(ingress, cluster.Spec.Expose.Ingress.IngressClassName)
configureIngressOptions(ingress, s.cluster.Spec.Expose.Ingress.IngressClassName)
return ingress, nil
}
func ingressRules(cluster *v1alpha1.Cluster, addresses []string) []networkingv1.IngressRule {
ingressRules := []networkingv1.IngressRule{}
func (s *Server) ingressRules(addresses []string) []networkingv1.IngressRule {
var ingressRules []networkingv1.IngressRule
pathTypePrefix := networkingv1.PathTypePrefix
for _, address := range addresses {
rule := networkingv1.IngressRule{
Host: cluster.Name + "." + address + wildcardDNS,
Host: s.cluster.Name + "." + address + wildcardDNS,
IngressRuleValue: networkingv1.IngressRuleValue{
HTTP: &networkingv1.HTTPIngressRuleValue{
Paths: []networkingv1.HTTPIngressPath{
@@ -61,7 +61,7 @@ func ingressRules(cluster *v1alpha1.Cluster, addresses []string) []networkingv1.
Service: &networkingv1.IngressServiceBackend{
Name: "k3k-server-service",
Port: networkingv1.ServiceBackendPort{
Number: 6443,
Number: serverPort,
},
},
},
@@ -72,6 +72,7 @@ func ingressRules(cluster *v1alpha1.Cluster, addresses []string) []networkingv1.
}
ingressRules = append(ingressRules, rule)
}
return ingressRules
}

View File

@@ -1,237 +0,0 @@
package server
import (
"context"
"crypto"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"encoding/json"
"fmt"
"net/http"
"time"
"github.com/galal-hussein/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/galal-hussein/k3k/pkg/controller/util"
certutil "github.com/rancher/dynamiclistener/cert"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/client-go/util/retry"
)
const (
	// adminCommonName is the CN written into the generated admin
	// client certificate.
	adminCommonName = "system:admin"
	// port is the k3s API server port used in the kubeconfig URL.
	port = 6443
)

// controlRuntimeBootstrap mirrors the CA material returned by the k3s
// /v1-k3s/server-bootstrap endpoint; contents arrive base64-encoded
// and are decoded in place by decodeBootstrap.
type controlRuntimeBootstrap struct {
	ServerCA content
	ServerCAKey content
	ClientCA content
	ClientCAKey content
}

// content is one bootstrap entry: a timestamp and the PEM payload.
type content struct {
	Timestamp string
	Content string
}
// GenerateNewKubeConfig generates the kubeconfig for the cluster:
// 1- use the server token to get the bootstrap data from k3s
// 2- generate client admin cert/key
// 3- use the ca cert from the bootstrap data & admin cert/key to write a new kubeconfig
// 4- save the new kubeconfig as a secret
// Note: ctx is currently unused by the function body.
func GenerateNewKubeConfig(ctx context.Context, cluster *v1alpha1.Cluster, ip string) (*v1.Secret, error) {
	token := cluster.Spec.Token
	var bootstrap *controlRuntimeBootstrap
	// Retry on every error: the server may still be starting up.
	if err := retry.OnError(retry.DefaultBackoff, func(err error) bool {
		return true
	}, func() error {
		var err error
		bootstrap, err = requestBootstrap(token, ip)
		return err
	}); err != nil {
		return nil, err
	}
	if err := decodeBootstrap(bootstrap); err != nil {
		return nil, err
	}
	// Mint an admin client cert/key signed by the cluster's client CA,
	// with membership in user.SystemPrivilegedGroup.
	adminCert, adminKey, err := createClientCertKey(
		adminCommonName, []string{user.SystemPrivilegedGroup},
		nil, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
		bootstrap.ClientCA.Content,
		bootstrap.ClientCAKey.Content)
	if err != nil {
		return nil, err
	}
	url := fmt.Sprintf("https://%s:%d", ip, port)
	kubeconfigData, err := kubeconfig(url, []byte(bootstrap.ServerCA.Content), adminCert, adminKey)
	if err != nil {
		return nil, err
	}
	return &v1.Secret{
		TypeMeta: metav1.TypeMeta{
			Kind: "Secret",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: cluster.Name + "-kubeconfig",
			Namespace: util.ClusterNamespace(cluster),
		},
		Data: map[string][]byte{
			"kubeconfig.yaml": kubeconfigData,
		},
	}, nil
}
// requestBootstrap fetches the bootstrap CA bundle from the k3s server
// at serverIP, authenticating with HTTP basic auth using the server
// token. Fields in the result remain base64-encoded.
func requestBootstrap(token, serverIP string) (*controlRuntimeBootstrap, error) {
	url := "https://" + serverIP + ":6443/v1-k3s/server-bootstrap"
	client := http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				// NOTE(review): certificate verification is disabled —
				// presumably because the server cert is self-signed at
				// this point; confirm this endpoint is only reached over
				// the cluster-internal network.
				InsecureSkipVerify: true,
			},
		},
		Timeout: 5 * time.Second,
	}
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Add("Authorization", "Basic "+basicAuth("server", token))
	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	// NOTE(review): resp.StatusCode is not checked; a non-200 reply
	// surfaces as a JSON decode error instead of a clear HTTP error.
	var runtimeBootstrap controlRuntimeBootstrap
	if err := json.NewDecoder(resp.Body).Decode(&runtimeBootstrap); err != nil {
		return nil, err
	}
	return &runtimeBootstrap, nil
}
// createClientCertKey generates a fresh private key and signs a client
// certificate for it using the given CA cert/key PEM strings. It
// returns the leaf certificate concatenated with the CA certificate
// (so clients present a full chain) and the private key PEM.
func createClientCertKey(commonName string, organization []string, altNames *certutil.AltNames, extKeyUsage []x509.ExtKeyUsage, caCert, caKey string) ([]byte, []byte, error) {
	caKeyPEM, err := certutil.ParsePrivateKeyPEM([]byte(caKey))
	if err != nil {
		return nil, nil, err
	}
	caCertPEM, err := certutil.ParseCertsPEM([]byte(caCert))
	if err != nil {
		return nil, nil, err
	}
	// Generate a fresh elliptic-curve key for the client.
	b, err := generateKey()
	if err != nil {
		return nil, nil, err
	}
	key, err := certutil.ParsePrivateKeyPEM(b)
	if err != nil {
		return nil, nil, err
	}
	cfg := certutil.Config{
		CommonName: commonName,
		Organization: organization,
		Usages: extKeyUsage,
	}
	if altNames != nil {
		cfg.AltNames = *altNames
	}
	cert, err := certutil.NewSignedCert(cfg, key.(crypto.Signer), caCertPEM[0], caKeyPEM.(crypto.Signer))
	if err != nil {
		return nil, nil, err
	}
	return append(certutil.EncodeCertPEM(cert), certutil.EncodeCertPEM(caCertPEM[0])...), b, nil
}
// generateKey produces a new elliptic-curve private key in PEM form.
func generateKey() ([]byte, error) {
	pemKey, err := certutil.MakeEllipticPrivateKeyPEM()
	if err != nil {
		return nil, fmt.Errorf("error generating key: %v", err)
	}
	return pemKey, nil
}
// kubeconfig renders a single-context kubeconfig ("default" cluster,
// user, and context) for the given server URL and credentials.
func kubeconfig(url string, serverCA, clientCert, clientKey []byte) ([]byte, error) {
	cfg := clientcmdapi.NewConfig()

	clusterEntry := clientcmdapi.NewCluster()
	clusterEntry.Server = url
	clusterEntry.CertificateAuthorityData = serverCA
	cfg.Clusters["default"] = clusterEntry

	userEntry := clientcmdapi.NewAuthInfo()
	userEntry.ClientCertificateData = clientCert
	userEntry.ClientKeyData = clientKey
	cfg.AuthInfos["default"] = userEntry

	ctxEntry := clientcmdapi.NewContext()
	ctxEntry.Cluster = "default"
	ctxEntry.AuthInfo = "default"
	cfg.Contexts["default"] = ctxEntry
	cfg.CurrentContext = "default"

	out, err := clientcmd.Write(*cfg)
	if err != nil {
		return nil, err
	}
	return out, nil
}
// basicAuth returns the base64 credential portion of an HTTP Basic
// Authorization header for the given username and password.
func basicAuth(username, password string) string {
	credentials := []byte(username + ":" + password)
	return base64.StdEncoding.EncodeToString(credentials)
}
// decodeBootstrap base64-decodes every certificate/key field of the
// bootstrap bundle in place, stopping at the first malformed field.
// The decode order (client CA, client CA key, server CA, server CA
// key) matches the original unrolled version.
func decodeBootstrap(bootstrap *controlRuntimeBootstrap) error {
	// One loop replaces four copy-pasted decode stanzas.
	fields := []*content{
		&bootstrap.ClientCA,
		&bootstrap.ClientCAKey,
		&bootstrap.ServerCA,
		&bootstrap.ServerCAKey,
	}
	for _, f := range fields {
		decoded, err := base64.StdEncoding.DecodeString(f.Content)
		if err != nil {
			return err
		}
		f.Content = string(decoded)
	}
	return nil
}

View File

@@ -1,72 +1,67 @@
package server
import (
"strconv"
"context"
"strings"
"github.com/galal-hussein/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/galal-hussein/k3k/pkg/controller/util"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/util"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
)
func Server(cluster *v1alpha1.Cluster, init bool) *apps.Deployment {
var replicas int32
image := util.K3SImage(cluster)
const (
serverName = "k3k-"
k3kSystemNamespace = serverName + "system"
initServerName = serverName + "init-server"
initContainerName = serverName + "server-check"
initContainerImage = "alpine/curl"
name := "k3k-server"
if init {
name = "k3k-init-server"
}
EphermalNodesType = "ephermal"
DynamicNodesType = "dynamic"
)
replicas = *cluster.Spec.Servers - 1
if init {
replicas = 1
}
// Server
type Server struct {
cluster *v1alpha1.Cluster
client client.Client
}
return &apps.Deployment{
TypeMeta: metav1.TypeMeta{
Kind: "Deployment",
APIVersion: "apps/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: cluster.Name + "-" + name,
Namespace: util.ClusterNamespace(cluster),
},
Spec: apps.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"cluster": cluster.Name,
"role": "server",
"init": strconv.FormatBool(init),
},
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"cluster": cluster.Name,
"role": "server",
"init": strconv.FormatBool(init),
},
},
Spec: serverPodSpec(image, name, cluster.Spec.ServerArgs),
},
},
func New(cluster *v1alpha1.Cluster, client client.Client) *Server {
return &Server{
cluster: cluster,
client: client,
}
}
func serverPodSpec(image, name string, args []string) v1.PodSpec {
privileged := true
return v1.PodSpec{
func (s *Server) podSpec(ctx context.Context, image, name string, persistent bool) v1.PodSpec {
podSpec := v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "initconfig",
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: "k3k-init-server-config",
Items: []v1.KeyToPath{
{
Key: "config.yaml",
Path: "config.yaml",
},
},
},
},
},
{
Name: "config",
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: name + "-config",
SecretName: "k3k-server-config",
Items: []v1.KeyToPath{
{
Key: "config.yaml",
@@ -94,18 +89,6 @@ func serverPodSpec(image, name string, args []string) v1.PodSpec {
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
{
Name: "varlibkubelet",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
{
Name: "varlibrancherk3s",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
{
Name: "varlog",
VolumeSource: v1.VolumeSource{
@@ -117,22 +100,37 @@ func serverPodSpec(image, name string, args []string) v1.PodSpec {
{
Name: name,
Image: image,
Env: []v1.EnvVar{
{
Name: "POD_NAME",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "metadata.name",
},
},
},
},
SecurityContext: &v1.SecurityContext{
Privileged: &privileged,
Privileged: pointer.Bool(true),
},
Command: []string{
"/bin/sh",
},
Args: []string{
"-c",
"/bin/k3s server --config /opt/rancher/k3s/config.yaml " +
strings.Join(args, " ") +
" && true",
`if [ ${POD_NAME: -1} == 0 ]; then
/bin/k3s server --config /opt/rancher/k3s/init/config.yaml ` + strings.Join(s.cluster.Spec.ServerArgs, " ") + `
else /bin/k3s server --config /opt/rancher/k3s/server/config.yaml ` + strings.Join(s.cluster.Spec.ServerArgs, " ") + `
fi
`,
},
VolumeMounts: []v1.VolumeMount{
{
Name: "config",
MountPath: "/opt/rancher/k3s/",
MountPath: "/opt/rancher/k3s/server",
ReadOnly: false,
},
{
Name: "initconfig",
MountPath: "/opt/rancher/k3s/init",
ReadOnly: false,
},
{
@@ -169,4 +167,184 @@ func serverPodSpec(image, name string, args []string) v1.PodSpec {
},
},
}
if !persistent {
podSpec.Volumes = append(podSpec.Volumes, v1.Volume{
Name: "varlibkubelet",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
}, v1.Volume{
Name: "varlibrancherk3s",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
)
}
// Adding readiness probes to statefulset
podSpec.Containers[0].ReadinessProbe = &v1.Probe{
InitialDelaySeconds: 60,
FailureThreshold: 5,
TimeoutSeconds: 10,
ProbeHandler: v1.ProbeHandler{
TCPSocket: &v1.TCPSocketAction{
Port: intstr.FromInt(6443),
},
},
}
return podSpec
}
func (s *Server) StatefulServer(ctx context.Context, cluster *v1alpha1.Cluster) (*apps.StatefulSet, error) {
var (
replicas int32
pvClaims []v1.PersistentVolumeClaim
persistent bool
)
image := util.K3SImage(cluster)
name := serverName + "server"
replicas = *cluster.Spec.Servers
if cluster.Spec.Persistence != nil && cluster.Spec.Persistence.Type != EphermalNodesType {
persistent = true
pvClaims = []v1.PersistentVolumeClaim{
{
TypeMeta: metav1.TypeMeta{
Kind: "PersistentVolumeClaim",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "varlibrancherk3s",
Namespace: util.ClusterNamespace(cluster),
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
StorageClassName: &cluster.Spec.Persistence.StorageClassName,
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"storage": resource.MustParse(cluster.Spec.Persistence.StorageRequestSize),
},
},
},
},
{
TypeMeta: metav1.TypeMeta{
Kind: "PersistentVolumeClaim",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "varlibkubelet",
Namespace: util.ClusterNamespace(cluster),
},
Spec: v1.PersistentVolumeClaimSpec{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"storage": resource.MustParse(cluster.Spec.Persistence.StorageRequestSize),
},
},
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
StorageClassName: &cluster.Spec.Persistence.StorageClassName,
},
},
}
}
var volumes []v1.Volume
var volumeMounts []v1.VolumeMount
for _, addon := range s.cluster.Spec.Addons {
namespace := k3kSystemNamespace
if addon.SecretNamespace != "" {
namespace = addon.SecretNamespace
}
nn := types.NamespacedName{
Name: addon.SecretRef,
Namespace: namespace,
}
var addons v1.Secret
if err := s.client.Get(ctx, nn, &addons); err != nil {
return nil, err
}
clusterAddons := v1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: addons.Name,
Namespace: util.ClusterNamespace(s.cluster),
},
Data: make(map[string][]byte, len(addons.Data)),
}
for k, v := range addons.Data {
clusterAddons.Data[k] = v
}
if err := s.client.Create(ctx, &clusterAddons); err != nil {
return nil, err
}
name := "varlibrancherk3smanifests" + addon.SecretRef
volume := v1.Volume{
Name: name,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: addon.SecretRef,
},
},
}
volumes = append(volumes, volume)
volumeMount := v1.VolumeMount{
Name: name,
MountPath: "/var/lib/rancher/k3s/server/manifests/" + addon.SecretRef,
// changes to this part of the filesystem shouldn't be done manually. The secret should be updated instead.
ReadOnly: true,
}
volumeMounts = append(volumeMounts, volumeMount)
}
podSpec := s.podSpec(ctx, image, name, persistent)
podSpec.Volumes = append(podSpec.Volumes, volumes...)
podSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts, volumeMounts...)
return &apps.StatefulSet{
TypeMeta: metav1.TypeMeta{
Kind: "StatefulSet",
APIVersion: "apps/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: cluster.Name + "-" + name,
Namespace: util.ClusterNamespace(cluster),
},
Spec: apps.StatefulSetSpec{
Replicas: &replicas,
ServiceName: cluster.Name + "-" + name + "-headless",
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"cluster": cluster.Name,
"role": "server",
},
},
VolumeClaimTemplates: pvClaims,
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"cluster": cluster.Name,
"role": "server",
},
},
Spec: podSpec,
},
},
}, nil
}

View File

@@ -1,13 +1,22 @@
package server
import (
"github.com/galal-hussein/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/galal-hussein/k3k/pkg/controller/util"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/util"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func Service(cluster *v1alpha1.Cluster) *v1.Service {
func (s *Server) Service(cluster *v1alpha1.Cluster) *v1.Service {
serviceType := v1.ServiceTypeClusterIP
if cluster.Spec.Expose != nil {
if cluster.Spec.Expose.NodePort != nil {
if cluster.Spec.Expose.NodePort.Enabled {
serviceType = v1.ServiceTypeNodePort
}
}
}
return &v1.Service{
TypeMeta: metav1.TypeMeta{
Kind: "Service",
@@ -18,7 +27,7 @@ func Service(cluster *v1alpha1.Cluster) *v1.Service {
Namespace: util.ClusterNamespace(cluster),
},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeClusterIP,
Type: serviceType,
Selector: map[string]string{
"cluster": cluster.Name,
"role": "server",
@@ -27,7 +36,46 @@ func Service(cluster *v1alpha1.Cluster) *v1.Service {
{
Name: "k3s-server-port",
Protocol: v1.ProtocolTCP,
Port: 6443,
Port: serverPort,
},
{
Name: "k3s-etcd-port",
Protocol: v1.ProtocolTCP,
Port: etcdPort,
},
},
},
}
}
func (s *Server) StatefulServerService(cluster *v1alpha1.Cluster) *v1.Service {
name := serverName
return &v1.Service{
TypeMeta: metav1.TypeMeta{
Kind: "Service",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: cluster.Name + "-" + name + "-headless",
Namespace: util.ClusterNamespace(cluster),
},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeClusterIP,
ClusterIP: v1.ClusterIPNone,
Selector: map[string]string{
"cluster": cluster.Name,
"role": "server",
},
Ports: []v1.ServicePort{
{
Name: "k3s-server-port",
Protocol: v1.ProtocolTCP,
Port: serverPort,
},
{
Name: "k3s-etcd-port",
Protocol: v1.ProtocolTCP,
Port: etcdPort,
},
},
},

View File

@@ -0,0 +1,71 @@
package kubeconfig
import (
"crypto"
"crypto/x509"
"fmt"
"net"
"time"
certutil "github.com/rancher/dynamiclistener/cert"
)
func CreateClientCertKey(commonName string, organization []string, altNames *certutil.AltNames, extKeyUsage []x509.ExtKeyUsage, expiresAt time.Duration, caCert, caKey string) ([]byte, []byte, error) {
caKeyPEM, err := certutil.ParsePrivateKeyPEM([]byte(caKey))
if err != nil {
return nil, nil, err
}
caCertPEM, err := certutil.ParseCertsPEM([]byte(caCert))
if err != nil {
return nil, nil, err
}
b, err := generateKey()
if err != nil {
return nil, nil, err
}
key, err := certutil.ParsePrivateKeyPEM(b)
if err != nil {
return nil, nil, err
}
cfg := certutil.Config{
CommonName: commonName,
Organization: organization,
Usages: extKeyUsage,
ExpiresAt: expiresAt,
}
if altNames != nil {
cfg.AltNames = *altNames
}
cert, err := certutil.NewSignedCert(cfg, key.(crypto.Signer), caCertPEM[0], caKeyPEM.(crypto.Signer))
if err != nil {
return nil, nil, err
}
return append(certutil.EncodeCertPEM(cert), certutil.EncodeCertPEM(caCertPEM[0])...), b, nil
}
func generateKey() (data []byte, err error) {
generatedData, err := certutil.MakeEllipticPrivateKeyPEM()
if err != nil {
return nil, fmt.Errorf("error generating key: %v", err)
}
return generatedData, nil
}
func AddSANs(sans []string) certutil.AltNames {
var altNames certutil.AltNames
for _, san := range sans {
ip := net.ParseIP(san)
if ip == nil {
altNames.DNSNames = append(altNames.DNSNames, san)
} else {
altNames.IPs = append(altNames.IPs, ip)
}
}
return altNames
}

View File

@@ -0,0 +1,108 @@
package kubeconfig
import (
"context"
"crypto/x509"
"encoding/json"
"errors"
"fmt"
"time"
certutil "github.com/rancher/dynamiclistener/cert"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
"github.com/rancher/k3k/pkg/controller/util"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"sigs.k8s.io/controller-runtime/pkg/client"
)
type KubeConfig struct {
AltNames certutil.AltNames
CN string
ORG []string
ExpiryDate time.Duration
}
func (k *KubeConfig) Extract(ctx context.Context, client client.Client, cluster *v1alpha1.Cluster, hostServerIP string) ([]byte, error) {
nn := types.NamespacedName{
Name: cluster.Name + "-bootstrap",
Namespace: util.ClusterNamespace(cluster),
}
var bootstrapSecret v1.Secret
if err := client.Get(ctx, nn, &bootstrapSecret); err != nil {
return nil, err
}
bootstrapData := bootstrapSecret.Data["bootstrap"]
if bootstrapData == nil {
return nil, errors.New("empty bootstrap")
}
var bootstrap bootstrap.ControlRuntimeBootstrap
if err := json.Unmarshal(bootstrapData, &bootstrap); err != nil {
return nil, err
}
adminCert, adminKey, err := CreateClientCertKey(
k.CN, k.ORG,
&k.AltNames, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, k.ExpiryDate,
bootstrap.ClientCA.Content,
bootstrap.ClientCAKey.Content)
if err != nil {
return nil, err
}
// get the server service to extract the right IP
nn = types.NamespacedName{
Name: "k3k-server-service",
Namespace: util.ClusterNamespace(cluster),
}
var k3kService v1.Service
if err := client.Get(ctx, nn, &k3kService); err != nil {
return nil, err
}
url := fmt.Sprintf("https://%s:%d", k3kService.Spec.ClusterIP, util.ServerPort)
if k3kService.Spec.Type == v1.ServiceTypeNodePort {
nodePort := k3kService.Spec.Ports[0].NodePort
url = fmt.Sprintf("https://%s:%d", hostServerIP, nodePort)
}
kubeconfigData, err := kubeconfig(url, []byte(bootstrap.ServerCA.Content), adminCert, adminKey)
if err != nil {
return nil, err
}
return kubeconfigData, nil
}
func kubeconfig(url string, serverCA, clientCert, clientKey []byte) ([]byte, error) {
config := clientcmdapi.NewConfig()
cluster := clientcmdapi.NewCluster()
cluster.CertificateAuthorityData = serverCA
cluster.Server = url
authInfo := clientcmdapi.NewAuthInfo()
authInfo.ClientCertificateData = clientCert
authInfo.ClientKeyData = clientKey
context := clientcmdapi.NewContext()
context.AuthInfo = "default"
context.Cluster = "default"
config.Clusters["default"] = cluster
config.AuthInfos["default"] = authInfo
config.Contexts["default"] = context
config.CurrentContext = "default"
kubeconfig, err := clientcmd.Write(*config)
if err != nil {
return nil, err
}
return kubeconfig, nil
}

View File

@@ -3,7 +3,7 @@ package util
import (
"context"
"github.com/galal-hussein/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
v1 "k8s.io/api/core/v1"
"k8s.io/klog"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -12,6 +12,12 @@ import (
const (
namespacePrefix = "k3k-"
k3SImageName = "rancher/k3s"
AdminCommonName = "system:admin"
ServerPort = 6443
)
const (
K3kSystemNamespace = namespacePrefix + "system"
)
func ClusterNamespace(cluster *v1alpha1.Cluster) string {
@@ -22,7 +28,7 @@ func K3SImage(cluster *v1alpha1.Cluster) string {
return k3SImageName + ":" + cluster.Spec.Version
}
func WrapErr(errString string, err error) error {
func LogAndReturnErr(errString string, err error) error {
klog.Errorf("%s: %v", errString, err)
return err
}
@@ -35,7 +41,8 @@ func nodeAddress(node *v1.Node) string {
if ip.Type == "ExternalIP" && ip.Address != "" {
externalIP = ip.Address
break
} else if ip.Type == "InternalIP" && ip.Address != "" {
}
if ip.Type == "InternalIP" && ip.Address != "" {
internalIP = ip.Address
}
}
@@ -53,8 +60,7 @@ func Addresses(ctx context.Context, client client.Client) ([]string, error) {
return nil, err
}
addresses := make([]string, len(nodeList.Items))
var addresses []string
for _, node := range nodeList.Items {
addresses = append(addresses, nodeAddress(&node))
}

View File

@@ -1,10 +0,0 @@
package version
import "strings"
var (
Program = "k3k"
ProgramUpper = strings.ToUpper(Program)
Version = "dev"
GitCommit = "HEAD"
)

View File

@@ -1,26 +0,0 @@
#!/bin/bash
set -ex
source $(dirname $0)/version
cd $(dirname $0)/..
mkdir -p bin
if [ "$(uname)" = "Linux" ]; then
OTHER_LINKFLAGS="-extldflags -static -s"
fi
LINKFLAGS="-X github.com/rancher/k3k.Version=$VERSION"
LINKFLAGS="-X github.com/rancher/k3k.GitCommit=$COMMIT $LINKFLAGS"
CGO_ENABLED=0 go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3k
if [ "$CROSS" = "true" ] && [ "$ARCH" = "amd64" ]; then
GOOS=darwin go build -ldflags "$LINKFLAGS" -o bin/k3k-darwin
GOOS=windows go build -ldflags "$LINKFLAGS" -o bin/k3k-windows
fi
# build k3kcli
CGO_ENABLED=0 go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3kcli ./cli
if [ "$CROSS" = "true" ] && [ "$ARCH" = "amd64" ]; then
GOOS=darwin go build -ldflags "$LINKFLAGS" -o bin/k3kcli-darwin ./cli
GOOS=windows go build -ldflags "$LINKFLAGS" -o bin/k3kcli-windows ./cli
fi