Compare commits

..

36 Commits

Author SHA1 Message Date
Brian Downs
e426380828 add back cluster and service cidr
Signed-off-by: Brian Downs <brian.downs@gmail.com>
2023-08-18 16:53:56 -07:00
Brian Downs
ecdec030fd fix remaining conflict
Signed-off-by: Brian Downs <brian.downs@gmail.com>
2023-08-17 18:38:02 -07:00
Brian Downs
5e55b87c02 updates
Signed-off-by: Brian Downs <brian.downs@gmail.com>
2023-08-16 14:01:47 -07:00
Brian Downs
79c7b8d36d resolve conflicts and other changes
Signed-off-by: Brian Downs <brian.downs@gmail.com>
2023-08-16 13:14:49 -07:00
Waleed Malik
717808b03b Fix helm chart installation steps in documentation (#56)
Signed-off-by: Waleed Malik <ahmedwaleedmalik@gmail.com>
2023-08-08 23:33:53 +03:00
Hussein Galal
9dbd0bef44 Add cluster persistence with statefulsets (#55)
* Add cluster persistence with statefulsets

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix comments

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2023-08-08 23:23:55 +03:00
Brian Downs
def1746f1f Merge pull request #59 from briandowns/updates
remove some unused code and additional updates
2023-08-01 14:08:51 -04:00
Brian Downs
d32ce24d31 remove some unused code and additional updates
Signed-off-by: Brian Downs <brian.downs@gmail.com>
2023-08-01 10:58:18 -07:00
Brian Downs
46965eb692 Merge pull request #58 from briandowns/update_go
update go version
2023-07-27 10:11:28 -07:00
Brian Downs
5bed1bd6ee update go version
Signed-off-by: Brian Downs <brian.downs@gmail.com>
2023-07-27 10:06:59 -07:00
Hussein Galal
8968fe1d62 Fix docker image tag (#52)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2023-07-04 00:03:28 +03:00
Brad Davidson
84d3f768c6 Run k3s as pid 1 (#50)
Signed-off-by: Brad Davidson <brad.davidson@rancher.com>
2023-06-27 02:43:01 +03:00
Hussein Galal
decf24cb2a K3k chart (#51)
* Add release chart drone action

* fix release charts

* Add deploy dir to Dapper

* Add remove build step from drone k3k-chart

* Fix repo and org name

* fix ci

* add index.yaml
2023-06-24 00:27:05 +03:00
Hussein Galal
861078fa85 Remove github workflow and add drone (#49) 2023-06-21 00:43:16 +03:00
Hussein Galal
da5ddb27b5 Use env variables in CI action (#48) 2023-06-20 21:27:35 +03:00
Hussein Galal
e1576343a8 Fix action ci (#47) 2023-06-20 21:02:41 +03:00
Hussein Galal
ff256a324b fix action (#46)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2023-06-20 20:57:20 +03:00
Hussein Galal
6318fc29bf use custom action (#45)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2023-06-20 20:43:39 +03:00
Hussein Galal
d9eafbb1d2 Use ibuildthecloud/github-release in github action (#44)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2023-06-20 20:21:33 +03:00
Hussein Galal
da3ba1b5ff Fix release ci (#43) 2023-06-20 19:27:40 +03:00
Hussein Galal
176deae781 Update the chart (#38)
* Update chart to v0.1.1-k3k2

* update image tag
2023-06-14 03:18:44 +03:00
Brian Downs
7ec204683f Merge pull request #39 from briandowns/secure_build
update to perform secure build and possible arm64 support
2023-06-13 17:18:35 -07:00
Brian Downs
fac92fb21a update to perform secure build and possible arm64 support
Signed-off-by: Brian Downs <brian.downs@gmail.com>
2023-06-13 17:16:51 -07:00
Brian Downs
fb40f65c75 Merge pull request #37 from briandowns/update_package_refs 2023-06-13 17:00:42 -07:00
Brian Downs
b2e969f6df update package refs
Signed-off-by: Brian Downs <brian.downs@gmail.com>
2023-06-13 16:45:46 -07:00
Hussein Galal
43d7779dfa Export k3k cluster kubeconfig in k3kcli (#36)
* Export k3k cluster kubeconfig in k3kcli

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Update readme and logs

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2023-06-13 19:48:57 +03:00
Hussein Galal
ea1e7e486f Revert CIDR pool allocation and fix delete (#35)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2023-03-28 23:45:57 +02:00
Hussein Galal
7bcc312b4b move crds to the helm chart (#34)
* Fixes to the controller and cli

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Move crds to the helm chart

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix statically configured cluster

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2023-03-23 21:33:52 +02:00
Hussein Galal
dde877e285 Fixes to the controller and cli (#33)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2023-02-03 18:00:07 +02:00
Hussein Galal
0682b11100 Add version to cli (#32)
* Add version flag to cli

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add version to cluster spec

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2023-02-03 14:36:06 +02:00
Hussein Galal
1f2595edfb Use new version scheme for the chart (#31) 2023-02-03 14:18:21 +02:00
Hussein Galal
b0f1fc1184 Remove cr.yaml file (#30) 2023-02-03 14:12:11 +02:00
Hussein Galal
8f5de4a5d2 Remove GITHUB_TOKEN from install helm (#29) 2023-02-03 14:09:42 +02:00
Hussein Galal
46491a4310 Fix charts and merge charts to main (#28) 2023-02-03 14:07:29 +02:00
Hussein Galal
a299353eca fix chart and github workflow (#27)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2023-02-03 13:41:24 +02:00
Hussein Galal
eb4e5dd099 Add readme and fix release (#25)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2023-02-03 06:38:50 +02:00
49 changed files with 1520 additions and 970 deletions

137
.drone.yml Normal file
View File

@@ -0,0 +1,137 @@
---
kind: pipeline
name: amd64
platform:
os: linux
arch: amd64
steps:
- name: build
image: rancher/dapper:v0.5.0
environment:
GITHUB_TOKEN:
from_secret: github_token
commands:
- dapper ci
- echo "${DRONE_TAG}-amd64" | sed -e 's/+/-/g' >.tags
volumes:
- name: docker
path: /var/run/docker.sock
when:
branch:
exclude:
- k3k-chart
- name: package-chart
image: rancher/dapper:v0.5.0
environment:
GITHUB_TOKEN:
from_secret: github_token
commands:
- dapper package-chart
volumes:
- name: docker
path: /var/run/docker.sock
when:
branch:
- k3k-chart
instance:
- drone-publish.rancher.io
- name: release-chart
image: rancher/dapper:v0.5.0
environment:
GITHUB_TOKEN:
from_secret: github_token
commands:
- dapper release-chart
volumes:
- name: docker
path: /var/run/docker.sock
when:
branch:
- k3k-chart
instance:
- drone-publish.rancher.io
- name: github_binary_release
image: ibuildthecloud/github-release:v0.0.1
settings:
api_key:
from_secret: github_token
prerelease: true
checksum:
- sha256
checksum_file: CHECKSUMsum-amd64.txt
checksum_flatten: true
files:
- "bin/*"
when:
instance:
- drone-publish.rancher.io
ref:
- refs/head/master
- refs/tags/*
event:
- tag
branch:
exclude:
- k3k-chart
- name: docker-publish
image: plugins/docker
settings:
dockerfile: package/Dockerfile
password:
from_secret: docker_password
repo: "rancher/k3k"
username:
from_secret: docker_username
when:
instance:
- drone-publish.rancher.io
ref:
- refs/head/master
- refs/tags/*
event:
- tag
branch:
exclude:
- k3k-chart
volumes:
- name: docker
host:
path: /var/run/docker.sock
---
kind: pipeline
type: docker
name: manifest
platform:
os: linux
arch: amd64
steps:
- name: push-runtime-manifest
image: plugins/manifest
settings:
username:
from_secret: docker_username
password:
from_secret: docker_password
spec: manifest-runtime.tmpl
when:
event:
- tag
instance:
- drone-publish.rancher.io
ref:
- refs/head/master
- refs/tags/*
branch:
exclude:
- k3k-chart
depends_on:
- amd64

View File

@@ -1,32 +0,0 @@
name: Chart Release
on:
push:
branches:
- k3k-chart
jobs:
release:
permissions:
contents: write
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Configure Git
run: |
git config user.name "$GITHUB_ACTOR"
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
- name: Install Helm
uses: azure/setup-helm@v3
- name: Run chart-releaser
uses: helm/chart-releaser-action@v1.5.0
with:
charts_dir: charts
env:
CR_TOKEN: "${{ secrets.TOKEN }}"

View File

@@ -1,41 +0,0 @@
name: K3K Release
on:
push:
tags:
- "v*"
jobs:
release:
permissions:
contents: write
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Build K3K
uses: addnab/docker-run-action@v3
with:
registry: docker.io
image: rancher/dapper:v0.5.5
options: -v ${{ github.workspace }}:/work -v /var/run/docker.sock:/var/run/docker.sock
run: |
cd /work && dapper ci
- name: Publish Binaries
uses: SierraSoftworks/gh-releases@v1.0.7
with:
token: ${{ secrets.TOKEN }}
overwrite: 'true'
files: |
${{ github.workspace }}/bin/k3k
- name: Docker Hub Login
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push
uses: docker/build-push-action@v4
with:
push: true
tags: husseingalal/k3k:${{ github.ref_name }}
file: ./package/Dockerfile
context: .

View File

@@ -1,21 +1,24 @@
ARG GOLANG=golang:1.19.5-alpine3.17
ARG GOLANG=rancher/hardened-build-base:v1.20.6b2
FROM ${GOLANG}
ARG DAPPER_HOST_ARCH
ENV ARCH $DAPPER_HOST_ARCH
RUN apk -U add bash git gcc musl-dev docker vim less file curl wget ca-certificates
RUN apk -U add \bash git gcc musl-dev docker vim less file curl wget ca-certificates
RUN if [ "${ARCH}" == "amd64" ]; then \
curl -sL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s v1.15.0; \
fi
RUN curl -sL https://github.com/helm/chart-releaser/releases/download/v1.5.0/chart-releaser_1.5.0_linux_${ARCH}.tar.gz | tar -xz cr \
&& mv cr /bin/
ENV GO111MODULE on
ENV DAPPER_ENV REPO TAG DRONE_TAG CROSS
ENV DAPPER_SOURCE /go/src/github.com/galal-hussein/k3k/
ENV DAPPER_OUTPUT ./bin ./dist
ENV DAPPER_ENV REPO TAG DRONE_TAG CROSS GITHUB_TOKEN
ENV DAPPER_SOURCE /go/src/github.com/rancher/k3k/
ENV DAPPER_OUTPUT ./bin ./dist ./deploy
ENV DAPPER_DOCKER_SOCKET true
ENV HOME ${DAPPER_SOURCE}
WORKDIR ${DAPPER_SOURCE}
ENTRYPOINT ["./scripts/entry"]
ENTRYPOINT ["./ops/entry"]
CMD ["ci"]

View File

@@ -1,4 +1,4 @@
TARGETS := $(shell ls scripts)
TARGETS := $(shell ls ops)
.dapper:
@echo Downloading dapper

58
README.md Normal file
View File

@@ -0,0 +1,58 @@
# K3K
A Kubernetes in Kubernetes tool, k3k provides a way to run multiple embedded isolated k3s clusters on your kubernetes cluster.
## Example
An example on creating a k3k cluster on an RKE2 host using k3kcli
[![asciicast](https://asciinema.org/a/eYlc3dsL2pfP2B50i3Ea8MJJp.svg)](https://asciinema.org/a/eYlc3dsL2pfP2B50i3Ea8MJJp)
## Usage
K3K consists of a controller and a cli tool, the controller can be deployed via a helm chart and the cli can be downloaded from the releases page.
### Deploy Controller
[Helm](https://helm.sh) must be installed to use the charts. Please refer to
Helm's [documentation](https://helm.sh/docs) to get started.
Once Helm has been set up correctly, add the repo as follows:
```sh
helm repo add k3k https://rancher.github.io/k3k
```
If you had already added this repo earlier, run `helm repo update` to retrieve
the latest versions of the packages. You can then run `helm search repo
k3k --devel` to see the charts.
To install the k3k chart:
```sh
helm install my-k3k k3k/k3k --devel
```
To uninstall the chart:
```sh
helm delete my-k3k
```
**NOTE: Since k3k is still under development, the chart is marked as a development chart, this means that you need to add the `--devel` flag to install it.**
### Create a new cluster
To create a new cluster you need to install and run the cli or create a cluster object, to install the cli:
```sh
wget https://github.com/rancher/k3k/releases/download/v0.0.0-alpha6/k3kcli
chmod +x k3kcli
sudo cp k3kcli /usr/local/bin
```
To create a new cluster you can use:
```sh
k3k cluster create --name example-cluster --token test
```

View File

@@ -2,5 +2,5 @@ apiVersion: v2
name: k3k
description: A Helm chart for K3K
type: application
version: 0.1.0
appVersion: 0.1.0
version: 0.1.0-r1
appVersion: 0.0.0-alpha6

View File

@@ -39,6 +39,29 @@ spec:
type: array
items:
type: string
tlsSANs:
type: array
items:
type: string
persistence:
type: object
properties:
type:
type: string
default: "ephermal"
storageClassName:
type: string
storageRequestSize:
type: string
addons:
type: array
items:
type: object
properties:
secretNamespace:
type: string
secretRef:
type: string
expose:
type: object
properties:
@@ -54,11 +77,20 @@ spec:
properties:
enabled:
type: boolean
nodePort:
type: object
properties:
enabled:
type: boolean
status:
type: object
properties:
overrideClusterCIDR:
type: boolean
clusterCIDR:
type: string
overrideServiceCIDR:
type: boolean
serviceCIDR:
type: string
clusterDNS:

View File

@@ -23,12 +23,4 @@ spec:
- containerPort: 8080
name: https
protocol: TCP
livenessProbe:
httpGet:
path: /
port: https
readinessProbe:
httpGet:
path: /
port: https
serviceAccountName: {{ include "k3k.serviceAccountName" . }}

View File

@@ -2,10 +2,10 @@ replicaCount: 1
namespace: k3k-system
image:
repository: husseingalal/k3k
pullPolicy: IfNotPresent
repository: briandowns/k3k
pullPolicy: Always
# Overrides the image tag whose default is the chart appVersion.
tag: ""
tag: "dev"
imagePullSecrets: []
nameOverride: ""
@@ -16,4 +16,4 @@ serviceAccount:
create: true
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
name: ""

View File

@@ -1,7 +1,7 @@
package cluster
import (
"github.com/galal-hussein/k3k/cli/cmds"
"github.com/rancher/k3k/cli/cmds"
"github.com/urfave/cli"
)
@@ -17,11 +17,9 @@ var clusterSubcommands = []cli.Command{
}
func NewClusterCommand() cli.Command {
cmd := cli.Command{
return cli.Command{
Name: "cluster",
Usage: "cluster command",
Subcommands: clusterSubcommands,
}
return cmd
}

View File

@@ -3,21 +3,41 @@ package cluster
import (
"context"
"errors"
"fmt"
"net/url"
"os"
"path/filepath"
"strings"
"time"
"github.com/galal-hussein/k3k/cli/cmds"
"github.com/galal-hussein/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/cli/cmds"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/controller/util"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/client"
)
var (
Scheme = runtime.NewScheme()
Scheme = runtime.NewScheme()
backoff = wait.Backoff{
Steps: 5,
Duration: 3 * time.Second,
Factor: 2,
Jitter: 0.1,
}
)
func init() {
@@ -26,14 +46,17 @@ func init() {
}
var (
name string
token string
clusterCIDR string
serviceCIDR string
servers int64
agents int64
serverArgs cli.StringSlice
agentArgs cli.StringSlice
name string
token string
clusterCIDR string
serviceCIDR string
servers int64
agents int64
serverArgs cli.StringSlice
agentArgs cli.StringSlice
persistenceType string
storageClassName string
version string
clusterCreateFlags = []cli.Flag{
cli.StringFlag{
@@ -67,6 +90,17 @@ var (
Usage: "service CIDR",
Destination: &serviceCIDR,
},
cli.StringFlag{
Name: "persistence-type",
Usage: "Persistence mode for the nodes (ephermal, static, dynamic)",
Value: cluster.EphermalNodesType,
Destination: &persistenceType,
},
cli.StringFlag{
Name: "storage-class-name",
Usage: "Storage class name for dynamic persistence type",
Destination: &storageClassName,
},
cli.StringSliceFlag{
Name: "server-args",
Usage: "servers extra arguments",
@@ -77,6 +111,12 @@ var (
Usage: "agents extra arguments",
Value: &agentArgs,
},
cli.StringFlag{
Name: "version",
Usage: "k3s version",
Destination: &version,
Value: "v1.26.1-k3s1",
},
}
)
@@ -94,10 +134,11 @@ func createCluster(clx *cli.Context) error {
ctrlClient, err := client.New(restConfig, client.Options{
Scheme: Scheme,
})
if err != nil {
return err
}
logrus.Infof("creating a new cluster [%s]", name)
logrus.Infof("Creating a new cluster [%s]", name)
cluster := newCluster(
name,
token,
@@ -109,10 +150,59 @@ func createCluster(clx *cli.Context) error {
agentArgs,
)
return ctrlClient.Create(ctx, cluster)
cluster.Spec.Expose = &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{
Enabled: true,
},
}
// add Host IP address as an extra TLS-SAN to expose the k3k cluster
url, err := url.Parse(restConfig.Host)
if err != nil {
return err
}
host := strings.Split(url.Host, ":")
cluster.Spec.TLSSANs = []string{host[0]}
if err := ctrlClient.Create(ctx, cluster); err != nil {
if apierrors.IsAlreadyExists(err) {
logrus.Infof("Cluster [%s] already exists", name)
} else {
return err
}
}
logrus.Infof("Extracting Kubeconfig for [%s] cluster", name)
var kubeconfig []byte
if err := retry.OnError(backoff, apierrors.IsNotFound, func() error {
kubeconfig, err = extractKubeconfig(ctx, ctrlClient, cluster, host[0])
if err != nil {
return err
}
return nil
}); err != nil {
return err
}
pwd, err := os.Getwd()
if err != nil {
return err
}
logrus.Infof(`You can start using the cluster with:
export KUBECONFIG=%s
kubectl cluster-info
`, filepath.Join(pwd, cluster.Name+"-kubeconfig.yaml"))
return os.WriteFile(cluster.Name+"-kubeconfig.yaml", kubeconfig, 0644)
}
func validateCreateFlags(clx *cli.Context) error {
if persistenceType != cluster.EphermalNodesType &&
persistenceType != cluster.DynamicNodesType {
return errors.New("invalid persistence type")
}
if token == "" {
return errors.New("empty cluster token")
}
@@ -125,6 +215,7 @@ func validateCreateFlags(clx *cli.Context) error {
if cmds.Kubeconfig == "" && os.Getenv("KUBECONFIG") == "" {
return errors.New("empty kubeconfig")
}
return nil
}
@@ -146,6 +237,91 @@ func newCluster(name, token string, servers, agents int32, clusterCIDR, serviceC
ServiceCIDR: serviceCIDR,
ServerArgs: serverArgs,
AgentArgs: agentArgs,
Version: version,
Persistence: &v1alpha1.PersistenceConfig{
Type: persistenceType,
StorageClassName: storageClassName,
},
},
}
}
func extractKubeconfig(ctx context.Context, client client.Client, cluster *v1alpha1.Cluster, serverIP string) ([]byte, error) {
nn := types.NamespacedName{
Name: cluster.Name + "-kubeconfig",
Namespace: util.ClusterNamespace(cluster),
}
var kubeSecret v1.Secret
if err := client.Get(ctx, nn, &kubeSecret); err != nil {
return nil, err
}
kubeconfig := kubeSecret.Data["kubeconfig.yaml"]
if kubeconfig == nil {
return nil, errors.New("empty kubeconfig")
}
nn = types.NamespacedName{
Name: "k3k-server-service",
Namespace: util.ClusterNamespace(cluster),
}
var k3kService v1.Service
if err := client.Get(ctx, nn, &k3kService); err != nil {
return nil, err
}
if k3kService.Spec.Type == v1.ServiceTypeNodePort {
nodePort := k3kService.Spec.Ports[0].NodePort
restConfig, err := clientcmd.RESTConfigFromKubeConfig(kubeconfig)
if err != nil {
return nil, err
}
hostURL := fmt.Sprintf("https://%s:%d", serverIP, nodePort)
restConfig.Host = hostURL
clientConfig := generateKubeconfigFromRest(restConfig)
b, err := clientcmd.Write(clientConfig)
if err != nil {
return nil, err
}
kubeconfig = b
}
return kubeconfig, nil
}
func generateKubeconfigFromRest(config *rest.Config) clientcmdapi.Config {
clusters := make(map[string]*clientcmdapi.Cluster)
clusters["default-cluster"] = &clientcmdapi.Cluster{
Server: config.Host,
CertificateAuthorityData: config.CAData,
}
contexts := make(map[string]*clientcmdapi.Context)
contexts["default-context"] = &clientcmdapi.Context{
Cluster: "default-cluster",
Namespace: "default",
AuthInfo: "default",
}
authinfos := make(map[string]*clientcmdapi.AuthInfo)
authinfos["default"] = &clientcmdapi.AuthInfo{
ClientCertificateData: config.CertData,
ClientKeyData: config.KeyData,
}
clientConfig := clientcmdapi.Config{
Kind: "Config",
APIVersion: "v1",
Clusters: clusters,
Contexts: contexts,
CurrentContext: "default-context",
AuthInfos: authinfos,
}
return clientConfig
}

View File

@@ -3,19 +3,24 @@ package main
import (
"os"
"github.com/galal-hussein/k3k/cli/cmds"
"github.com/galal-hussein/k3k/cli/cmds/cluster"
"github.com/galal-hussein/k3k/pkg/version"
"github.com/rancher/k3k/cli/cmds"
"github.com/rancher/k3k/cli/cmds/cluster"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
const (
program = "k3k"
version = "dev"
gitCommit = "HEAD"
)
func main() {
app := cmds.NewApp()
app.Commands = []cli.Command{
cluster.NewClusterCommand(),
}
app.Version = version.Version + " (" + version.GitCommit + ")"
app.Version = version + " (" + gitCommit + ")"
if err := app.Run(os.Args); err != nil {
logrus.Fatal(err)

View File

@@ -1,38 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: cidrallocationpools.k3k.io
spec:
group: k3k.io
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
properties:
defaultClusterCIDR:
type: string
status:
type: object
properties:
pool:
type: array
items:
type: object
properties:
clusterName:
type: string
issued:
type: integer
ipNet:
type: string
scope: Cluster
names:
plural: cidrallocationpools
singular: cidrallocationpool
kind: CIDRAllocationPool

4
go.mod
View File

@@ -1,4 +1,4 @@
module github.com/galal-hussein/k3k
module github.com/rancher/k3k
go 1.19
@@ -67,7 +67,7 @@ require (
gopkg.in/yaml.v2 v2.4.0 // indirect
k8s.io/apiserver v0.26.1
k8s.io/klog/v2 v2.80.1
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 // indirect
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448
sigs.k8s.io/controller-runtime v0.14.1
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect

BIN
hack/becausewecan.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 137 KiB

View File

@@ -12,8 +12,8 @@ CODEGEN_PKG=./code-generator
"${CODEGEN_PKG}/generate-groups.sh" \
"deepcopy" \
github.com/galal-hussein/k3k/pkg/generated \
github.com/galal-hussein/k3k/pkg/apis \
github.com/rancher/k3k/pkg/generated \
github.com/rancher/k3k/pkg/apis \
"k3k.io:v1alpha1" \
--go-header-file "${SCRIPT_ROOT}"/hack/boilerplate.go.txt \
--output-base "$(dirname "${BASH_SOURCE[0]}")/../../../.."

View File

@@ -5,8 +5,8 @@ import (
"context"
"flag"
"github.com/galal-hussein/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/galal-hussein/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/cluster"
"k8s.io/apimachinery/pkg/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/clientcmd"
@@ -16,9 +16,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/manager"
)
var (
Scheme = runtime.NewScheme()
)
var Scheme = runtime.NewScheme()
func init() {
_ = clientgoscheme.AddToScheme(Scheme)

6
manifest-runtime.tmpl Normal file
View File

@@ -0,0 +1,6 @@
image: rancher/k3k:{{replace "+" "-" build.tag}}
manifests:
- image: rancher/k3k:{{replace "+" "-" build.tag}}-amd64
platform:
architecture: amd64
os: linux

View File

@@ -5,10 +5,12 @@ source $(dirname $0)/version
cd $(dirname $0)/..
mkdir -p bin
mkdir -p bin deploy
if [ "$(uname)" = "Linux" ]; then
OTHER_LINKFLAGS="-extldflags -static -s"
fi
LINKFLAGS="-X github.com/rancher/k3k.Version=$VERSION"
LINKFLAGS="-X github.com/rancher/k3k.GitCommit=$COMMIT $LINKFLAGS"
CGO_ENABLED=0 go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3k

View File

View File

@@ -2,8 +2,8 @@
set -e
mkdir -p bin dist
if [ -e ./scripts/$1 ]; then
./scripts/"$@"
if [ -e ./ops/$1 ]; then
./ops/"$@"
else
exec "$@"
fi

10
ops/package-chart Executable file
View File

@@ -0,0 +1,10 @@
#!/bin/bash
set -ex
source $(dirname $0)/version
cd $(dirname $0)/..
mkdir -p deploy/
cr package --package-path deploy/ charts/k3k

32
ops/release-chart Executable file
View File

@@ -0,0 +1,32 @@
#!/bin/bash
set -ex
source $(dirname $0)/version
cd $(dirname $0)/..
git fetch --tags
CHART_TAG=chart-$(grep "version: " charts/k3k/Chart.yaml | awk '{print $2}')
if [ $(git tag -l "$version") ]; then
echo "tag already exists"
exit 1
fi
# release the chart with artifacts
cr upload --token ${GITHUB_TOKEN} \
--release-name-template "chart-{{ .Version }}" \
--package-path ./deploy/ \
--git-repo k3k \
--skip-existing \
-o rancher
# update the index.yaml
cr index --token ${GITHUB_TOKEN} \
--release-name-template "chart-{{ .Version }}" \
--package-path ./deploy/ \
--index-path index.yaml \
--git-repo k3k \
-o rancher \
--push

View File

@@ -5,7 +5,7 @@ cd $(dirname $0)/..
go generate
source ./scripts/version
source ./ops/version
if [ -n "$DIRTY" ]; then
echo Git is dirty

View File

@@ -20,7 +20,7 @@ fi
SUFFIX="-${ARCH}"
TAG=${TAG:-${VERSION}${SUFFIX}}
REPO=${REPO:-husseingalal}
REPO=${REPO:-rancher}
if echo $TAG | grep -q dirty; then
TAG=dev

View File

@@ -1,7 +1,7 @@
package v1alpha1
import (
k3k "github.com/galal-hussein/k3k/pkg/apis/k3k.io"
k3k "github.com/rancher/k3k/pkg/apis/k3k.io"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
@@ -19,7 +19,9 @@ func Resource(resource string) schema.GroupResource {
}
func addKnownTypes(s *runtime.Scheme) error {
s.AddKnownTypes(SchemeGroupVersion, &Cluster{}, &ClusterList{})
s.AddKnownTypes(SchemeGroupVersion,
&Cluster{},
&ClusterList{})
metav1.AddToGroupVersion(s, SchemeGroupVersion)
return nil
}

View File

@@ -16,19 +16,26 @@ type Cluster struct {
}
type ClusterSpec struct {
Name string `json:"name"`
Version string `json:"version"`
Servers *int32 `json:"servers"`
Agents *int32 `json:"agents"`
Token string `json:"token"`
ClusterCIDR string `json:"clusterCIDR,omitempty"`
ServiceCIDR string `json:"serviceCIDR,omitempty"`
ClusterDNS string `json:"clusterDNS,omitempty"`
Name string `json:"name"`
Version string `json:"version"`
Servers *int32 `json:"servers"`
Agents *int32 `json:"agents"`
Token string `json:"token"`
ClusterCIDR string `json:"clusterCIDR,omitempty"`
ServiceCIDR string `json:"serviceCIDR,omitempty"`
ClusterDNS string `json:"clusterDNS,omitempty"`
ServerArgs []string `json:"serverArgs,omitempty"`
AgentArgs []string `json:"agentArgs,omitempty"`
TLSSANs []string `json:"tlsSANs,omitempty"`
Addons []Addon `json:"addons,omitempty"`
ServerArgs []string `json:"serverArgs,omitempty"`
AgentArgs []string `json:"agentArgs,omitempty"`
Persistence *PersistenceConfig `json:"persistence,omitempty"`
Expose *ExposeConfig `json:"expose,omitempty"`
}
Expose ExposeConfig `json:"expose,omitempty"`
type Addon struct {
SecretNamespace string `json:"secretNamespace,omitempty"`
SecretRef string `json:"secretRef,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -40,9 +47,17 @@ type ClusterList struct {
Items []Cluster `json:"items"`
}
type PersistenceConfig struct {
// Type can be ephermal, static, dynamic
Type string `json:"type"`
StorageClassName string `json:"storageClassName,omitempty"`
StorageRequestSize string `json:"storageRequestSize,omitempty"`
}
type ExposeConfig struct {
Ingress *IngressConfig `json:"ingress"`
LoadBalancer *LoadBalancerConfig `json:"loadbalancer"`
NodePort *NodePortConfig `json:"nodePort"`
}
type IngressConfig struct {
@@ -54,41 +69,12 @@ type LoadBalancerConfig struct {
Enabled bool `json:"enabled"`
}
type NodePortConfig struct {
Enabled bool `json:"enabled"`
}
type ClusterStatus struct {
ClusterCIDR string `json:"clusterCIDR,omitempty"`
ServiceCIDR string `json:"serviceCIDR,omitempty"`
ClusterDNS string `json:"clusterDNS,omitempty"`
}
type Allocation struct {
ClusterName string `json:"clusterName"`
Issued int64 `json:"issued"`
IPNet string `json:"ipNet"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type CIDRAllocationPool struct {
metav1.ObjectMeta `json:"metadata,omitempty"`
metav1.TypeMeta `json:",inline"`
Spec CIDRAllocationPoolSpec `json:"spec"`
Status CIDRAllocationPoolStatus `json:"status"`
}
type CIDRAllocationPoolSpec struct {
DefaultClusterCIDR string `json:"defaultClusterCIDR"`
}
type CIDRAllocationPoolStatus struct {
Pool []Allocation `json:"pool"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type CIDRAllocationPoolList struct {
metav1.ListMeta `json:"metadata,omitempty"`
metav1.TypeMeta `json:",inline"`
Items []CIDRAllocationPool `json:"items"`
}

View File

@@ -10,115 +10,17 @@ import (
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Allocation) DeepCopyInto(out *Allocation) {
func (in *Addon) DeepCopyInto(out *Addon) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Allocation.
func (in *Allocation) DeepCopy() *Allocation {
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Addon.
func (in *Addon) DeepCopy() *Addon {
if in == nil {
return nil
}
out := new(Allocation)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CIDRAllocationPool) DeepCopyInto(out *CIDRAllocationPool) {
*out = *in
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.TypeMeta = in.TypeMeta
out.Spec = in.Spec
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CIDRAllocationPool.
func (in *CIDRAllocationPool) DeepCopy() *CIDRAllocationPool {
if in == nil {
return nil
}
out := new(CIDRAllocationPool)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CIDRAllocationPool) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CIDRAllocationPoolList) DeepCopyInto(out *CIDRAllocationPoolList) {
*out = *in
in.ListMeta.DeepCopyInto(&out.ListMeta)
out.TypeMeta = in.TypeMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CIDRAllocationPool, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CIDRAllocationPoolList.
func (in *CIDRAllocationPoolList) DeepCopy() *CIDRAllocationPoolList {
if in == nil {
return nil
}
out := new(CIDRAllocationPoolList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CIDRAllocationPoolList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CIDRAllocationPoolSpec) DeepCopyInto(out *CIDRAllocationPoolSpec) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CIDRAllocationPoolSpec.
func (in *CIDRAllocationPoolSpec) DeepCopy() *CIDRAllocationPoolSpec {
if in == nil {
return nil
}
out := new(CIDRAllocationPoolSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CIDRAllocationPoolStatus) DeepCopyInto(out *CIDRAllocationPoolStatus) {
*out = *in
if in.Pool != nil {
in, out := &in.Pool, &out.Pool
*out = make([]Allocation, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CIDRAllocationPoolStatus.
func (in *CIDRAllocationPoolStatus) DeepCopy() *CIDRAllocationPoolStatus {
if in == nil {
return nil
}
out := new(CIDRAllocationPoolStatus)
out := new(Addon)
in.DeepCopyInto(out)
return out
}
@@ -207,7 +109,26 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
*out = make([]string, len(*in))
copy(*out, *in)
}
in.Expose.DeepCopyInto(&out.Expose)
if in.TLSSANs != nil {
in, out := &in.TLSSANs, &out.TLSSANs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Addons != nil {
in, out := &in.Addons, &out.Addons
*out = make([]Addon, len(*in))
copy(*out, *in)
}
if in.Persistence != nil {
in, out := &in.Persistence, &out.Persistence
*out = new(PersistenceConfig)
**out = **in
}
if in.Expose != nil {
in, out := &in.Expose, &out.Expose
*out = new(ExposeConfig)
(*in).DeepCopyInto(*out)
}
return
}
@@ -250,6 +171,11 @@ func (in *ExposeConfig) DeepCopyInto(out *ExposeConfig) {
*out = new(LoadBalancerConfig)
**out = **in
}
if in.NodePort != nil {
in, out := &in.NodePort, &out.NodePort
*out = new(NodePortConfig)
**out = **in
}
return
}
@@ -294,3 +220,35 @@ func (in *LoadBalancerConfig) DeepCopy() *LoadBalancerConfig {
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodePortConfig) DeepCopyInto(out *NodePortConfig) {
	// Only value fields: a shallow copy suffices.
	*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePortConfig.
func (in *NodePortConfig) DeepCopy() *NodePortConfig {
	if in == nil {
		return nil
	}
	clone := new(NodePortConfig)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PersistenceConfig) DeepCopyInto(out *PersistenceConfig) {
	// Only value fields: a shallow copy suffices.
	*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistenceConfig.
func (in *PersistenceConfig) DeepCopy() *PersistenceConfig {
	if in == nil {
		return nil
	}
	clone := new(PersistenceConfig)
	in.DeepCopyInto(clone)
	return clone
}

View File

@@ -1,50 +0,0 @@
package addressallocator
import (
"context"
"github.com/galal-hussein/k3k/pkg/apis/k3k.io/v1alpha1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
)
const (
AddressAllocatorController = "address-allocator-controller"
)
// AddressAllocatorReconciler reconciles Cluster objects to allocate
// cluster/service CIDRs. Its Reconcile body is currently a no-op stub.
type AddressAllocatorReconciler struct {
	Client client.Client
	Scheme *runtime.Scheme
}
// Add adds a new controller to the manager and starts watching Cluster
// objects with the address-allocator reconciler.
func Add(mgr manager.Manager) error {
	// initialize a new Reconciler
	reconciler := AddressAllocatorReconciler{
		Client: mgr.GetClient(),
		Scheme: mgr.GetScheme(),
	}
	// FIX: the local variable was named `controller`, shadowing the imported
	// `controller` package; renamed to `ctrl`.
	ctrl, err := controller.New(AddressAllocatorController, mgr, controller.Options{
		Reconciler:              &reconciler,
		MaxConcurrentReconciles: 1,
	})
	if err != nil {
		return err
	}
	return ctrl.Watch(&source.Kind{Type: &v1alpha1.Cluster{}},
		&handler.EnqueueRequestForObject{})
}
// Reconcile will allocate cluster/service cidrs to new clusters.
// NOTE(review): currently a no-op stub — it always returns an empty result
// and no error.
func (r *AddressAllocatorReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
	return reconcile.Result{}, nil
}

View File

@@ -1,17 +1,29 @@
package agent
import (
"strings"
"github.com/galal-hussein/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/galal-hussein/k3k/pkg/controller/util"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/util"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/pointer"
)
func Agent(cluster *v1alpha1.Cluster) *apps.Deployment {
image := util.K3SImage(cluster)
// agentName is the base name used for all agent workload objects.
const agentName = "k3k-agent"

// Agent builds the Kubernetes objects (Deployment, StatefulSet, headless
// Service) that run a virtual cluster's k3s agents.
type Agent struct {
	cluster *v1alpha1.Cluster
}

// New returns an Agent bound to the given cluster.
func New(cluster *v1alpha1.Cluster) *Agent {
	return &Agent{
		cluster: cluster,
	}
}
func (a *Agent) Deploy() *apps.Deployment {
image := util.K3SImage(a.cluster)
const name = "k3k-agent"
@@ -21,17 +33,91 @@ func Agent(cluster *v1alpha1.Cluster) *apps.Deployment {
APIVersion: "apps/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: cluster.Name + "-" + name,
Namespace: util.ClusterNamespace(cluster),
Name: a.cluster.Name + "-" + name,
Namespace: util.ClusterNamespace(a.cluster),
},
Spec: apps.DeploymentSpec{
Replicas: cluster.Spec.Agents,
Replicas: a.cluster.Spec.Agents,
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"cluster": a.cluster.Name,
"type": "agent",
},
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"cluster": a.cluster.Name,
"type": "agent",
},
},
Spec: a.podSpec(image, name, a.cluster.Spec.AgentArgs, false),
},
},
}
}
func (a *Agent) StatefulAgent(cluster *v1alpha1.Cluster) *apps.StatefulSet {
image := util.K3SImage(cluster)
return &apps.StatefulSet{
TypeMeta: metav1.TypeMeta{
Kind: "Statefulset",
APIVersion: "apps/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: cluster.Name + "-" + agentName,
Namespace: util.ClusterNamespace(cluster),
},
Spec: apps.StatefulSetSpec{
ServiceName: cluster.Name + "-" + agentName + "-headless",
Replicas: cluster.Spec.Agents,
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"cluster": cluster.Name,
"type": "agent",
},
},
VolumeClaimTemplates: []v1.PersistentVolumeClaim{
{
TypeMeta: metav1.TypeMeta{
Kind: "PersistentVolumeClaim",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "varlibrancherk3s",
Namespace: util.ClusterNamespace(cluster),
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
StorageClassName: &cluster.Spec.Persistence.StorageClassName,
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"storage": resource.MustParse(cluster.Spec.Persistence.StorageRequestSize),
},
},
},
},
{
TypeMeta: metav1.TypeMeta{
Kind: "PersistentVolumeClaim",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "varlibkubelet",
Namespace: util.ClusterNamespace(cluster),
},
Spec: v1.PersistentVolumeClaimSpec{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"storage": resource.MustParse(cluster.Spec.Persistence.StorageRequestSize),
},
},
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
StorageClassName: &cluster.Spec.Persistence.StorageClassName,
},
},
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
@@ -39,16 +125,15 @@ func Agent(cluster *v1alpha1.Cluster) *apps.Deployment {
"type": "agent",
},
},
Spec: agentPodSpec(image, name, cluster.Spec.AgentArgs),
Spec: a.podSpec(image, agentName, cluster.Spec.AgentArgs, true),
},
},
}
}
func agentPodSpec(image, name string, args []string) v1.PodSpec {
privileged := true
return v1.PodSpec{
func (a *Agent) podSpec(image, name string, args []string, statefulSet bool) v1.PodSpec {
args = append([]string{"agent", "--config", "/opt/rancher/k3s/config.yaml"}, args...)
podSpec := v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "config",
@@ -82,18 +167,6 @@ func agentPodSpec(image, name string, args []string) v1.PodSpec {
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
{
Name: "varlibkubelet",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
{
Name: "varlibrancherk3s",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
{
Name: "varlog",
VolumeSource: v1.VolumeSource{
@@ -106,17 +179,12 @@ func agentPodSpec(image, name string, args []string) v1.PodSpec {
Name: name,
Image: image,
SecurityContext: &v1.SecurityContext{
Privileged: &privileged,
Privileged: pointer.Bool(true),
},
Command: []string{
"/bin/sh",
},
Args: []string{
"-c",
"/bin/k3s agent --config /opt/rancher/k3s/config.yaml " +
strings.Join(args, " ") +
" && true",
"/bin/k3s",
},
Args: args,
VolumeMounts: []v1.VolumeMount{
{
Name: "config",
@@ -157,4 +225,22 @@ func agentPodSpec(image, name string, args []string) v1.PodSpec {
},
},
}
if !statefulSet {
podSpec.Volumes = append(podSpec.Volumes, v1.Volume{
Name: "varlibkubelet",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
}, v1.Volume{
Name: "varlibrancherk3s",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
)
}
return podSpec
}

View File

@@ -0,0 +1,30 @@
package agent
import (
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/util"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// StatefulAgentService returns the headless Service backing the agent
// StatefulSet, giving each agent pod a stable DNS identity.
func (a *Agent) StatefulAgentService(cluster *v1alpha1.Cluster) *v1.Service {
	return &v1.Service{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Service",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      cluster.Name + "-" + agentName + "-headless",
			Namespace: util.ClusterNamespace(cluster),
		},
		Spec: v1.ServiceSpec{
			Type:      v1.ServiceTypeClusterIP,
			ClusterIP: v1.ClusterIPNone,
			// BUG FIX: agent pods are labeled "type": "agent" (see the agent
			// Deployment/StatefulSet pod templates), not "role": "agent".
			// The old selector matched no pods.
			Selector: map[string]string{
				"cluster": cluster.Name,
				"type":    "agent",
			},
			Ports: []v1.ServicePort{},
		},
	}
}

View File

@@ -1,120 +0,0 @@
package cluster
import (
"context"
"fmt"
"net"
"time"
"github.com/galal-hussein/k3k/pkg/apis/k3k.io/v1alpha1"
"k8s.io/apimachinery/pkg/types"
)
const (
cidrAllocationClusterPoolName = "k3k-cluster-cidr-allocation-pool"
cidrAllocationServicePoolName = "k3k-service-cidr-allocation-pool"
defaultClusterCIDR = "10.44.0.0/16"
defaultClusterServiceCIDR = "10.45.0.0/16"
)
// determineOctet determines which octet (1-4) of an IPv4 address is the last
// one covered by a mask of mb bits. Returns 0 for mb > 32.
func determineOctet(mb int) uint8 {
	// FIX: the previous cases carried redundant, misleading lower bounds
	// (`mb >= 8 && mb <= 16`, etc.) that only worked because switch cases are
	// evaluated in order. The simplified bounds are equivalent.
	switch {
	case mb <= 8:
		return 1
	case mb <= 16:
		return 2
	case mb <= 24:
		return 3
	case mb <= 32:
		return 4
	default:
		return 0
	}
}

// generateSubnets generates successor subnets of the given CIDR by repeatedly
// incrementing the octet addressed by the mask, stopping once the octet
// reaches 254. E.g. "10.44.0.0/16" yields "10.45.0.0/16" … "10.254.0.0/16".
// NOTE(review): assumes an IPv4 CIDR — To4 returns nil for IPv6; confirm all
// callers pass IPv4 addresses.
func generateSubnets(cidr string) ([]string, error) {
	_, ipNet, err := net.ParseCIDR(cidr)
	if err != nil {
		return nil, err
	}
	usedBits, _ := ipNet.Mask.Size()
	octet := determineOctet(usedBits)
	ip := ipNet.IP.To4()
	octetVal := ip[octet-1]
	var subnets []string
	for i := octetVal; i < 254; i++ {
		// Increment before formatting: the original network itself is skipped.
		octetVal++
		ip[octet-1] = octetVal
		subnets = append(subnets, fmt.Sprintf("%s/%d", ip, usedBits))
	}
	return subnets, nil
}
// nextCIDR retrieves the next available CIDR address from the given pool,
// marks it as issued to clusterName, persists the pool status, and returns
// the parsed network.
// FIX: when the pool was exhausted the old code returned (nil, nil), which
// callers dereferenced (e.g. clusterCIDR.String()); it now returns an error.
func (c *ClusterReconciler) nextCIDR(ctx context.Context, cidrAllocationPoolName, clusterName string) (*net.IPNet, error) {
	var cidrPool v1alpha1.CIDRAllocationPool
	nn := types.NamespacedName{
		Name: cidrAllocationPoolName,
	}
	if err := c.Client.Get(ctx, nn, &cidrPool); err != nil {
		return nil, err
	}
	for i := range cidrPool.Status.Pool {
		alloc := &cidrPool.Status.Pool[i]
		// A free entry has no owner and no issue timestamp.
		if alloc.ClusterName != "" || alloc.Issued != 0 {
			continue
		}
		alloc.ClusterName = clusterName
		alloc.Issued = time.Now().Unix()
		_, ipn, err := net.ParseCIDR(alloc.IPNet)
		if err != nil {
			return nil, err
		}
		if err := c.Client.Status().Update(ctx, &cidrPool); err != nil {
			return nil, err
		}
		return ipn, nil
	}
	return nil, fmt.Errorf("no free CIDR available in pool %q", cidrAllocationPoolName)
}
// releaseCIDR updates the given CIDR pool by marking every address held by
// clusterName as available again.
// FIX: the status update used to sit inside the scan loop, issuing one API
// call per pool entry (and updating even when nothing matched); it is now
// performed once, after the scan, and only when something changed.
func (c *ClusterReconciler) releaseCIDR(ctx context.Context, cidrAllocationPoolName, clusterName string) error {
	var cidrPool v1alpha1.CIDRAllocationPool
	nn := types.NamespacedName{
		Name: cidrAllocationPoolName,
	}
	if err := c.Client.Get(ctx, nn, &cidrPool); err != nil {
		return err
	}
	changed := false
	for i := range cidrPool.Status.Pool {
		if cidrPool.Status.Pool[i].ClusterName == clusterName {
			cidrPool.Status.Pool[i].ClusterName = ""
			cidrPool.Status.Pool[i].Issued = 0
			changed = true
		}
	}
	if !changed {
		return nil
	}
	return c.Client.Status().Update(ctx, &cidrPool)
}

View File

@@ -0,0 +1,459 @@
package cluster
import (
"context"
"fmt"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
"github.com/rancher/k3k/pkg/controller/cluster/config"
"github.com/rancher/k3k/pkg/controller/cluster/server"
"github.com/rancher/k3k/pkg/controller/util"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
)
const (
	clusterController    = "k3k-cluster-controller"
	clusterFinalizerName = "cluster.k3k.io/finalizer"

	// Persistence types for cluster nodes. NOTE(review): "ephermal" is a
	// misspelling of "ephemeral", but these exported values are part of the
	// public contract and cannot be renamed here.
	EphermalNodesType = "ephermal"
	DynamicNodesType  = "dynamic"

	maxConcurrentReconciles = 1

	// Fallback CIDRs used when the Cluster spec does not set its own.
	defaultClusterCIDR        = "10.44.0.0/16"
	defaultClusterServiceCIDR = "10.45.0.0/16"
)

// ClusterReconciler reconciles Cluster objects into namespaces, config
// secrets, services, and server/agent workloads.
type ClusterReconciler struct {
	Client client.Client
	Scheme *runtime.Scheme
}
// Add adds a new controller to the manager and starts watching Cluster
// objects with the cluster reconciler.
func Add(ctx context.Context, mgr manager.Manager) error {
	// initialize a new Reconciler
	reconciler := ClusterReconciler{
		Client: mgr.GetClient(),
		Scheme: mgr.GetScheme(),
	}
	// create a new controller and add it to the manager
	// this can be replaced by the new builder functionality in controller-runtime
	// FIX: the local variable was named `controller`, shadowing the imported
	// `controller` package; renamed to `ctrl`.
	ctrl, err := controller.New(clusterController, mgr, controller.Options{
		Reconciler:              &reconciler,
		MaxConcurrentReconciles: maxConcurrentReconciles,
	})
	if err != nil {
		return err
	}
	return ctrl.Watch(&source.Kind{Type: &v1alpha1.Cluster{}}, &handler.EnqueueRequestForObject{})
}
// Reconcile handles create/update/delete events for Cluster objects: it adds
// a finalizer and provisions the cluster while the object is live, and drops
// the finalizer once a deletion timestamp is set.
func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
	var cluster v1alpha1.Cluster
	if err := c.Client.Get(ctx, req.NamespacedName, &cluster); err != nil {
		// The object may already be gone; NotFound is not an error here.
		return reconcile.Result{}, client.IgnoreNotFound(err)
	}
	if cluster.DeletionTimestamp.IsZero() {
		// Live cluster: ensure our finalizer gates deletion.
		if !controllerutil.ContainsFinalizer(&cluster, clusterFinalizerName) {
			controllerutil.AddFinalizer(&cluster, clusterFinalizerName)
			if err := c.Client.Update(ctx, &cluster); err != nil {
				return reconcile.Result{}, err
			}
		}
		// we create a namespace for each new cluster
		// NOTE(review): the fetched namespace is unused — only unexpected
		// (non-NotFound) errors abort; the namespace itself is created later
		// inside createCluster.
		var ns v1.Namespace
		objKey := client.ObjectKey{
			Name: util.ClusterNamespace(&cluster),
		}
		if err := c.Client.Get(ctx, objKey, &ns); err != nil {
			if !apierrors.IsNotFound(err) {
				return reconcile.Result{}, util.LogAndReturnErr("failed to get cluster namespace "+util.ClusterNamespace(&cluster), err)
			}
		}
		klog.Infof("enqueue cluster [%s]", cluster.Name)
		return reconcile.Result{}, c.createCluster(ctx, &cluster)
	}
	// Deletion path: remove the finalizer so the API server can delete the object.
	if controllerutil.ContainsFinalizer(&cluster, clusterFinalizerName) {
		// remove our finalizer from the list and update it.
		controllerutil.RemoveFinalizer(&cluster, clusterFinalizerName)
		if err := c.Client.Update(ctx, &cluster); err != nil {
			return reconcile.Result{}, err
		}
	}
	klog.Infof("deleting cluster [%s]", cluster.Name)
	return reconcile.Result{}, nil
}
// createCluster provisions everything a virtual cluster needs: persistence
// and CIDR defaults, the namespace, the server service, the config secrets,
// the server/agent workloads (StatefulSets or Deployments depending on the
// persistence type), an optional ingress, and the kubeconfig secret.
// Creation calls tolerate AlreadyExists so reconciles stay idempotent.
func (c *ClusterReconciler) createCluster(ctx context.Context, cluster *v1alpha1.Cluster) error {
	server := server.New(cluster, c.Client)
	agent := agent.New(cluster)
	if cluster.Spec.Persistence == nil {
		// default to ephermal nodes
		cluster.Spec.Persistence = &v1alpha1.PersistenceConfig{
			Type: EphermalNodesType,
		}
	}
	// Persist the defaulted persistence type back to the API server.
	if err := c.Client.Update(ctx, cluster); err != nil {
		return util.LogAndReturnErr("failed to update cluster with persistence type", err)
	}
	// create a new namespace for the cluster
	if err := c.createNamespace(ctx, cluster); err != nil {
		return util.LogAndReturnErr("failed to create ns", err)
	}
	// Mirror the spec CIDRs into status, falling back to the package defaults.
	cluster.Status.ClusterCIDR = cluster.Spec.ClusterCIDR
	if cluster.Status.ClusterCIDR == "" {
		cluster.Status.ClusterCIDR = defaultClusterCIDR
	}
	cluster.Status.ServiceCIDR = cluster.Spec.ServiceCIDR
	if cluster.Status.ServiceCIDR == "" {
		cluster.Status.ServiceCIDR = defaultClusterServiceCIDR
	}
	klog.Infof("creating cluster service")
	serviceIP, err := c.createClusterService(ctx, cluster, server)
	if err != nil {
		return util.LogAndReturnErr("failed to create cluster service", err)
	}
	if err := c.createClusterConfigs(ctx, cluster, serviceIP); err != nil {
		return util.LogAndReturnErr("failed to create cluster configs", err)
	}
	// creating statefulsets in case the user chose a persistence type other than ephermal
	if cluster.Spec.Persistence.Type != EphermalNodesType {
		if cluster.Spec.Persistence.StorageRequestSize == "" {
			// default to 1G of request size
			cluster.Spec.Persistence.StorageRequestSize = "1G"
		}
		if err := c.createStatefulSets(ctx, cluster, server, agent); err != nil {
			return util.LogAndReturnErr("failed to create servers and agents statefulsets", err)
		}
	} else {
		if err := c.createDeployments(ctx, cluster, server); err != nil {
			return util.LogAndReturnErr("failed to create servers and agents deployment", err)
		}
	}
	if cluster.Spec.Expose != nil {
		if cluster.Spec.Expose.Ingress != nil {
			serverIngress, err := server.Ingress(ctx, c.Client)
			if err != nil {
				return util.LogAndReturnErr("failed to create ingress object", err)
			}
			if err := c.Client.Create(ctx, serverIngress); err != nil {
				if !apierrors.IsAlreadyExists(err) {
					return util.LogAndReturnErr("failed to create server ingress", err)
				}
			}
		}
	}
	kubeconfigSecret, err := server.GenerateNewKubeConfig(ctx, serviceIP)
	if err != nil {
		return util.LogAndReturnErr("failed to generate new kubeconfig", err)
	}
	if err := c.Client.Create(ctx, kubeconfigSecret); err != nil {
		if !apierrors.IsAlreadyExists(err) {
			return util.LogAndReturnErr("failed to create kubeconfig secret", err)
		}
	}
	// NOTE(review): a plain Update does not persist .Status fields — the CIDRs
	// written to cluster.Status above likely need Status().Update; confirm.
	return c.Client.Update(ctx, cluster)
}
// createNamespace ensures the per-cluster namespace exists and is owned by
// the Cluster object; an AlreadyExists error is tolerated for idempotency.
func (c *ClusterReconciler) createNamespace(ctx context.Context, cluster *v1alpha1.Cluster) error {
	ns := v1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name: util.ClusterNamespace(cluster),
		},
	}
	if err := controllerutil.SetControllerReference(cluster, &ns, c.Scheme); err != nil {
		return err
	}
	err := c.Client.Create(ctx, &ns)
	if err != nil && !apierrors.IsAlreadyExists(err) {
		return util.LogAndReturnErr("failed to create ns", err)
	}
	return nil
}
// createClusterConfigs creates the three config secrets consumed by the k3s
// nodes: the init-server config, the joining-server config, and the agent
// config. Each is owned by the Cluster, and AlreadyExists errors are
// tolerated so repeated reconciles are idempotent.
func (c *ClusterReconciler) createClusterConfigs(ctx context.Context, cluster *v1alpha1.Cluster, serviceIP string) error {
	// create init node config
	initServerConfig, err := config.Server(cluster, true, serviceIP)
	if err != nil {
		return err
	}
	if err := controllerutil.SetControllerReference(cluster, initServerConfig, c.Scheme); err != nil {
		return err
	}
	if err := c.Client.Create(ctx, initServerConfig); err != nil {
		if !apierrors.IsAlreadyExists(err) {
			return err
		}
	}
	// create servers configuration
	serverConfig, err := config.Server(cluster, false, serviceIP)
	if err != nil {
		return err
	}
	if err := controllerutil.SetControllerReference(cluster, serverConfig, c.Scheme); err != nil {
		return err
	}
	if err := c.Client.Create(ctx, serverConfig); err != nil {
		if !apierrors.IsAlreadyExists(err) {
			return err
		}
	}
	// create agents configuration
	// NOTE(review): this uses the file-local agentConfig helper, not
	// config.Agent — presumably intentional; confirm they should not diverge.
	agentsConfig := agentConfig(cluster, serviceIP)
	if err := controllerutil.SetControllerReference(cluster, &agentsConfig, c.Scheme); err != nil {
		return err
	}
	if err := c.Client.Create(ctx, &agentsConfig); err != nil {
		if !apierrors.IsAlreadyExists(err) {
			return err
		}
	}
	return nil
}
// createClusterService creates the ClusterIP service fronting the k3s servers
// and returns the cluster IP the API server assigned to it.
func (c *ClusterReconciler) createClusterService(ctx context.Context, cluster *v1alpha1.Cluster, server *server.Server) (string, error) {
	// create cluster service
	clusterService := server.Service(cluster)
	if err := controllerutil.SetControllerReference(cluster, clusterService, c.Scheme); err != nil {
		return "", err
	}
	// AlreadyExists is tolerated so reconciles stay idempotent.
	if err := c.Client.Create(ctx, clusterService); err != nil {
		if !apierrors.IsAlreadyExists(err) {
			return "", err
		}
	}
	// Re-read the service to learn its allocated ClusterIP.
	var service v1.Service
	objKey := client.ObjectKey{
		Namespace: util.ClusterNamespace(cluster),
		// NOTE(review): hard-coded name assumed to match server.Service's
		// object name — confirm against the server package.
		Name: "k3k-server-service",
	}
	if err := c.Client.Get(ctx, objKey, &service); err != nil {
		return "", err
	}
	return service.Spec.ClusterIP, nil
}
// createDeployments creates the ephemeral (Deployment-backed) workloads:
// the init server, the remaining servers, and the agents. All objects are
// owned by the Cluster and AlreadyExists errors are tolerated.
func (c *ClusterReconciler) createDeployments(ctx context.Context, cluster *v1alpha1.Cluster, server *server.Server) error {
	// create deployment for the init server
	// the init deployment must have only 1 replica
	initServerDeployment, err := server.Deploy(ctx, true)
	if err != nil {
		return err
	}
	if err := controllerutil.SetControllerReference(cluster, initServerDeployment, c.Scheme); err != nil {
		return err
	}
	if err := c.Client.Create(ctx, initServerDeployment); err != nil {
		if !apierrors.IsAlreadyExists(err) {
			return err
		}
	}
	// create deployment for the rest of the servers
	serversDeployment, err := server.Deploy(ctx, false)
	if err != nil {
		return err
	}
	if err := controllerutil.SetControllerReference(cluster, serversDeployment, c.Scheme); err != nil {
		return err
	}
	if err := c.Client.Create(ctx, serversDeployment); err != nil {
		if !apierrors.IsAlreadyExists(err) {
			return err
		}
	}
	// NOTE(review): this builds a fresh Agent even though createCluster already
	// constructed one; the local variable also shadows the imported agent
	// package. Both are pre-existing; consider passing the agent in.
	agent := agent.New(cluster)
	agentsDeployment := agent.Deploy()
	if err := controllerutil.SetControllerReference(cluster, agentsDeployment, c.Scheme); err != nil {
		return err
	}
	if err := c.Client.Create(ctx, agentsDeployment); err != nil {
		if !apierrors.IsAlreadyExists(err) {
			return err
		}
	}
	return nil
}
// createStatefulSets creates the persistent (StatefulSet-backed) workloads:
// a headless service plus StatefulSet for the init server, for the remaining
// servers, and for the agents. All objects are owned by the Cluster and
// AlreadyExists errors are tolerated for idempotency.
func (c *ClusterReconciler) createStatefulSets(ctx context.Context, cluster *v1alpha1.Cluster, server *server.Server, agent *agent.Agent) error {
	// create headless service for the init statefulset
	initServerStatefulService := server.StatefulServerService(cluster, true)
	if err := controllerutil.SetControllerReference(cluster, initServerStatefulService, c.Scheme); err != nil {
		return err
	}
	if err := c.Client.Create(ctx, initServerStatefulService); err != nil {
		if !apierrors.IsAlreadyExists(err) {
			return err
		}
	}
	// create statefulsets for the init server
	// the init statefulset must have only 1 replica
	initServerStatefulSet := server.StatefulServer(ctx, cluster, true)
	if err := controllerutil.SetControllerReference(cluster, initServerStatefulSet, c.Scheme); err != nil {
		return err
	}
	if err := c.Client.Create(ctx, initServerStatefulSet); err != nil {
		if !apierrors.IsAlreadyExists(err) {
			return err
		}
	}
	// create statefulset for the rest of the servers
	// create headless service for the server statefulset
	serverStatefulService := server.StatefulServerService(cluster, false)
	if err := controllerutil.SetControllerReference(cluster, serverStatefulService, c.Scheme); err != nil {
		return err
	}
	if err := c.Client.Create(ctx, serverStatefulService); err != nil {
		if !apierrors.IsAlreadyExists(err) {
			return err
		}
	}
	serversStatefulSet := server.StatefulServer(ctx, cluster, false)
	if err := controllerutil.SetControllerReference(cluster, serversStatefulSet, c.Scheme); err != nil {
		return err
	}
	if err := c.Client.Create(ctx, serversStatefulSet); err != nil {
		if !apierrors.IsAlreadyExists(err) {
			return err
		}
	}
	// create headless service for the agents statefulset
	agentStatefulService := agent.StatefulAgentService(cluster)
	if err := controllerutil.SetControllerReference(cluster, agentStatefulService, c.Scheme); err != nil {
		return err
	}
	if err := c.Client.Create(ctx, agentStatefulService); err != nil {
		if !apierrors.IsAlreadyExists(err) {
			return err
		}
	}
	agentsStatefulSet := agent.StatefulAgent(cluster)
	if err := controllerutil.SetControllerReference(cluster, agentsStatefulSet, c.Scheme); err != nil {
		return err
	}
	if err := c.Client.Create(ctx, agentsStatefulSet); err != nil {
		if !apierrors.IsAlreadyExists(err) {
			return err
		}
	}
	return nil
}
// serverData renders the k3s config for servers joining via the service IP.
// BUG FIX: a newline was missing after the server URL, so the first option
// emitted by serverOptions was concatenated onto the `server:` line,
// producing invalid YAML. (The config package carries the same fix.)
func serverData(serviceIP string, cluster *v1alpha1.Cluster) string {
	return "cluster-init: true\nserver: https://" + serviceIP + ":6443\n" + serverOptions(cluster)
}
// initConfigData renders the k3s config for the cluster-init server.
func initConfigData(cluster *v1alpha1.Cluster) string {
	opts := serverOptions(cluster)
	return "cluster-init: true\n" + opts
}
// serverOptions renders the shared k3s server options — token, cluster and
// service CIDRs, cluster DNS, and TLS SANs — as YAML lines. Empty fields are
// omitted.
func serverOptions(cluster *v1alpha1.Cluster) string {
	opts := ""
	// TODO: generate token if not found
	if token := cluster.Spec.Token; token != "" {
		opts += "token: " + token + "\n"
	}
	if cidr := cluster.Status.ClusterCIDR; cidr != "" {
		opts += "cluster-cidr: " + cidr + "\n"
	}
	if cidr := cluster.Status.ServiceCIDR; cidr != "" {
		opts += "service-cidr: " + cidr + "\n"
	}
	if dns := cluster.Spec.ClusterDNS; dns != "" {
		opts += "cluster-dns: " + dns + "\n"
	}
	if sans := cluster.Spec.TLSSANs; len(sans) > 0 {
		opts += "tls-san:\n"
		for _, addr := range sans {
			opts += "- " + addr + "\n"
		}
	}
	// TODO: Add extra args to the options
	return opts
}
// agentConfig builds the Secret holding the k3s agent config for the cluster.
func agentConfig(cluster *v1alpha1.Cluster, serviceIP string) v1.Secret {
	secret := v1.Secret{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Secret",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      "k3k-agent-config",
			Namespace: util.ClusterNamespace(cluster),
		},
	}
	secret.Data = map[string][]byte{
		"config.yaml": []byte(agentData(serviceIP, cluster.Spec.Token)),
	}
	return secret
}
// agentData renders the k3s agent config body: the server URL followed by the
// join token, one per line.
func agentData(serviceIP, token string) string {
	return "server: https://" + serviceIP + ":6443\ntoken: " + token
}

View File

@@ -3,14 +3,14 @@ package config
import (
"fmt"
"github.com/galal-hussein/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/galal-hussein/k3k/pkg/controller/util"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/util"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func AgentConfig(cluster *v1alpha1.Cluster, serviceIP string) v1.Secret {
config := agentConfigData(serviceIP, cluster.Spec.Token)
func Agent(cluster *v1alpha1.Cluster, serviceIP string) v1.Secret {
config := agentData(serviceIP, cluster.Spec.Token)
return v1.Secret{
TypeMeta: metav1.TypeMeta{
@@ -27,7 +27,8 @@ func AgentConfig(cluster *v1alpha1.Cluster, serviceIP string) v1.Secret {
}
}
func agentConfigData(serviceIP, token string) string {
func agentData(serviceIP, token string) string {
return fmt.Sprintf(`server: https://%s:6443
token: %s`, serviceIP, token)
token: %s
with-node-id: true`, serviceIP, token)
}

View File

@@ -1,13 +1,13 @@
package config
import (
"github.com/galal-hussein/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/galal-hussein/k3k/pkg/controller/util"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/util"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func ServerConfig(cluster *v1alpha1.Cluster, init bool, serviceIP string) (*v1.Secret, error) {
func Server(cluster *v1alpha1.Cluster, init bool, serviceIP string) (*v1.Secret, error) {
name := "k3k-server-config"
if init {
name = "k3k-init-server-config"
@@ -33,7 +33,7 @@ func ServerConfig(cluster *v1alpha1.Cluster, init bool, serviceIP string) (*v1.S
}
func serverConfigData(serviceIP string, cluster *v1alpha1.Cluster) string {
return "cluster-init: true\nserver: https://" + serviceIP + ":6443" + serverOptions(cluster)
return "cluster-init: true\nserver: https://" + serviceIP + ":6443\n" + serverOptions(cluster)
}
func initConfigData(cluster *v1alpha1.Cluster) string {
@@ -47,15 +47,22 @@ func serverOptions(cluster *v1alpha1.Cluster) string {
if cluster.Spec.Token != "" {
opts = "token: " + cluster.Spec.Token + "\n"
}
if cluster.Spec.ClusterCIDR != "" {
opts = opts + "cluster-cidr: " + cluster.Spec.ClusterCIDR + "\n"
if cluster.Status.ClusterCIDR != "" {
opts = opts + "cluster-cidr: " + cluster.Status.ClusterCIDR + "\n"
}
if cluster.Spec.ServiceCIDR != "" {
opts = opts + "service-cidr: " + cluster.Spec.ServiceCIDR + "\n"
if cluster.Status.ServiceCIDR != "" {
opts = opts + "service-cidr: " + cluster.Status.ServiceCIDR + "\n"
}
if cluster.Spec.ClusterDNS != "" {
opts = opts + "cluster-dns: " + cluster.Spec.ClusterDNS + "\n"
}
if len(cluster.Spec.TLSSANs) > 0 {
opts = opts + "tls-san:\n"
for _, addr := range cluster.Spec.TLSSANs {
opts = opts + "- " + addr + "\n"
}
}
// TODO: Add extra args to the options
return opts
}

View File

@@ -1,363 +0,0 @@
package cluster
import (
"context"
"github.com/galal-hussein/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/galal-hussein/k3k/pkg/controller/cluster/agent"
"github.com/galal-hussein/k3k/pkg/controller/cluster/config"
"github.com/galal-hussein/k3k/pkg/controller/cluster/server"
"github.com/galal-hussein/k3k/pkg/controller/util"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
)
const (
clusterController = "k3k-cluster-controller"
clusterFinalizerName = "cluster.k3k.io/finalizer"
)
type ClusterReconciler struct {
Client client.Client
Scheme *runtime.Scheme
}
// Add adds a new controller to the manager
func Add(ctx context.Context, mgr manager.Manager) error {
// initialize a new Reconciler
reconciler := ClusterReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}
clusterSubnets, err := generateSubnets(defaultClusterCIDR)
if err != nil {
return err
}
var clusterSubnetAllocations []v1alpha1.Allocation
for _, cs := range clusterSubnets {
clusterSubnetAllocations = append(clusterSubnetAllocations, v1alpha1.Allocation{
IPNet: cs,
})
}
cidrClusterPool := v1alpha1.CIDRAllocationPool{
ObjectMeta: metav1.ObjectMeta{
Name: cidrAllocationClusterPoolName,
},
Spec: v1alpha1.CIDRAllocationPoolSpec{
DefaultClusterCIDR: defaultClusterCIDR,
},
Status: v1alpha1.CIDRAllocationPoolStatus{
Pool: clusterSubnetAllocations,
},
}
if err := reconciler.Client.Create(ctx, &cidrClusterPool); err != nil {
if !apierrors.IsConflict(err) {
// return nil since the resource has
// already been created
return nil
}
return err
}
clusterServiceSubnets, err := generateSubnets(defaultClusterServiceCIDR)
if err != nil {
return err
}
var clusterServiceSubnetAllocations []v1alpha1.Allocation
for _, ss := range clusterServiceSubnets {
clusterServiceSubnetAllocations = append(clusterServiceSubnetAllocations, v1alpha1.Allocation{
IPNet: ss,
})
}
cidrServicePool := v1alpha1.CIDRAllocationPool{
ObjectMeta: metav1.ObjectMeta{
Name: cidrAllocationServicePoolName,
},
Spec: v1alpha1.CIDRAllocationPoolSpec{
DefaultClusterCIDR: defaultClusterCIDR,
},
Status: v1alpha1.CIDRAllocationPoolStatus{
Pool: clusterServiceSubnetAllocations,
},
}
if err := reconciler.Client.Create(ctx, &cidrServicePool); err != nil {
if !apierrors.IsConflict(err) {
// return nil since the resource has
// already been created
return nil
}
return err
}
// create a new controller and add it to the manager
//this can be replaced by the new builder functionality in controller-runtime
controller, err := controller.New(clusterController, mgr, controller.Options{
Reconciler: &reconciler,
MaxConcurrentReconciles: 1,
})
if err != nil {
return err
}
return controller.Watch(&source.Kind{Type: &v1alpha1.Cluster{}}, &handler.EnqueueRequestForObject{})
}
func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
var cluster v1alpha1.Cluster
if err := c.Client.Get(ctx, req.NamespacedName, &cluster); err != nil {
return reconcile.Result{}, client.IgnoreNotFound(err)
}
if cluster.DeletionTimestamp.IsZero() {
if !controllerutil.ContainsFinalizer(&cluster, clusterFinalizerName) {
controllerutil.AddFinalizer(&cluster, clusterFinalizerName)
if err := c.Client.Update(ctx, &cluster); err != nil {
return reconcile.Result{}, err
}
}
// we create a namespace for each new cluster
var ns v1.Namespace
objKey := client.ObjectKey{
Name: util.ClusterNamespace(&cluster),
}
if err := c.Client.Get(ctx, objKey, &ns); err != nil {
if !apierrors.IsNotFound(err) {
return reconcile.Result{}, util.WrapErr("failed to get cluster namespace "+util.ClusterNamespace(&cluster), err)
}
}
klog.Infof("enqueue cluster [%s]", cluster.Name)
return reconcile.Result{}, c.createCluster(ctx, &cluster)
}
if controllerutil.ContainsFinalizer(&cluster, clusterFinalizerName) {
// TODO: handle CIDR deletion
if err := c.releaseCIDR(ctx, cluster.Status.ClusterCIDR, cluster.Name); err != nil {
return reconcile.Result{}, err
}
// remove our finalizer from the list and update it.
controllerutil.RemoveFinalizer(&cluster, clusterFinalizerName)
if err := c.Client.Update(ctx, &cluster); err != nil {
return reconcile.Result{}, err
}
}
klog.Infof("deleting cluster [%s]", cluster.Name)
return reconcile.Result{}, nil
}
// createCluster provisions everything a virtual cluster needs: its
// namespace, cluster/service CIDRs (allocated from the controller pools when
// neither spec nor status already holds one), the server service, the config
// objects, the server/agent deployments, an optional ingress, and a
// kubeconfig secret. It finishes by persisting the Cluster object, which
// carries any newly allocated CIDRs in its status.
func (c *ClusterReconciler) createCluster(ctx context.Context, cluster *v1alpha1.Cluster) error {
// create a new namespace for the cluster
if err := c.createNamespace(ctx, cluster); err != nil {
return util.WrapErr("failed to create ns", err)
}
// allocate a cluster CIDR only when the spec does not pin one and no
// previous reconcile recorded one in status (keeps allocation idempotent)
if cluster.Spec.ClusterCIDR == "" && cluster.Status.ClusterCIDR == "" {
clusterCIDR, err := c.nextCIDR(ctx, cidrAllocationClusterPoolName, cluster.Name)
if err != nil {
return err
}
cluster.Status.ClusterCIDR = clusterCIDR.String()
}
// same rule for the service CIDR
if cluster.Spec.ServiceCIDR == "" && cluster.Status.ServiceCIDR == "" {
serviceCIDR, err := c.nextCIDR(ctx, cidrAllocationServicePoolName, cluster.Name)
if err != nil {
return err
}
cluster.Status.ServiceCIDR = serviceCIDR.String()
}
// the service ClusterIP is needed by the config objects and kubeconfig below
serviceIP, err := c.createClusterService(ctx, cluster)
if err != nil {
return util.WrapErr("failed to create cluster service", err)
}
if err := c.createClusterConfigs(ctx, cluster, serviceIP); err != nil {
return util.WrapErr("failed to create cluster configs", err)
}
if err := c.createDeployments(ctx, cluster); err != nil {
return util.WrapErr("failed to create servers and agents deployment", err)
}
// NOTE(review): Spec.Expose is nil-checked elsewhere in this package before
// dereferencing; this unguarded access may panic when Expose is unset — confirm.
if cluster.Spec.Expose.Ingress.Enabled {
serverIngress, err := server.Ingress(ctx, cluster, c.Client)
if err != nil {
return util.WrapErr("failed to create ingress object", err)
}
// tolerate an ingress left over from a previous reconcile
if err := c.Client.Create(ctx, serverIngress); err != nil {
if !apierrors.IsAlreadyExists(err) {
return util.WrapErr("failed to create server ingress", err)
}
}
}
kubeconfigSecret, err := server.GenerateNewKubeConfig(ctx, cluster, serviceIP)
if err != nil {
return util.WrapErr("failed to generate new kubeconfig", err)
}
if err := c.Client.Create(ctx, kubeconfigSecret); err != nil {
if !apierrors.IsAlreadyExists(err) {
return util.WrapErr("failed to create kubeconfig secret", err)
}
}
// persist the object, including the status CIDRs set above.
// NOTE(review): status is saved with Update rather than Status().Update —
// confirm the Cluster CRD does not enable the status subresource.
return c.Client.Update(ctx, cluster)
}
// createNamespace ensures the dedicated namespace for the virtual cluster
// exists. The namespace is owned by the Cluster object, and an
// already-existing namespace is treated as success so the call is idempotent.
func (c *ClusterReconciler) createNamespace(ctx context.Context, cluster *v1alpha1.Cluster) error {
	ns := v1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name: util.ClusterNamespace(cluster),
		},
	}
	if err := controllerutil.SetControllerReference(cluster, &ns, c.Scheme); err != nil {
		return err
	}
	err := c.Client.Create(ctx, &ns)
	if err != nil && !apierrors.IsAlreadyExists(err) {
		return util.WrapErr("failed to create ns", err)
	}
	return nil
}
// createClusterConfigs creates the three k3s configuration objects for a
// cluster: the init-server config, the regular server config, and the agent
// config. Every object is owned by the Cluster, and creation tolerates
// already-existing objects so repeated reconciles stay idempotent.
func (c *ClusterReconciler) createClusterConfigs(ctx context.Context, cluster *v1alpha1.Cluster, serviceIP string) error {
	// create init node config
	initServerConfig, err := config.ServerConfig(cluster, true, serviceIP)
	if err != nil {
		return err
	}
	if err := c.ensureObject(ctx, cluster, initServerConfig); err != nil {
		return err
	}
	// create servers configuration
	serverConfig, err := config.ServerConfig(cluster, false, serviceIP)
	if err != nil {
		return err
	}
	if err := c.ensureObject(ctx, cluster, serverConfig); err != nil {
		return err
	}
	// create agents configuration
	agentsConfig := config.AgentConfig(cluster, serviceIP)
	return c.ensureObject(ctx, cluster, &agentsConfig)
}

// ensureObject sets cluster as the controlling owner of obj and creates it,
// treating an AlreadyExists error as success.
func (c *ClusterReconciler) ensureObject(ctx context.Context, cluster *v1alpha1.Cluster, obj client.Object) error {
	if err := controllerutil.SetControllerReference(cluster, obj, c.Scheme); err != nil {
		return err
	}
	if err := c.Client.Create(ctx, obj); err != nil && !apierrors.IsAlreadyExists(err) {
		return err
	}
	return nil
}
// createClusterService creates the k3k server service for the cluster
// (owned by the Cluster object, tolerating a pre-existing service) and
// returns the ClusterIP the API server assigned to it.
func (c *ClusterReconciler) createClusterService(ctx context.Context, cluster *v1alpha1.Cluster) (string, error) {
	svc := server.Service(cluster)
	if err := controllerutil.SetControllerReference(cluster, svc, c.Scheme); err != nil {
		return "", err
	}
	err := c.Client.Create(ctx, svc)
	if err != nil && !apierrors.IsAlreadyExists(err) {
		return "", err
	}
	// read the service back to learn the assigned ClusterIP
	var created v1.Service
	key := client.ObjectKey{
		Namespace: util.ClusterNamespace(cluster),
		Name:      "k3k-server-service",
	}
	if err := c.Client.Get(ctx, key, &created); err != nil {
		return "", err
	}
	return created.Spec.ClusterIP, nil
}
// createDeployments creates the workloads for the cluster: the init server
// deployment (which must have exactly 1 replica), the deployment for the
// remaining servers, and the agents deployment. Each object is owned by the
// Cluster, and AlreadyExists errors are treated as success.
func (c *ClusterReconciler) createDeployments(ctx context.Context, cluster *v1alpha1.Cluster) error {
	workloads := []client.Object{
		server.Server(cluster, true),  // init server, single replica
		server.Server(cluster, false), // the rest of the servers
		agent.Agent(cluster),          // agents
	}
	for _, obj := range workloads {
		if err := controllerutil.SetControllerReference(cluster, obj, c.Scheme); err != nil {
			return err
		}
		if err := c.Client.Create(ctx, obj); err != nil && !apierrors.IsAlreadyExists(err) {
			return err
		}
	}
	return nil
}

View File

@@ -3,8 +3,7 @@ package server
import (
"context"
"github.com/galal-hussein/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/galal-hussein/k3k/pkg/controller/util"
"github.com/rancher/k3k/pkg/controller/util"
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -18,39 +17,40 @@ const (
nginxSSLRedirectAnnotation = "nginx.ingress.kubernetes.io/ssl-redirect"
)
func Ingress(ctx context.Context, cluster *v1alpha1.Cluster, client client.Client) (*networkingv1.Ingress, error) {
func (s *Server) Ingress(ctx context.Context, client client.Client) (*networkingv1.Ingress, error) {
addresses, err := util.Addresses(ctx, client)
if err != nil {
return nil, err
}
ingressRules := ingressRules(cluster, addresses)
ingressRules := s.ingressRules(addresses)
ingress := &networkingv1.Ingress{
TypeMeta: metav1.TypeMeta{
Kind: "Ingress",
APIVersion: "networking.k8s.io/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: cluster.Name + "-server-ingress",
Namespace: util.ClusterNamespace(cluster),
Name: s.cluster.Name + "-server-ingress",
Namespace: util.ClusterNamespace(s.cluster),
},
Spec: networkingv1.IngressSpec{
IngressClassName: &cluster.Spec.Expose.Ingress.IngressClassName,
IngressClassName: &s.cluster.Spec.Expose.Ingress.IngressClassName,
Rules: ingressRules,
},
}
configureIngressOptions(ingress, cluster.Spec.Expose.Ingress.IngressClassName)
configureIngressOptions(ingress, s.cluster.Spec.Expose.Ingress.IngressClassName)
return ingress, nil
}
func ingressRules(cluster *v1alpha1.Cluster, addresses []string) []networkingv1.IngressRule {
ingressRules := []networkingv1.IngressRule{}
func (s *Server) ingressRules(addresses []string) []networkingv1.IngressRule {
var ingressRules []networkingv1.IngressRule
pathTypePrefix := networkingv1.PathTypePrefix
for _, address := range addresses {
rule := networkingv1.IngressRule{
Host: cluster.Name + "." + address + wildcardDNS,
Host: s.cluster.Name + "." + address + wildcardDNS,
IngressRuleValue: networkingv1.IngressRuleValue{
HTTP: &networkingv1.HTTPIngressRuleValue{
Paths: []networkingv1.HTTPIngressPath{
@@ -61,7 +61,7 @@ func ingressRules(cluster *v1alpha1.Cluster, addresses []string) []networkingv1.
Service: &networkingv1.IngressServiceBackend{
Name: "k3k-server-service",
Port: networkingv1.ServiceBackendPort{
Number: 6443,
Number: port,
},
},
},
@@ -72,6 +72,7 @@ func ingressRules(cluster *v1alpha1.Cluster, addresses []string) []networkingv1.
}
ingressRules = append(ingressRules, rule)
}
return ingressRules
}

View File

@@ -11,9 +11,8 @@ import (
"net/http"
"time"
"github.com/galal-hussein/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/galal-hussein/k3k/pkg/controller/util"
certutil "github.com/rancher/dynamiclistener/cert"
"github.com/rancher/k3k/pkg/controller/util"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apiserver/pkg/authentication/user"
@@ -44,8 +43,8 @@ type content struct {
// 2- generate client admin cert/key
// 3- use the ca cert from the bootstrap data & admin cert/key to write a new kubeconfig
// 4- save the new kubeconfig as a secret
func GenerateNewKubeConfig(ctx context.Context, cluster *v1alpha1.Cluster, ip string) (*v1.Secret, error) {
token := cluster.Spec.Token
func (s *Server) GenerateNewKubeConfig(ctx context.Context, ip string) (*v1.Secret, error) {
token := s.cluster.Spec.Token
var bootstrap *controlRuntimeBootstrap
if err := retry.OnError(retry.DefaultBackoff, func(err error) bool {
@@ -83,8 +82,8 @@ func GenerateNewKubeConfig(ctx context.Context, cluster *v1alpha1.Cluster, ip st
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: cluster.Name + "-kubeconfig",
Namespace: util.ClusterNamespace(cluster),
Name: s.cluster.Name + "-kubeconfig",
Namespace: util.ClusterNamespace(s.cluster),
},
Data: map[string][]byte{
"kubeconfig.yaml": kubeconfigData,

View File

@@ -1,44 +1,133 @@
package server
import (
"context"
"fmt"
"strconv"
"strings"
"github.com/galal-hussein/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/galal-hussein/k3k/pkg/controller/util"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/util"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
)
func Server(cluster *v1alpha1.Cluster, init bool) *apps.Deployment {
var replicas int32
image := util.K3SImage(cluster)
const (
serverName = "k3k-"
k3kSystemNamespace = serverName + "system"
initServerName = serverName + "init-server"
)
name := "k3k-server"
// Server
type Server struct {
cluster *v1alpha1.Cluster
client client.Client
}
func New(cluster *v1alpha1.Cluster, client client.Client) *Server {
return &Server{
cluster: cluster,
client: client,
}
}
func (s *Server) Deploy(ctx context.Context, init bool) (*apps.Deployment, error) {
var replicas int32
image := util.K3SImage(s.cluster)
name := serverName + "server"
if init {
name = "k3k-init-server"
name = serverName + "init-server"
}
replicas = *cluster.Spec.Servers - 1
replicas = *s.cluster.Spec.Servers - 1
if init {
replicas = 1
}
var volumes []v1.Volume
var volumeMounts []v1.VolumeMount
for _, addon := range s.cluster.Spec.Addons {
namespace := k3kSystemNamespace
if addon.SecretNamespace != "" {
namespace = addon.SecretNamespace
}
nn := types.NamespacedName{
Name: addon.SecretRef,
Namespace: namespace,
}
var addons v1.Secret
if err := s.client.Get(ctx, nn, &addons); err != nil {
return nil, err
}
clusterAddons := v1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: addons.Name,
Namespace: util.ClusterNamespace(s.cluster),
},
Data: make(map[string][]byte, len(addons.Data)),
}
for k, v := range addons.Data {
clusterAddons.Data[k] = v
}
if err := s.client.Create(ctx, &clusterAddons); err != nil {
return nil, err
}
name := "varlibrancherk3smanifests" + addon.SecretRef
volume := v1.Volume{
Name: name,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: addon.SecretRef,
},
},
}
volumes = append(volumes, volume)
volumeMount := v1.VolumeMount{
Name: name,
MountPath: "/var/lib/rancher/k3s/server/manifests/" + addon.SecretRef,
// changes to this part of the filesystem shouldn't be done manually. The secret should be updated instead.
ReadOnly: true,
}
volumeMounts = append(volumeMounts, volumeMount)
}
podSpec := s.podSpec(ctx, image, name, false)
podSpec.Volumes = append(podSpec.Volumes, volumes...)
podSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts, volumeMounts...)
fmt.Printf("XXX - Pod Spec\n %#v\n", podSpec)
return &apps.Deployment{
TypeMeta: metav1.TypeMeta{
Kind: "Deployment",
APIVersion: "apps/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: cluster.Name + "-" + name,
Namespace: util.ClusterNamespace(cluster),
Name: s.cluster.Name + "-" + name,
Namespace: util.ClusterNamespace(s.cluster),
},
Spec: apps.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"cluster": cluster.Name,
"cluster": s.cluster.Name,
"role": "server",
"init": strconv.FormatBool(init),
},
@@ -46,21 +135,21 @@ func Server(cluster *v1alpha1.Cluster, init bool) *apps.Deployment {
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"cluster": cluster.Name,
"cluster": s.cluster.Name,
"role": "server",
"init": strconv.FormatBool(init),
},
},
Spec: serverPodSpec(image, name, cluster.Spec.ServerArgs),
Spec: podSpec,
},
},
}
}, nil
}
func serverPodSpec(image, name string, args []string) v1.PodSpec {
privileged := true
func (s *Server) podSpec(ctx context.Context, image, name string, statefulSet bool) v1.PodSpec {
args := append([]string{"server", "--config", "/opt/rancher/k3s/config.yaml"}, s.cluster.Spec.ServerArgs...)
return v1.PodSpec{
podSpec := v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "config",
@@ -94,18 +183,6 @@ func serverPodSpec(image, name string, args []string) v1.PodSpec {
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
{
Name: "varlibkubelet",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
{
Name: "varlibrancherk3s",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
{
Name: "varlog",
VolumeSource: v1.VolumeSource{
@@ -118,17 +195,12 @@ func serverPodSpec(image, name string, args []string) v1.PodSpec {
Name: name,
Image: image,
SecurityContext: &v1.SecurityContext{
Privileged: &privileged,
Privileged: pointer.Bool(true),
},
Command: []string{
"/bin/sh",
},
Args: []string{
"-c",
"/bin/k3s server --config /opt/rancher/k3s/config.yaml " +
strings.Join(args, " ") +
" && true",
"/bin/k3s",
},
Args: args,
VolumeMounts: []v1.VolumeMount{
{
Name: "config",
@@ -169,4 +241,109 @@ func serverPodSpec(image, name string, args []string) v1.PodSpec {
},
},
}
if !statefulSet {
podSpec.Volumes = append(podSpec.Volumes, v1.Volume{
Name: "varlibkubelet",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
}, v1.Volume{
Name: "varlibrancherk3s",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
)
}
return podSpec
}
// StatefulServer builds the StatefulSet that runs the k3s server pods,
// backed by per-replica PVCs for "varlibrancherk3s" and "varlibkubelet"
// sized and classed from the cluster's Persistence spec. When init is true
// the object is the single-replica bootstrap server; otherwise it holds the
// remaining Servers-1 replicas.
func (s *Server) StatefulServer(ctx context.Context, cluster *v1alpha1.Cluster, init bool) *apps.StatefulSet {
	image := util.K3SImage(cluster)

	// NOTE(review): for the non-init case this yields the bare "k3k-" prefix
	// as the name suffix; Deploy uses serverName+"server" — confirm which is
	// intended before relying on the generated object names.
	name := serverName
	replicas := *cluster.Spec.Servers - 1
	if init {
		name = initServerName
		replicas = 1
	}
	return &apps.StatefulSet{
		TypeMeta: metav1.TypeMeta{
			// fixed: was "Deployment", which mislabels the object's kind
			Kind:       "StatefulSet",
			APIVersion: "apps/v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      cluster.Name + "-" + name,
			Namespace: util.ClusterNamespace(cluster),
		},
		Spec: apps.StatefulSetSpec{
			Replicas: &replicas,
			// headless service created by StatefulServerService
			ServiceName: cluster.Name + "-" + name + "-headless",
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{
					"cluster": cluster.Name,
					"role":    "server",
					"init":    strconv.FormatBool(init),
				},
			},
			VolumeClaimTemplates: []v1.PersistentVolumeClaim{
				{
					TypeMeta: metav1.TypeMeta{
						Kind:       "PersistentVolumeClaim",
						APIVersion: "v1",
					},
					ObjectMeta: metav1.ObjectMeta{
						Name:      "varlibrancherk3s",
						Namespace: util.ClusterNamespace(cluster),
					},
					Spec: v1.PersistentVolumeClaimSpec{
						AccessModes:      []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
						StorageClassName: &cluster.Spec.Persistence.StorageClassName,
						Resources: v1.ResourceRequirements{
							Requests: v1.ResourceList{
								"storage": resource.MustParse(cluster.Spec.Persistence.StorageRequestSize),
							},
						},
					},
				},
				{
					TypeMeta: metav1.TypeMeta{
						Kind:       "PersistentVolumeClaim",
						APIVersion: "v1",
					},
					ObjectMeta: metav1.ObjectMeta{
						Name:      "varlibkubelet",
						Namespace: util.ClusterNamespace(cluster),
					},
					Spec: v1.PersistentVolumeClaimSpec{
						Resources: v1.ResourceRequirements{
							Requests: v1.ResourceList{
								"storage": resource.MustParse(cluster.Spec.Persistence.StorageRequestSize),
							},
						},
						AccessModes:      []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
						StorageClassName: &cluster.Spec.Persistence.StorageClassName,
					},
				},
			},
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						"cluster": cluster.Name,
						"role":    "server",
						"init":    strconv.FormatBool(init),
					},
				},
				// statefulSet=true: podSpec omits the emptyDir volumes that
				// the PVC templates above replace
				Spec: s.podSpec(ctx, image, name, true),
			},
		},
	}
}

View File

@@ -1,13 +1,24 @@
package server
import (
"github.com/galal-hussein/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/galal-hussein/k3k/pkg/controller/util"
"strconv"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/util"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func Service(cluster *v1alpha1.Cluster) *v1.Service {
func (s *Server) Service(cluster *v1alpha1.Cluster) *v1.Service {
serviceType := v1.ServiceTypeClusterIP
if cluster.Spec.Expose != nil {
if cluster.Spec.Expose.NodePort != nil {
if cluster.Spec.Expose.NodePort.Enabled {
serviceType = v1.ServiceTypeNodePort
}
}
}
return &v1.Service{
TypeMeta: metav1.TypeMeta{
Kind: "Service",
@@ -18,11 +29,44 @@ func Service(cluster *v1alpha1.Cluster) *v1.Service {
Namespace: util.ClusterNamespace(cluster),
},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeClusterIP,
Type: serviceType,
Selector: map[string]string{
"cluster": cluster.Name,
"role": "server",
},
Ports: []v1.ServicePort{
{
Name: "k3s-server-port",
Protocol: v1.ProtocolTCP,
Port: port,
},
},
},
}
}
func (s *Server) StatefulServerService(cluster *v1alpha1.Cluster, init bool) *v1.Service {
name := serverName
if init {
name = initServerName
}
return &v1.Service{
TypeMeta: metav1.TypeMeta{
Kind: "Service",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: cluster.Name + "-" + name + "-headless",
Namespace: util.ClusterNamespace(cluster),
},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeClusterIP,
ClusterIP: v1.ClusterIPNone,
Selector: map[string]string{
"cluster": cluster.Name,
"role": "server",
"init": strconv.FormatBool(init),
},
Ports: []v1.ServicePort{
{
Name: "k3s-server-port",

View File

@@ -3,7 +3,7 @@ package util
import (
"context"
"github.com/galal-hussein/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
v1 "k8s.io/api/core/v1"
"k8s.io/klog"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -14,6 +14,10 @@ const (
k3SImageName = "rancher/k3s"
)
const (
K3kSystemNamespace = namespacePrefix + "system"
)
func ClusterNamespace(cluster *v1alpha1.Cluster) string {
return namespacePrefix + cluster.Name
}
@@ -22,7 +26,7 @@ func K3SImage(cluster *v1alpha1.Cluster) string {
return k3SImageName + ":" + cluster.Spec.Version
}
func WrapErr(errString string, err error) error {
func LogAndReturnErr(errString string, err error) error {
klog.Errorf("%s: %v", errString, err)
return err
}
@@ -35,7 +39,8 @@ func nodeAddress(node *v1.Node) string {
if ip.Type == "ExternalIP" && ip.Address != "" {
externalIP = ip.Address
break
} else if ip.Type == "InternalIP" && ip.Address != "" {
}
if ip.Type == "InternalIP" && ip.Address != "" {
internalIP = ip.Address
}
}

View File

@@ -1,10 +0,0 @@
package version
import "strings"
var (
Program = "k3k"
ProgramUpper = strings.ToUpper(Program)
Version = "dev"
GitCommit = "HEAD"
)