Compare commits

..

19 Commits

Author SHA1 Message Date
Hussein Galal
8968fe1d62 Fix docker image tag (#52)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2023-07-04 00:03:28 +03:00
Brad Davidson
84d3f768c6 Run k3s as pid 1 (#50)
Signed-off-by: Brad Davidson <brad.davidson@rancher.com>
2023-06-27 02:43:01 +03:00
Hussein Galal
decf24cb2a K3k chart (#51)
* Add release chart drone action

* fix release charts

* Add deploy dir to Dapper

* Add remove build step from drone k3k-chart

* Fix repo and org name

* fix ci

* add index.yaml
2023-06-24 00:27:05 +03:00
Hussein Galal
861078fa85 Remove github workflow and add drone (#49) 2023-06-21 00:43:16 +03:00
Hussein Galal
da5ddb27b5 Use env variables in CI action (#48) 2023-06-20 21:27:35 +03:00
Hussein Galal
e1576343a8 Fix action ci (#47) 2023-06-20 21:02:41 +03:00
Hussein Galal
ff256a324b fix action (#46)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2023-06-20 20:57:20 +03:00
Hussein Galal
6318fc29bf use custom action (#45)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2023-06-20 20:43:39 +03:00
Hussein Galal
d9eafbb1d2 Use ibuildthecloud/github-release in github action (#44)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2023-06-20 20:21:33 +03:00
Hussein Galal
da3ba1b5ff Fix release ci (#43) 2023-06-20 19:27:40 +03:00
Hussein Galal
176deae781 Update the chart (#38)
* Update chart to v0.1.1-k3k2

* update image tag
2023-06-14 03:18:44 +03:00
Brian Downs
7ec204683f Merge pull request #39 from briandowns/secure_build
update to perform secure build and possible arm64 support
2023-06-13 17:18:35 -07:00
Brian Downs
fac92fb21a update to perform secure build and possible arm64 support
Signed-off-by: Brian Downs <brian.downs@gmail.com>
2023-06-13 17:16:51 -07:00
Brian Downs
fb40f65c75 Merge pull request #37 from briandowns/update_package_refs 2023-06-13 17:00:42 -07:00
Brian Downs
b2e969f6df update package refs
Signed-off-by: Brian Downs <brian.downs@gmail.com>
2023-06-13 16:45:46 -07:00
Hussein Galal
43d7779dfa Export k3k cluster kubeconfig in k3kcli (#36)
* Export k3k cluster kubeconfig in k3kcli

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Update readme and logs

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2023-06-13 19:48:57 +03:00
Hussein Galal
ea1e7e486f Revert CIDR pool allocation and fix delete (#35)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2023-03-28 23:45:57 +02:00
Hussein Galal
7bcc312b4b move crds to the helm chart (#34)
* Fixes to the controller and cli

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Move crds to the helm chart

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix statically configured cluster

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2023-03-23 21:33:52 +02:00
Hussein Galal
dde877e285 Fixes to the controller and cli (#33)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2023-02-03 18:00:07 +02:00
33 changed files with 581 additions and 307 deletions

137
.drone.yml Normal file
View File

@@ -0,0 +1,137 @@
---
kind: pipeline
name: amd64
platform:
os: linux
arch: amd64
steps:
- name: build
image: rancher/dapper:v0.5.0
environment:
GITHUB_TOKEN:
from_secret: github_token
commands:
- dapper ci
- echo "${DRONE_TAG}-amd64" | sed -e 's/+/-/g' >.tags
volumes:
- name: docker
path: /var/run/docker.sock
when:
branch:
exclude:
- k3k-chart
- name: package-chart
image: rancher/dapper:v0.5.0
environment:
GITHUB_TOKEN:
from_secret: github_token
commands:
- dapper package-chart
volumes:
- name: docker
path: /var/run/docker.sock
when:
branch:
- k3k-chart
instance:
- drone-publish.rancher.io
- name: release-chart
image: rancher/dapper:v0.5.0
environment:
GITHUB_TOKEN:
from_secret: github_token
commands:
- dapper release-chart
volumes:
- name: docker
path: /var/run/docker.sock
when:
branch:
- k3k-chart
instance:
- drone-publish.rancher.io
- name: github_binary_release
image: ibuildthecloud/github-release:v0.0.1
settings:
api_key:
from_secret: github_token
prerelease: true
checksum:
- sha256
checksum_file: CHECKSUMsum-amd64.txt
checksum_flatten: true
files:
- "bin/*"
when:
instance:
- drone-publish.rancher.io
ref:
- refs/head/master
- refs/tags/*
event:
- tag
branch:
exclude:
- k3k-chart
- name: docker-publish
image: plugins/docker
settings:
dockerfile: package/Dockerfile
password:
from_secret: docker_password
repo: "rancher/k3k"
username:
from_secret: docker_username
when:
instance:
- drone-publish.rancher.io
ref:
- refs/head/master
- refs/tags/*
event:
- tag
branch:
exclude:
- k3k-chart
volumes:
- name: docker
host:
path: /var/run/docker.sock
---
kind: pipeline
type: docker
name: manifest
platform:
os: linux
arch: amd64
steps:
- name: push-runtime-manifest
image: plugins/manifest
settings:
username:
from_secret: docker_username
password:
from_secret: docker_password
spec: manifest-runtime.tmpl
when:
event:
- tag
instance:
- drone-publish.rancher.io
ref:
- refs/head/master
- refs/tags/*
branch:
exclude:
- k3k-chart
depends_on:
- amd64

View File

@@ -1,32 +0,0 @@
name: Chart Release
on:
push:
branches:
- main
jobs:
release:
permissions:
contents: write
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Configure Git
run: |
git config user.name "$GITHUB_ACTOR"
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
- name: Install Helm
uses: azure/setup-helm@v3
- name: Run chart-releaser
uses: helm/chart-releaser-action@v1.5.0
with:
charts_dir: charts
env:
CR_TOKEN: "${{ secrets.TOKEN }}"

View File

@@ -1,42 +0,0 @@
name: K3K Release
on:
push:
tags:
- "v*"
jobs:
release:
permissions:
contents: write
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Build K3K
uses: addnab/docker-run-action@v3
with:
registry: docker.io
image: rancher/dapper:v0.5.5
options: -v ${{ github.workspace }}:/work -v /var/run/docker.sock:/var/run/docker.sock
run: |
cd /work && dapper ci
- name: Publish Binaries
uses: SierraSoftworks/gh-releases@v1.0.7
with:
token: ${{ secrets.TOKEN }}
overwrite: 'true'
files: |
${{ github.workspace }}/bin/k3k
${{ github.workspace }}/bin/k3kcli
- name: Docker Hub Login
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push
uses: docker/build-push-action@v4
with:
push: true
tags: husseingalal/k3k:${{ github.ref_name }}
file: ./package/Dockerfile
context: .

View File

@@ -1,18 +1,21 @@
ARG GOLANG=golang:1.19.5-alpine3.17
ARG GOLANG=rancher/hardened-build-base:v1.20.4b11
FROM ${GOLANG}
ARG DAPPER_HOST_ARCH
ENV ARCH $DAPPER_HOST_ARCH
RUN apk -U add bash git gcc musl-dev docker vim less file curl wget ca-certificates
RUN apk -U add \bash git gcc musl-dev docker vim less file curl wget ca-certificates
RUN if [ "${ARCH}" == "amd64" ]; then \
curl -sL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s v1.15.0; \
fi
RUN curl -sL https://github.com/helm/chart-releaser/releases/download/v1.5.0/chart-releaser_1.5.0_linux_${ARCH}.tar.gz | tar -xz cr \
&& mv cr /bin/
ENV GO111MODULE on
ENV DAPPER_ENV REPO TAG DRONE_TAG CROSS
ENV DAPPER_SOURCE /go/src/github.com/galal-hussein/k3k/
ENV DAPPER_OUTPUT ./bin ./dist
ENV DAPPER_ENV REPO TAG DRONE_TAG CROSS GITHUB_TOKEN
ENV DAPPER_SOURCE /go/src/github.com/rancher/k3k/
ENV DAPPER_OUTPUT ./bin ./dist ./deploy
ENV DAPPER_DOCKER_SOCKET true
ENV HOME ${DAPPER_SOURCE}
WORKDIR ${DAPPER_SOURCE}

View File

@@ -2,9 +2,11 @@
A Kubernetes in Kubernetes tool, k3k provides a way to run multiple embedded isolated k3s clusters on your kubernetes cluster.
## Why?
## Example
![alt text](https://github.com/galal-hussein/k3k/blob/main/hack/becausewecan.jpg?raw=true)
An example on creating a k3k cluster on an RKE2 host using k3kcli
[![asciicast](https://asciinema.org/a/eYlc3dsL2pfP2B50i3Ea8MJJp.svg)](https://asciinema.org/a/eYlc3dsL2pfP2B50i3Ea8MJJp)
## Usage
@@ -18,7 +20,9 @@ Helm's [documentation](https://helm.sh/docs) to get started.
Once Helm has been set up correctly, add the repo as follows:
helm repo add k3k https://galal-hussein.github.io/k3k
```
helm repo add k3k https://rancher.github.io/k3k
```
If you had already added this repo earlier, run `helm repo update` to retrieve
the latest versions of the packages. You can then run `helm search repo
@@ -37,9 +41,9 @@ To uninstall the chart:
To create a new cluster you need to install and run the cli or create a cluster object, to install the cli:
```
wget https://github.com/galal-hussein/k3k/releases/download/v0.0.0-alpha2/k3k
chmod +x k3k
sudo cp k3k /usr/local/bin
wget https://github.com/rancher/k3k/releases/download/v0.0.0-alpha2/k3kcli
chmod +x k3kcli
sudo cp k3kcli /usr/local/bin
```
To create a new cluster you can use:

View File

@@ -2,5 +2,5 @@ apiVersion: v2
name: k3k
description: A Helm chart for K3K
type: application
version: 0.1.1
appVersion: 0.0.0-alpha3
version: 0.1.0-r1
appVersion: 0.0.0-alpha6

View File

@@ -39,6 +39,10 @@ spec:
type: array
items:
type: string
tlsSANs:
type: array
items:
type: string
expose:
type: object
properties:
@@ -54,11 +58,20 @@ spec:
properties:
enabled:
type: boolean
nodePort:
type: object
properties:
enabled:
type: boolean
status:
type: object
properties:
overrideClusterCIDR:
type: boolean
clusterCIDR:
type: string
overrideServiceCIDR:
type: boolean
serviceCIDR:
type: string
clusterDNS:

View File

@@ -2,10 +2,10 @@ replicaCount: 1
namespace: k3k-system
image:
repository: husseingalal/k3k
pullPolicy: IfNotPresent
repository: rancher/k3k
pullPolicy: Always
# Overrides the image tag whose default is the chart appVersion.
tag: "v0.0.0-alpha3"
tag: "v0.0.0-alpha6"
imagePullSecrets: []
nameOverride: ""
@@ -16,4 +16,4 @@ serviceAccount:
create: true
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
name: ""

View File

@@ -1,7 +1,7 @@
package cluster
import (
"github.com/galal-hussein/k3k/cli/cmds"
"github.com/rancher/k3k/cli/cmds"
"github.com/urfave/cli"
)

View File

@@ -3,21 +3,40 @@ package cluster
import (
"context"
"errors"
"fmt"
"net/url"
"os"
"path/filepath"
"strings"
"time"
"github.com/galal-hussein/k3k/cli/cmds"
"github.com/galal-hussein/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/cli/cmds"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/util"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/client"
)
var (
Scheme = runtime.NewScheme()
Scheme = runtime.NewScheme()
backoff = wait.Backoff{
Steps: 5,
Duration: 3 * time.Second,
Factor: 2,
Jitter: 0.1,
}
)
func init() {
@@ -82,7 +101,7 @@ var (
Name: "version",
Usage: "k3s version",
Destination: &version,
Value: "v1.26.1+k3s1",
Value: "v1.26.1-k3s1",
},
}
)
@@ -101,10 +120,11 @@ func createCluster(clx *cli.Context) error {
ctrlClient, err := client.New(restConfig, client.Options{
Scheme: Scheme,
})
if err != nil {
return err
}
logrus.Infof("creating a new cluster [%s]", name)
logrus.Infof("Creating a new cluster [%s]", name)
cluster := newCluster(
name,
token,
@@ -116,7 +136,54 @@ func createCluster(clx *cli.Context) error {
agentArgs,
)
return ctrlClient.Create(ctx, cluster)
cluster.Spec.Expose = &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{
Enabled: true,
},
}
// add Host IP address as an extra TLS-SAN to expose the k3k cluster
url, err := url.Parse(restConfig.Host)
if err != nil {
return err
}
host := strings.Split(url.Host, ":")
cluster.Spec.TLSSANs = []string{
host[0],
}
if err := ctrlClient.Create(ctx, cluster); err != nil {
if apierrors.IsAlreadyExists(err) {
logrus.Infof("Cluster [%s] already exists", name)
} else {
return err
}
}
logrus.Infof("Extracting Kubeconfig for [%s] cluster", name)
var kubeconfig []byte
err = retry.OnError(backoff, apierrors.IsNotFound, func() error {
kubeconfig, err = extractKubeconfig(ctx, ctrlClient, cluster, host[0])
if err != nil {
return err
}
return nil
})
if err != nil {
return err
}
pwd, err := os.Getwd()
if err != nil {
return err
}
logrus.Infof(`You can start using the cluster with:
export KUBECONFIG=%s
kubectl cluster-info
`, filepath.Join(pwd, cluster.Name+"-kubeconfig.yaml"))
return os.WriteFile(cluster.Name+"-kubeconfig.yaml", kubeconfig, 0644)
}
func validateCreateFlags(clx *cli.Context) error {
@@ -157,3 +224,78 @@ func newCluster(name, token string, servers, agents int32, clusterCIDR, serviceC
},
}
}
func extractKubeconfig(ctx context.Context, client client.Client, cluster *v1alpha1.Cluster, serverIP string) ([]byte, error) {
nn := types.NamespacedName{
Name: cluster.Name + "-kubeconfig",
Namespace: util.ClusterNamespace(cluster),
}
var kubeSecret v1.Secret
if err := client.Get(ctx, nn, &kubeSecret); err != nil {
return nil, err
}
kubeconfig := kubeSecret.Data["kubeconfig.yaml"]
if kubeconfig == nil {
return nil, errors.New("empty kubeconfig")
}
nn = types.NamespacedName{
Name: "k3k-server-service",
Namespace: util.ClusterNamespace(cluster),
}
var k3kService v1.Service
if err := client.Get(ctx, nn, &k3kService); err != nil {
return nil, err
}
if k3kService.Spec.Type == v1.ServiceTypeNodePort {
nodePort := k3kService.Spec.Ports[0].NodePort
restConfig, err := clientcmd.RESTConfigFromKubeConfig(kubeconfig)
if err != nil {
return nil, err
}
hostURL := fmt.Sprintf("https://%s:%d", serverIP, nodePort)
restConfig.Host = hostURL
clientConfig := generateKubeconfigFromRest(restConfig)
b, err := clientcmd.Write(clientConfig)
if err != nil {
return nil, err
}
kubeconfig = b
}
return kubeconfig, nil
}
func generateKubeconfigFromRest(config *rest.Config) clientcmdapi.Config {
clusters := make(map[string]*clientcmdapi.Cluster)
clusters["default-cluster"] = &clientcmdapi.Cluster{
Server: config.Host,
CertificateAuthorityData: config.CAData,
}
contexts := make(map[string]*clientcmdapi.Context)
contexts["default-context"] = &clientcmdapi.Context{
Cluster: "default-cluster",
Namespace: "default",
AuthInfo: "default",
}
authinfos := make(map[string]*clientcmdapi.AuthInfo)
authinfos["default"] = &clientcmdapi.AuthInfo{
ClientCertificateData: config.CertData,
ClientKeyData: config.KeyData,
}
clientConfig := clientcmdapi.Config{
Kind: "Config",
APIVersion: "v1",
Clusters: clusters,
Contexts: contexts,
CurrentContext: "default-context",
AuthInfos: authinfos,
}
return clientConfig
}

View File

@@ -3,9 +3,9 @@ package main
import (
"os"
"github.com/galal-hussein/k3k/cli/cmds"
"github.com/galal-hussein/k3k/cli/cmds/cluster"
"github.com/galal-hussein/k3k/pkg/version"
"github.com/rancher/k3k/cli/cmds"
"github.com/rancher/k3k/cli/cmds/cluster"
"github.com/rancher/k3k/pkg/version"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)

View File

@@ -1,38 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: cidrallocationpools.k3k.io
spec:
group: k3k.io
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
properties:
defaultClusterCIDR:
type: string
status:
type: object
properties:
pool:
type: array
items:
type: object
properties:
clusterName:
type: string
issued:
type: integer
ipNet:
type: string
scope: Cluster
names:
plural: cidrallocationpools
singular: cidrallocationpool
kind: CIDRAllocationPool

4
go.mod
View File

@@ -1,4 +1,4 @@
module github.com/galal-hussein/k3k
module github.com/rancher/k3k
go 1.19
@@ -67,7 +67,7 @@ require (
gopkg.in/yaml.v2 v2.4.0 // indirect
k8s.io/apiserver v0.26.1
k8s.io/klog/v2 v2.80.1
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 // indirect
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448
sigs.k8s.io/controller-runtime v0.14.1
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect

View File

@@ -12,8 +12,8 @@ CODEGEN_PKG=./code-generator
"${CODEGEN_PKG}/generate-groups.sh" \
"deepcopy" \
github.com/galal-hussein/k3k/pkg/generated \
github.com/galal-hussein/k3k/pkg/apis \
github.com/rancher/k3k/pkg/generated \
github.com/rancher/k3k/pkg/apis \
"k3k.io:v1alpha1" \
--go-header-file "${SCRIPT_ROOT}"/hack/boilerplate.go.txt \
--output-base "$(dirname "${BASH_SOURCE[0]}")/../../../.."

View File

@@ -5,8 +5,8 @@ import (
"context"
"flag"
"github.com/galal-hussein/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/galal-hussein/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/cluster"
"k8s.io/apimachinery/pkg/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/clientcmd"

6
manifest-runtime.tmpl Normal file
View File

@@ -0,0 +1,6 @@
image: rancher/k3k:{{replace "+" "-" build.tag}}
manifests:
- image: rancher/k3k:{{replace "+" "-" build.tag}}-amd64
platform:
architecture: amd64
os: linux

View File

@@ -1,7 +1,7 @@
package v1alpha1
import (
k3k "github.com/galal-hussein/k3k/pkg/apis/k3k.io"
k3k "github.com/rancher/k3k/pkg/apis/k3k.io"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
@@ -19,7 +19,9 @@ func Resource(resource string) schema.GroupResource {
}
func addKnownTypes(s *runtime.Scheme) error {
s.AddKnownTypes(SchemeGroupVersion, &Cluster{}, &ClusterList{})
s.AddKnownTypes(SchemeGroupVersion,
&Cluster{},
&ClusterList{})
metav1.AddToGroupVersion(s, SchemeGroupVersion)
return nil
}

View File

@@ -16,19 +16,19 @@ type Cluster struct {
}
type ClusterSpec struct {
Name string `json:"name"`
Version string `json:"version"`
Servers *int32 `json:"servers"`
Agents *int32 `json:"agents"`
Token string `json:"token"`
ClusterCIDR string `json:"clusterCIDR,omitempty"`
ServiceCIDR string `json:"serviceCIDR,omitempty"`
ClusterDNS string `json:"clusterDNS,omitempty"`
Name string `json:"name"`
Version string `json:"version"`
Servers *int32 `json:"servers"`
Agents *int32 `json:"agents"`
Token string `json:"token"`
ClusterCIDR string `json:"clusterCIDR,omitempty"`
ServiceCIDR string `json:"serviceCIDR,omitempty"`
ClusterDNS string `json:"clusterDNS,omitempty"`
ServerArgs []string `json:"serverArgs,omitempty"`
AgentArgs []string `json:"agentArgs,omitempty"`
TLSSANs []string `json:"tlsSANs,omitempty"`
ServerArgs []string `json:"serverArgs,omitempty"`
AgentArgs []string `json:"agentArgs,omitempty"`
Expose ExposeConfig `json:"expose,omitempty"`
Expose *ExposeConfig `json:"expose,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -43,6 +43,7 @@ type ClusterList struct {
type ExposeConfig struct {
Ingress *IngressConfig `json:"ingress"`
LoadBalancer *LoadBalancerConfig `json:"loadbalancer"`
NodePort *NodePortConfig `json:"nodePort"`
}
type IngressConfig struct {
@@ -54,6 +55,10 @@ type LoadBalancerConfig struct {
Enabled bool `json:"enabled"`
}
type NodePortConfig struct {
Enabled bool `json:"enabled"`
}
type ClusterStatus struct {
ClusterCIDR string `json:"clusterCIDR,omitempty"`
ServiceCIDR string `json:"serviceCIDR,omitempty"`

View File

@@ -207,7 +207,16 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
*out = make([]string, len(*in))
copy(*out, *in)
}
in.Expose.DeepCopyInto(&out.Expose)
if in.TLSSANs != nil {
in, out := &in.TLSSANs, &out.TLSSANs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Expose != nil {
in, out := &in.Expose, &out.Expose
*out = new(ExposeConfig)
(*in).DeepCopyInto(*out)
}
return
}
@@ -250,6 +259,11 @@ func (in *ExposeConfig) DeepCopyInto(out *ExposeConfig) {
*out = new(LoadBalancerConfig)
**out = **in
}
if in.NodePort != nil {
in, out := &in.NodePort, &out.NodePort
*out = new(NodePortConfig)
**out = **in
}
return
}
@@ -294,3 +308,19 @@ func (in *LoadBalancerConfig) DeepCopy() *LoadBalancerConfig {
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodePortConfig) DeepCopyInto(out *NodePortConfig) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePortConfig.
func (in *NodePortConfig) DeepCopy() *NodePortConfig {
if in == nil {
return nil
}
out := new(NodePortConfig)
in.DeepCopyInto(out)
return out
}

View File

@@ -3,7 +3,7 @@ package addressallocator
import (
"context"
"github.com/galal-hussein/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"

View File

@@ -1,13 +1,12 @@
package agent
import (
"strings"
"github.com/galal-hussein/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/galal-hussein/k3k/pkg/controller/util"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/util"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/pointer"
)
func Agent(cluster *v1alpha1.Cluster) *apps.Deployment {
@@ -46,8 +45,7 @@ func Agent(cluster *v1alpha1.Cluster) *apps.Deployment {
}
func agentPodSpec(image, name string, args []string) v1.PodSpec {
privileged := true
args = append([]string{"agent", "--config", "/opt/rancher/k3s/config.yaml"}, args...)
return v1.PodSpec{
Volumes: []v1.Volume{
{
@@ -106,17 +104,12 @@ func agentPodSpec(image, name string, args []string) v1.PodSpec {
Name: name,
Image: image,
SecurityContext: &v1.SecurityContext{
Privileged: &privileged,
Privileged: pointer.BoolPtr(true),
},
Command: []string{
"/bin/sh",
},
Args: []string{
"-c",
"/bin/k3s agent --config /opt/rancher/k3s/config.yaml " +
strings.Join(args, " ") +
" && true",
"/bin/k3s",
},
Args: args,
VolumeMounts: []v1.VolumeMount{
{
Name: "config",

View File

@@ -6,7 +6,8 @@ import (
"net"
"time"
"github.com/galal-hussein/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/util"
"k8s.io/apimachinery/pkg/types"
)
@@ -67,11 +68,20 @@ func (c *ClusterReconciler) nextCIDR(ctx context.Context, cidrAllocationPoolName
Name: cidrAllocationPoolName,
}
if err := c.Client.Get(ctx, nn, &cidrPool); err != nil {
return nil, err
return nil, util.WrapErr("failed to get cidrpool", err)
}
var ipNet *net.IPNet
for _, pool := range cidrPool.Status.Pool {
if pool.ClusterName == clusterName {
_, ipn, err := net.ParseCIDR(pool.IPNet)
if err != nil {
return nil, util.WrapErr("failed to parse cidr", err)
}
return ipn, nil
}
}
for i := 0; i < len(cidrPool.Status.Pool); i++ {
if cidrPool.Status.Pool[i].ClusterName == "" && cidrPool.Status.Pool[i].Issued == 0 {
cidrPool.Status.Pool[i].ClusterName = clusterName
@@ -79,11 +89,10 @@ func (c *ClusterReconciler) nextCIDR(ctx context.Context, cidrAllocationPoolName
_, ipn, err := net.ParseCIDR(cidrPool.Status.Pool[i].IPNet)
if err != nil {
return nil, err
return nil, util.WrapErr("failed to parse cidr", err)
}
if err := c.Client.Status().Update(ctx, &cidrPool); err != nil {
return nil, err
if err := c.Client.Update(ctx, &cidrPool); err != nil {
return nil, util.WrapErr("failed to update cidr pool", err)
}
ipNet = ipn

View File

@@ -3,8 +3,8 @@ package config
import (
"fmt"
"github.com/galal-hussein/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/galal-hussein/k3k/pkg/controller/util"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/util"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

View File

@@ -1,8 +1,8 @@
package config
import (
"github.com/galal-hussein/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/galal-hussein/k3k/pkg/controller/util"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/util"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -47,15 +47,22 @@ func serverOptions(cluster *v1alpha1.Cluster) string {
if cluster.Spec.Token != "" {
opts = "token: " + cluster.Spec.Token + "\n"
}
if cluster.Spec.ClusterCIDR != "" {
opts = opts + "cluster-cidr: " + cluster.Spec.ClusterCIDR + "\n"
if cluster.Status.ClusterCIDR != "" {
opts = opts + "cluster-cidr: " + cluster.Status.ClusterCIDR + "\n"
}
if cluster.Spec.ServiceCIDR != "" {
opts = opts + "service-cidr: " + cluster.Spec.ServiceCIDR + "\n"
if cluster.Status.ServiceCIDR != "" {
opts = opts + "service-cidr: " + cluster.Status.ServiceCIDR + "\n"
}
if cluster.Spec.ClusterDNS != "" {
opts = opts + "cluster-dns: " + cluster.Spec.ClusterDNS + "\n"
}
if len(cluster.Spec.TLSSANs) > 0 {
opts = opts + "tls-san:\n"
for _, addr := range cluster.Spec.TLSSANs {
opts = opts + "- " + addr + "\n"
}
}
// TODO: Add extra args to the options
return opts
}

View File

@@ -3,11 +3,11 @@ package cluster
import (
"context"
"github.com/galal-hussein/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/galal-hussein/k3k/pkg/controller/cluster/agent"
"github.com/galal-hussein/k3k/pkg/controller/cluster/config"
"github.com/galal-hussein/k3k/pkg/controller/cluster/server"
"github.com/galal-hussein/k3k/pkg/controller/util"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
"github.com/rancher/k3k/pkg/controller/cluster/config"
"github.com/rancher/k3k/pkg/controller/cluster/server"
"github.com/rancher/k3k/pkg/controller/util"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -40,72 +40,6 @@ func Add(ctx context.Context, mgr manager.Manager) error {
Scheme: mgr.GetScheme(),
}
clusterSubnets, err := generateSubnets(defaultClusterCIDR)
if err != nil {
return err
}
var clusterSubnetAllocations []v1alpha1.Allocation
for _, cs := range clusterSubnets {
clusterSubnetAllocations = append(clusterSubnetAllocations, v1alpha1.Allocation{
IPNet: cs,
})
}
cidrClusterPool := v1alpha1.CIDRAllocationPool{
ObjectMeta: metav1.ObjectMeta{
Name: cidrAllocationClusterPoolName,
},
Spec: v1alpha1.CIDRAllocationPoolSpec{
DefaultClusterCIDR: defaultClusterCIDR,
},
Status: v1alpha1.CIDRAllocationPoolStatus{
Pool: clusterSubnetAllocations,
},
}
if err := reconciler.Client.Create(ctx, &cidrClusterPool); err != nil {
if !apierrors.IsConflict(err) {
// return nil since the resource has
// already been created
return nil
}
return err
}
clusterServiceSubnets, err := generateSubnets(defaultClusterServiceCIDR)
if err != nil {
return err
}
var clusterServiceSubnetAllocations []v1alpha1.Allocation
for _, ss := range clusterServiceSubnets {
clusterServiceSubnetAllocations = append(clusterServiceSubnetAllocations, v1alpha1.Allocation{
IPNet: ss,
})
}
cidrServicePool := v1alpha1.CIDRAllocationPool{
ObjectMeta: metav1.ObjectMeta{
Name: cidrAllocationServicePoolName,
},
Spec: v1alpha1.CIDRAllocationPoolSpec{
DefaultClusterCIDR: defaultClusterCIDR,
},
Status: v1alpha1.CIDRAllocationPoolStatus{
Pool: clusterServiceSubnetAllocations,
},
}
if err := reconciler.Client.Create(ctx, &cidrServicePool); err != nil {
if !apierrors.IsConflict(err) {
// return nil since the resource has
// already been created
return nil
}
return err
}
// create a new controller and add it to the manager
//this can be replaced by the new builder functionality in controller-runtime
controller, err := controller.New(clusterController, mgr, controller.Options{
@@ -151,11 +85,6 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
}
if controllerutil.ContainsFinalizer(&cluster, clusterFinalizerName) {
// TODO: handle CIDR deletion
if err := c.releaseCIDR(ctx, cluster.Status.ClusterCIDR, cluster.Name); err != nil {
return reconcile.Result{}, err
}
// remove our finalizer from the list and update it.
controllerutil.RemoveFinalizer(&cluster, clusterFinalizerName)
if err := c.Client.Update(ctx, &cluster); err != nil {
@@ -173,22 +102,17 @@ func (c *ClusterReconciler) createCluster(ctx context.Context, cluster *v1alpha1
return util.WrapErr("failed to create ns", err)
}
if cluster.Spec.ClusterCIDR == "" && cluster.Status.ClusterCIDR == "" {
clusterCIDR, err := c.nextCIDR(ctx, cidrAllocationClusterPoolName, cluster.Name)
if err != nil {
return err
}
cluster.Status.ClusterCIDR = clusterCIDR.String()
cluster.Status.ClusterCIDR = cluster.Spec.ClusterCIDR
if cluster.Status.ClusterCIDR == "" {
cluster.Status.ClusterCIDR = defaultClusterCIDR
}
if cluster.Spec.ServiceCIDR == "" && cluster.Status.ServiceCIDR == "" {
serviceCIDR, err := c.nextCIDR(ctx, cidrAllocationServicePoolName, cluster.Name)
if err != nil {
return err
}
cluster.Status.ServiceCIDR = serviceCIDR.String()
cluster.Status.ServiceCIDR = cluster.Spec.ServiceCIDR
if cluster.Status.ServiceCIDR == "" {
cluster.Status.ServiceCIDR = defaultClusterServiceCIDR
}
klog.Infof("creating cluster service")
serviceIP, err := c.createClusterService(ctx, cluster)
if err != nil {
return util.WrapErr("failed to create cluster service", err)
@@ -202,15 +126,17 @@ func (c *ClusterReconciler) createCluster(ctx context.Context, cluster *v1alpha1
return util.WrapErr("failed to create servers and agents deployment", err)
}
if cluster.Spec.Expose.Ingress.Enabled {
serverIngress, err := server.Ingress(ctx, cluster, c.Client)
if err != nil {
return util.WrapErr("failed to create ingress object", err)
}
if cluster.Spec.Expose != nil {
if cluster.Spec.Expose.Ingress != nil {
serverIngress, err := server.Ingress(ctx, cluster, c.Client)
if err != nil {
return util.WrapErr("failed to create ingress object", err)
}
if err := c.Client.Create(ctx, serverIngress); err != nil {
if !apierrors.IsAlreadyExists(err) {
return util.WrapErr("failed to create server ingress", err)
if err := c.Client.Create(ctx, serverIngress); err != nil {
if !apierrors.IsAlreadyExists(err) {
return util.WrapErr("failed to create server ingress", err)
}
}
}
}
@@ -361,3 +287,68 @@ func (c *ClusterReconciler) createDeployments(ctx context.Context, cluster *v1al
return nil
}
func (c *ClusterReconciler) createCIDRPools(ctx context.Context) error {
clusterSubnets, err := generateSubnets(defaultClusterCIDR)
if err != nil {
return err
}
var clusterSubnetAllocations []v1alpha1.Allocation
for _, cs := range clusterSubnets {
clusterSubnetAllocations = append(clusterSubnetAllocations, v1alpha1.Allocation{
IPNet: cs,
})
}
cidrClusterPool := v1alpha1.CIDRAllocationPool{
ObjectMeta: metav1.ObjectMeta{
Name: cidrAllocationClusterPoolName,
},
Spec: v1alpha1.CIDRAllocationPoolSpec{
DefaultClusterCIDR: defaultClusterCIDR,
},
Status: v1alpha1.CIDRAllocationPoolStatus{
Pool: clusterSubnetAllocations,
},
}
if err := c.Client.Create(ctx, &cidrClusterPool); err != nil {
if !apierrors.IsAlreadyExists(err) {
// return nil since the resource has
// already been created
return err
}
}
clusterServiceSubnets, err := generateSubnets(defaultClusterServiceCIDR)
if err != nil {
return err
}
var clusterServiceSubnetAllocations []v1alpha1.Allocation
for _, ss := range clusterServiceSubnets {
clusterServiceSubnetAllocations = append(clusterServiceSubnetAllocations, v1alpha1.Allocation{
IPNet: ss,
})
}
cidrServicePool := v1alpha1.CIDRAllocationPool{
ObjectMeta: metav1.ObjectMeta{
Name: cidrAllocationServicePoolName,
},
Spec: v1alpha1.CIDRAllocationPoolSpec{
DefaultClusterCIDR: defaultClusterCIDR,
},
Status: v1alpha1.CIDRAllocationPoolStatus{
Pool: clusterServiceSubnetAllocations,
},
}
if err := c.Client.Create(ctx, &cidrServicePool); err != nil {
if !apierrors.IsAlreadyExists(err) {
// return nil since the resource has
// already been created
return err
}
}
return nil
}

View File

@@ -3,8 +3,8 @@ package server
import (
"context"
"github.com/galal-hussein/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/galal-hussein/k3k/pkg/controller/util"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/util"
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"

View File

@@ -11,9 +11,9 @@ import (
"net/http"
"time"
"github.com/galal-hussein/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/galal-hussein/k3k/pkg/controller/util"
certutil "github.com/rancher/dynamiclistener/cert"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/util"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apiserver/pkg/authentication/user"

View File

@@ -2,13 +2,13 @@ package server
import (
"strconv"
"strings"
"github.com/galal-hussein/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/galal-hussein/k3k/pkg/controller/util"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/util"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/pointer"
)
func Server(cluster *v1alpha1.Cluster, init bool) *apps.Deployment {
@@ -58,8 +58,7 @@ func Server(cluster *v1alpha1.Cluster, init bool) *apps.Deployment {
}
func serverPodSpec(image, name string, args []string) v1.PodSpec {
privileged := true
args = append([]string{"server", "--config", "/opt/rancher/k3s/config.yaml"}, args...)
return v1.PodSpec{
Volumes: []v1.Volume{
{
@@ -118,17 +117,12 @@ func serverPodSpec(image, name string, args []string) v1.PodSpec {
Name: name,
Image: image,
SecurityContext: &v1.SecurityContext{
Privileged: &privileged,
Privileged: pointer.BoolPtr(true),
},
Command: []string{
"/bin/sh",
},
Args: []string{
"-c",
"/bin/k3s server --config /opt/rancher/k3s/config.yaml " +
strings.Join(args, " ") +
" && true",
"/bin/k3s",
},
Args: args,
VolumeMounts: []v1.VolumeMount{
{
Name: "config",

View File

@@ -1,13 +1,21 @@
package server
import (
"github.com/galal-hussein/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/galal-hussein/k3k/pkg/controller/util"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/util"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func Service(cluster *v1alpha1.Cluster) *v1.Service {
serviceType := v1.ServiceTypeClusterIP
if cluster.Spec.Expose != nil {
if cluster.Spec.Expose.NodePort != nil {
if cluster.Spec.Expose.NodePort.Enabled {
serviceType = v1.ServiceTypeNodePort
}
}
}
return &v1.Service{
TypeMeta: metav1.TypeMeta{
Kind: "Service",
@@ -18,7 +26,7 @@ func Service(cluster *v1alpha1.Cluster) *v1.Service {
Namespace: util.ClusterNamespace(cluster),
},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeClusterIP,
Type: serviceType,
Selector: map[string]string{
"cluster": cluster.Name,
"role": "server",

View File

@@ -3,7 +3,7 @@ package util
import (
"context"
"github.com/galal-hussein/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
v1 "k8s.io/api/core/v1"
"k8s.io/klog"
"sigs.k8s.io/controller-runtime/pkg/client"

10
scripts/package-chart Executable file
View File

@@ -0,0 +1,10 @@
#!/bin/bash
# Package the k3k Helm chart into deploy/ using chart-releaser (cr).
set -ex

# Pull in VERSION/TAG variables computed by scripts/version.
# Quote the command substitutions so the script survives a checkout path
# that contains spaces (the original unquoted form word-split).
source "$(dirname "$0")/version"

cd "$(dirname "$0")/.."

mkdir -p deploy/
cr package --package-path deploy/ charts/k3k

32
scripts/release-chart Executable file
View File

@@ -0,0 +1,32 @@
#!/bin/bash
# Release the packaged k3k Helm chart to GitHub with chart-releaser (cr)
# and push an updated index.yaml.
set -ex

# Pull in VERSION/TAG variables computed by scripts/version.
source "$(dirname "$0")/version"

cd "$(dirname "$0")/.."

git fetch --tags

# The release tag cr will create is "chart-<chart version>".
CHART_TAG="chart-$(grep "version: " charts/k3k/Chart.yaml | awk '{print $2}')"

# Skip the release if the chart tag already exists. The original guard
# tested $version (the app version from scripts/version) instead of the
# chart tag actually being released, so it never protected the chart tag.
if [ -n "$(git tag -l "$CHART_TAG")" ]; then
    echo "tag already exists"
    exit 1
fi

# release the chart with artifacts
cr upload --token "${GITHUB_TOKEN}" \
    --release-name-template "chart-{{ .Version }}" \
    --package-path ./deploy/ \
    --git-repo k3k \
    --skip-existing \
    -o rancher

# update the index.yaml
cr index --token "${GITHUB_TOKEN}" \
    --release-name-template "chart-{{ .Version }}" \
    --package-path ./deploy/ \
    --index-path index.yaml \
    --git-repo k3k \
    -o rancher \
    --push

View File

@@ -20,7 +20,7 @@ fi
SUFFIX="-${ARCH}"
TAG=${TAG:-${VERSION}${SUFFIX}}
REPO=${REPO:-husseingalal}
REPO=${REPO:-rancher}
if echo $TAG | grep -q dirty; then
TAG=dev