Compare commits

...

2 Commits

Author SHA1 Message Date
Hussein Galal
4c4a1c9b57 Fix chart release (#24)
* Add release and chart workflow

* fix

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* more workflow fixes

* more workflow fixes

* initial allocator impl

Signed-off-by: Brian Downs <brian.downs@gmail.com>

* add doc comments

Signed-off-by: Brian Downs <brian.downs@gmail.com>

* update cidr pool names

Signed-off-by: Brian Downs <brian.downs@gmail.com>

* update per pr review

Signed-off-by: Brian Downs <brian.downs@gmail.com>

* Add release workflow (#20)

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add k3kcli (#21)

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix ci (#22)

* update to v0.0.0-alpha2

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
Signed-off-by: Brian Downs <brian.downs@gmail.com>
Co-authored-by: Brian Downs <brian.downs@gmail.com>
2023-02-03 06:12:08 +02:00
Hussein Galal
0409c5e638 update to v0.0.0-alpha2 (#23) 2023-02-03 06:08:00 +02:00
29 changed files with 850 additions and 139 deletions

View File

@@ -3,7 +3,7 @@ name: Chart Release
on:
push:
branches:
- main
- k3k-chart
jobs:
release:

41
.github/workflows/release.yml vendored Normal file
View File

@@ -0,0 +1,41 @@
name: K3K Release
on:
push:
tags:
- "v*"
jobs:
release:
permissions:
contents: write
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Build K3K
uses: addnab/docker-run-action@v3
with:
registry: docker.io
image: rancher/dapper:v0.5.5
options: -v ${{ github.workspace }}:/work -v /var/run/docker.sock:/var/run/docker.sock
run: |
cd /work && dapper ci
- name: Publish Binaries
uses: SierraSoftworks/gh-releases@v1.0.7
with:
token: ${{ secrets.TOKEN }}
overwrite: 'true'
files: |
${{ github.workspace }}/bin/k3k
- name: Docker Hub Login
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push
uses: docker/build-push-action@v4
with:
push: true
tags: husseingalal/k3k:${{ github.ref_name }}
file: ./package/Dockerfile
context: .

View File

@@ -2,5 +2,5 @@ apiVersion: v2
name: k3k
description: A Helm chart for K3K
type: application
version: 0.1.0
appVersion: 0.1.0
version: 0.0.0-alpha2
appVersion: v0.0.0-alpha2

View File

@@ -5,7 +5,7 @@ image:
repository: husseingalal/k3k
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
tag: "v0.0.0-alpha2"
imagePullSecrets: []
nameOverride: ""

View File

@@ -0,0 +1,27 @@
package cluster
import (
"github.com/galal-hussein/k3k/cli/cmds"
"github.com/urfave/cli"
)
// clusterSubcommands lists the subcommands mounted under the "cluster"
// command. Only "create" is implemented; its flag set is the shared
// kubeconfig flags (cmds.CommonFlags) plus the create-specific flags.
var clusterSubcommands = []cli.Command{
	{
		Name:            "create",
		Usage:           "Create new cluster",
		SkipFlagParsing: false,
		SkipArgReorder:  true,
		Action:          createCluster,
		Flags:           append(cmds.CommonFlags, clusterCreateFlags...),
	},
}
// NewClusterCommand returns the top-level "cluster" CLI command with its
// subcommands attached.
func NewClusterCommand() cli.Command {
	return cli.Command{
		Name:        "cluster",
		Usage:       "cluster command",
		Subcommands: clusterSubcommands,
	}
}

151
cli/cmds/cluster/create.go Normal file
View File

@@ -0,0 +1,151 @@
package cluster
import (
"context"
"errors"
"os"
"github.com/galal-hussein/k3k/cli/cmds"
"github.com/galal-hussein/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// Scheme is the runtime type registry handed to the controller-runtime
// client; both the built-in client-go types and the k3k v1alpha1 types
// are registered into it at package init.
var (
	Scheme = runtime.NewScheme()
)

func init() {
	// Registration errors are deliberately discarded: both calls register
	// fixed, well-formed type lists into a fresh scheme.
	_ = clientgoscheme.AddToScheme(Scheme)
	_ = v1alpha1.AddToScheme(Scheme)
}
var (
	// Flag destinations for "cluster create": urfave/cli writes the parsed
	// values into these package-level variables before the action runs.
	name        string
	token       string
	clusterCIDR string
	serviceCIDR string
	servers     int64
	agents      int64
	serverArgs  cli.StringSlice
	agentArgs   cli.StringSlice

	// clusterCreateFlags defines the create-specific command-line flags.
	clusterCreateFlags = []cli.Flag{
		cli.StringFlag{
			Name:        "name",
			Usage:       "name of the cluster",
			Destination: &name,
		},
		cli.Int64Flag{
			Name:        "servers",
			Usage:       "number of servers",
			Destination: &servers,
			// Default of one server so a bare "create" yields a usable cluster.
			Value: 1,
		},
		cli.Int64Flag{
			Name:        "agents",
			Usage:       "number of agents",
			Destination: &agents,
		},
		cli.StringFlag{
			Name:        "token",
			Usage:       "token of the cluster",
			Destination: &token,
		},
		cli.StringFlag{
			Name:        "cluster-cidr",
			Usage:       "cluster CIDR",
			Destination: &clusterCIDR,
		},
		cli.StringFlag{
			Name:        "service-cidr",
			Usage:       "service CIDR",
			Destination: &serviceCIDR,
		},
		cli.StringSliceFlag{
			Name:  "server-args",
			Usage: "servers extra arguments",
			Value: &serverArgs,
		},
		cli.StringSliceFlag{
			Name:  "agent-args",
			Usage: "agents extra arguments",
			Value: &agentArgs,
		},
	}
)
// createCluster is the action behind "k3kcli cluster create". It validates
// the bound flag values, builds a controller-runtime client from the
// configured kubeconfig, and submits a new Cluster object to the API server.
func createCluster(clx *cli.Context) error {
	if err := validateCreateFlags(clx); err != nil {
		return err
	}

	restConfig, err := clientcmd.BuildConfigFromFlags("", cmds.Kubeconfig)
	if err != nil {
		return err
	}

	ctrlClient, err := client.New(restConfig, client.Options{Scheme: Scheme})
	if err != nil {
		return err
	}

	logrus.Infof("creating a new cluster [%s]", name)

	cluster := newCluster(
		name,
		token,
		int32(servers),
		int32(agents),
		clusterCIDR,
		serviceCIDR,
		serverArgs,
		agentArgs,
	)
	return ctrlClient.Create(context.Background(), cluster)
}
// validateCreateFlags checks the flag values bound at package level before
// any API calls are made. clx is kept for signature compatibility with
// urfave/cli actions but is not consulted: all flags use Destination binding.
func validateCreateFlags(clx *cli.Context) error {
	if token == "" {
		return errors.New("empty cluster token")
	}
	if name == "" {
		return errors.New("empty cluster name")
	}
	if servers <= 0 {
		return errors.New("invalid number of servers")
	}
	// agents defaults to 0 (none); a negative count would silently flow
	// into the int32 conversion in createCluster, so reject it here.
	if agents < 0 {
		return errors.New("invalid number of agents")
	}
	if cmds.Kubeconfig == "" && os.Getenv("KUBECONFIG") == "" {
		return errors.New("empty kubeconfig")
	}
	return nil
}
// newCluster assembles a v1alpha1.Cluster API object from the given
// settings. servers and agents are taken by value so the spec can hold
// stable pointers to them.
func newCluster(name, token string, servers, agents int32, clusterCIDR, serviceCIDR string, serverArgs, agentArgs []string) *v1alpha1.Cluster {
	cluster := v1alpha1.Cluster{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Cluster",
			APIVersion: "k3k.io/v1alpha1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
		},
		Spec: v1alpha1.ClusterSpec{
			Name:        name,
			Token:       token,
			Servers:     &servers,
			Agents:      &agents,
			ClusterCIDR: clusterCIDR,
			ServiceCIDR: serviceCIDR,
			ServerArgs:  serverArgs,
			AgentArgs:   agentArgs,
		},
	}
	return &cluster
}

View File

@@ -0,0 +1 @@
package cluster

42
cli/cmds/root.go Normal file
View File

@@ -0,0 +1,42 @@
package cmds
import (
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
var (
	// debug toggles verbose logging; bound to the global --debug flag.
	debug bool
	// Kubeconfig is the kubeconfig path shared by all subcommands; bound
	// to the --kubeconfig flag and the KUBECONFIG environment variable.
	Kubeconfig string

	// CommonFlags holds the flags every subcommand appends to its own set.
	CommonFlags = []cli.Flag{
		cli.StringFlag{
			Name:        "kubeconfig",
			EnvVar:      "KUBECONFIG",
			Usage:       "Kubeconfig path",
			Destination: &Kubeconfig,
		},
	}
)
// NewApp constructs the base k3kcli application with its global flags and
// a Before hook that raises the log level when --debug is set.
func NewApp() *cli.App {
	debugFlag := cli.BoolFlag{
		Name:        "debug",
		Usage:       "Turn on debug logs",
		Destination: &debug,
		EnvVar:      "K3K_DEBUG",
	}

	app := cli.NewApp()
	app.Name = "k3kcli"
	app.Usage = "CLI for K3K"
	app.Flags = []cli.Flag{debugFlag}
	app.Before = func(_ *cli.Context) error {
		if debug {
			logrus.SetLevel(logrus.DebugLevel)
		}
		return nil
	}

	return app
}

23
cli/main.go Normal file
View File

@@ -0,0 +1,23 @@
package main
import (
"os"
"github.com/galal-hussein/k3k/cli/cmds"
"github.com/galal-hussein/k3k/cli/cmds/cluster"
"github.com/galal-hussein/k3k/pkg/version"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
// main wires the cluster subcommand into the base CLI application, stamps
// the build version, and runs it against os.Args. Any error from the CLI
// terminates the process via logrus.Fatal.
func main() {
	app := cmds.NewApp()
	app.Commands = []cli.Command{
		cluster.NewClusterCommand(),
	}
	app.Version = version.Version + " (" + version.GitCommit + ")"
	if err := app.Run(os.Args); err != nil {
		logrus.Fatal(err)
	}
}

View File

@@ -0,0 +1,38 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: cidrallocationpools.k3k.io
spec:
group: k3k.io
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
properties:
defaultClusterCIDR:
type: string
status:
type: object
properties:
pool:
type: array
items:
type: object
properties:
clusterName:
type: string
issued:
type: integer
ipNet:
type: string
scope: Cluster
names:
plural: cidrallocationpools
singular: cidrallocationpool
kind: CIDRAllocationPool

5
go.mod
View File

@@ -3,6 +3,8 @@ module github.com/galal-hussein/k3k
go 1.19
require (
github.com/sirupsen/logrus v1.8.1
github.com/urfave/cli v1.22.12
k8s.io/api v0.26.1
k8s.io/apimachinery v0.26.1
k8s.io/client-go v0.26.1
@@ -12,6 +14,7 @@ require (
require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/emicklei/go-restful/v3 v3.9.0 // indirect
github.com/evanphx/json-patch/v5 v5.6.0 // indirect
@@ -34,7 +37,7 @@ require (
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.37.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect
github.com/sirupsen/logrus v1.8.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect
golang.org/x/sys v0.3.0 // indirect

14
go.sum
View File

@@ -32,6 +32,7 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
@@ -51,6 +52,8 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@@ -244,6 +247,8 @@ github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0ua
github.com/rancher/dynamiclistener v0.3.5 h1:5TaIHvkDGmZKvc96Huur16zfTKOiLhDtK4S+WV0JA6A=
github.com/rancher/dynamiclistener v0.3.5/go.mod h1:dW/YF6/m2+uEyJ5VtEcd9THxda599HP6N9dSXk81+k0=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
@@ -254,12 +259,19 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/urfave/cli v1.22.12 h1:igJgVw1JdKH+trcLWLeLwZjU9fEfPesQ+9/e4MQ44S8=
github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=

View File

@@ -8,7 +8,7 @@ CODEGEN_GIT_PKG=https://github.com/kubernetes/code-generator.git
git clone --depth 1 ${CODEGEN_GIT_PKG} || true
SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
CODEGEN_PKG=../code-generator
CODEGEN_PKG=./code-generator
"${CODEGEN_PKG}/generate-groups.sh" \
"deepcopy" \

View File

@@ -29,6 +29,8 @@ func main() {
ctrlconfig.RegisterFlags(nil)
flag.Parse()
ctx := context.Background()
kubeconfig := flag.Lookup("kubeconfig").Value.String()
restConfig, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
if err != nil {
@@ -41,11 +43,12 @@ func main() {
if err != nil {
klog.Fatalf("Failed to create new controller runtime manager: %v", err)
}
if err := cluster.Add(mgr); err != nil {
if err := cluster.Add(ctx, mgr); err != nil {
klog.Fatalf("Failed to add the new controller: %v", err)
}
if err := mgr.Start(context.Background()); err != nil {
if err := mgr.Start(ctx); err != nil {
klog.Fatalf("Failed to start the manager: %v", err)
}
}

View File

@@ -1,3 +1,4 @@
FROM alpine
COPY bin/k3k /usr/bin/
COPY bin/k3kcli /usr/bin/
CMD ["k3k"]

View File

@@ -59,3 +59,36 @@ type ClusterStatus struct {
ServiceCIDR string `json:"serviceCIDR,omitempty"`
ClusterDNS string `json:"clusterDNS,omitempty"`
}
// Allocation is a single subnet entry in a CIDR allocation pool together
// with its current lease holder.
type Allocation struct {
	// ClusterName is the cluster the subnet is issued to; empty when free.
	ClusterName string `json:"clusterName"`
	// Issued is the unix timestamp at which the subnet was handed out;
	// zero when the subnet is free (set via time.Now().Unix() in nextCIDR).
	Issued int64 `json:"issued"`
	// IPNet is the subnet in CIDR notation.
	IPNet string `json:"ipNet"`
}

// CIDRAllocationPool is a cluster-scoped resource tracking which subnets
// have been issued to which virtual clusters.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type CIDRAllocationPool struct {
	metav1.ObjectMeta `json:"metadata,omitempty"`
	metav1.TypeMeta   `json:",inline"`

	Spec   CIDRAllocationPoolSpec   `json:"spec"`
	Status CIDRAllocationPoolStatus `json:"status"`
}

// CIDRAllocationPoolSpec holds the pool's configuration.
type CIDRAllocationPoolSpec struct {
	// DefaultClusterCIDR is the CIDR the pool's subnets are derived from.
	DefaultClusterCIDR string `json:"defaultClusterCIDR"`
}

// CIDRAllocationPoolStatus records the pool's current subnet allocations.
type CIDRAllocationPoolStatus struct {
	Pool []Allocation `json:"pool"`
}

// CIDRAllocationPoolList is a list of CIDRAllocationPool objects.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type CIDRAllocationPoolList struct {
	metav1.ListMeta `json:"metadata,omitempty"`
	metav1.TypeMeta `json:",inline"`

	Items []CIDRAllocationPool `json:"items"`
}

View File

@@ -9,6 +9,120 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// NOTE(review): the helpers below follow the deepcopy-gen "autogenerated"
// pattern per their own comments — confirm whether this file is regenerated
// by the codegen script before editing any of them by hand.

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Allocation) DeepCopyInto(out *Allocation) {
	// All Allocation fields are value types, so assignment is a full copy.
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Allocation.
func (in *Allocation) DeepCopy() *Allocation {
	if in == nil {
		return nil
	}
	out := new(Allocation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CIDRAllocationPool) DeepCopyInto(out *CIDRAllocationPool) {
	// Shallow copy first, then deep-copy the pointer-bearing members.
	*out = *in
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	out.TypeMeta = in.TypeMeta
	out.Spec = in.Spec
	in.Status.DeepCopyInto(&out.Status)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CIDRAllocationPool.
func (in *CIDRAllocationPool) DeepCopy() *CIDRAllocationPool {
	if in == nil {
		return nil
	}
	out := new(CIDRAllocationPool)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CIDRAllocationPool) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CIDRAllocationPoolList) DeepCopyInto(out *CIDRAllocationPoolList) {
	*out = *in
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	out.TypeMeta = in.TypeMeta
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]CIDRAllocationPool, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CIDRAllocationPoolList.
func (in *CIDRAllocationPoolList) DeepCopy() *CIDRAllocationPoolList {
	if in == nil {
		return nil
	}
	out := new(CIDRAllocationPoolList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CIDRAllocationPoolList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CIDRAllocationPoolSpec) DeepCopyInto(out *CIDRAllocationPoolSpec) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CIDRAllocationPoolSpec.
func (in *CIDRAllocationPoolSpec) DeepCopy() *CIDRAllocationPoolSpec {
	if in == nil {
		return nil
	}
	out := new(CIDRAllocationPoolSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CIDRAllocationPoolStatus) DeepCopyInto(out *CIDRAllocationPoolStatus) {
	*out = *in
	if in.Pool != nil {
		in, out := &in.Pool, &out.Pool
		*out = make([]Allocation, len(*in))
		// Allocation has no pointer fields, so a flat copy is sufficient.
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CIDRAllocationPoolStatus.
func (in *CIDRAllocationPoolStatus) DeepCopy() *CIDRAllocationPoolStatus {
	if in == nil {
		return nil
	}
	out := new(CIDRAllocationPoolStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Cluster) DeepCopyInto(out *Cluster) {
*out = *in

View File

@@ -13,7 +13,7 @@ import (
func Agent(cluster *v1alpha1.Cluster) *apps.Deployment {
image := util.K3SImage(cluster)
name := "k3k-agent"
const name = "k3k-agent"
return &apps.Deployment{
TypeMeta: metav1.TypeMeta{
@@ -47,6 +47,7 @@ func Agent(cluster *v1alpha1.Cluster) *apps.Deployment {
func agentPodSpec(image, name string, args []string) v1.PodSpec {
privileged := true
return v1.PodSpec{
Volumes: []v1.Volume{
{

View File

@@ -0,0 +1,120 @@
package cluster
import (
"context"
"fmt"
"net"
"time"
"github.com/galal-hussein/k3k/pkg/apis/k3k.io/v1alpha1"
"k8s.io/apimachinery/pkg/types"
)
const (
	// Names of the two cluster-scoped CIDRAllocationPool objects: one for
	// per-cluster pod CIDRs, one for per-cluster service CIDRs.
	cidrAllocationClusterPoolName = "k3k-cluster-cidr-allocation-pool"
	cidrAllocationServicePoolName = "k3k-service-cidr-allocation-pool"
	// Base CIDRs the pools' subnets are generated from.
	defaultClusterCIDR        = "10.44.0.0/16"
	defaultClusterServiceCIDR = "10.45.0.0/16"
)
// determineOctet determines which IPv4 octet (1-4) varies for a subnet
// with the given mask length in bits. Mask lengths above 32 yield 0.
func determineOctet(mb int) uint8 {
	// The original conditions overlapped (e.g. "mb >= 8 && mb <= 24");
	// switch ordering made them behave as disjoint ranges, written
	// explicitly here.
	switch {
	case mb <= 8:
		return 1
	case mb <= 16:
		return 2
	case mb <= 24:
		return 3
	case mb <= 32:
		return 4
	default:
		return 0
	}
}

// generateSubnets enumerates the sibling subnets that follow the given
// IPv4 CIDR, stepping the octet selected by the mask length up to 254.
// The input subnet itself is not included: enumeration starts at the
// next octet value.
func generateSubnets(cidr string) ([]string, error) {
	_, ipNet, err := net.ParseCIDR(cidr)
	if err != nil {
		return nil, err
	}
	usedBits, _ := ipNet.Mask.Size()
	octet := determineOctet(usedBits)
	ip := ipNet.IP.To4()
	// Guard against IPv6 input: To4 returns nil there, and indexing it
	// below would panic.
	if ip == nil {
		return nil, fmt.Errorf("generating subnets: %q is not an IPv4 CIDR", cidr)
	}
	octetVal := ip[octet-1]
	var subnets []string
	for i := octetVal; i < 254; i++ {
		octetVal++
		ip[octet-1] = octetVal
		subnets = append(subnets, fmt.Sprintf("%s/%d", ip, usedBits))
	}
	return subnets, nil
}
// nextCIDR issues the next free CIDR from the named pool to clusterName.
// The chosen pool entry is stamped with the cluster name and issue time
// and persisted via the status subresource before the subnet is returned.
// An error is returned when the pool cannot be read, a stored CIDR is
// unparseable, the status update fails, or the pool is exhausted.
func (c *ClusterReconciler) nextCIDR(ctx context.Context, cidrAllocationPoolName, clusterName string) (*net.IPNet, error) {
	var cidrPool v1alpha1.CIDRAllocationPool
	nn := types.NamespacedName{
		Name: cidrAllocationPoolName,
	}
	if err := c.Client.Get(ctx, nn, &cidrPool); err != nil {
		return nil, err
	}
	for i := 0; i < len(cidrPool.Status.Pool); i++ {
		// A free entry carries no owner and has never been issued.
		if cidrPool.Status.Pool[i].ClusterName != "" || cidrPool.Status.Pool[i].Issued != 0 {
			continue
		}
		cidrPool.Status.Pool[i].ClusterName = clusterName
		cidrPool.Status.Pool[i].Issued = time.Now().Unix()
		_, ipNet, err := net.ParseCIDR(cidrPool.Status.Pool[i].IPNet)
		if err != nil {
			return nil, err
		}
		if err := c.Client.Status().Update(ctx, &cidrPool); err != nil {
			return nil, err
		}
		return ipNet, nil
	}
	// Previously an exhausted pool returned (nil, nil), which made callers
	// dereference a nil *net.IPNet. Surface it as an explicit error.
	return nil, fmt.Errorf("no free CIDR left in pool %s", cidrAllocationPoolName)
}
// releaseCIDR marks every entry held by clusterName in the named pool as
// available again and persists the result with a single status update.
// It is a no-op (and issues no write) when the cluster holds nothing.
func (c *ClusterReconciler) releaseCIDR(ctx context.Context, cidrAllocationPoolName, clusterName string) error {
	var cidrPool v1alpha1.CIDRAllocationPool
	nn := types.NamespacedName{
		Name: cidrAllocationPoolName,
	}
	if err := c.Client.Get(ctx, nn, &cidrPool); err != nil {
		return err
	}
	released := false
	for i := 0; i < len(cidrPool.Status.Pool); i++ {
		if cidrPool.Status.Pool[i].ClusterName == clusterName {
			cidrPool.Status.Pool[i].ClusterName = ""
			cidrPool.Status.Pool[i].Issued = 0
			released = true
		}
	}
	if !released {
		return nil
	}
	// The original called Status().Update inside the loop, issuing one API
	// write per pool entry regardless of matches; write once at the end.
	return c.Client.Status().Update(ctx, &cidrPool)
}

View File

@@ -11,6 +11,7 @@ import (
func AgentConfig(cluster *v1alpha1.Cluster, serviceIP string) v1.Secret {
config := agentConfigData(serviceIP, cluster.Spec.Token)
return v1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",

View File

@@ -1,8 +1,6 @@
package config
import (
"fmt"
"github.com/galal-hussein/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/galal-hussein/k3k/pkg/controller/util"
v1 "k8s.io/api/core/v1"
@@ -35,32 +33,29 @@ func ServerConfig(cluster *v1alpha1.Cluster, init bool, serviceIP string) (*v1.S
}
func serverConfigData(serviceIP string, cluster *v1alpha1.Cluster) string {
opts := serverOptions(cluster)
return fmt.Sprintf(`cluster-init: true
server: https://%s:6443
%s`, serviceIP, opts)
return "cluster-init: true\nserver: https://" + serviceIP + ":6443" + serverOptions(cluster)
}
func initConfigData(cluster *v1alpha1.Cluster) string {
opts := serverOptions(cluster)
return fmt.Sprintf(`cluster-init: true
%s`, opts)
return "cluster-init: true\n" + serverOptions(cluster)
}
func serverOptions(cluster *v1alpha1.Cluster) string {
opts := ""
var opts string
// TODO: generate token if not found
if cluster.Spec.Token != "" {
opts = fmt.Sprintf("token: %s\n", cluster.Spec.Token)
opts = "token: " + cluster.Spec.Token + "\n"
}
if cluster.Spec.ClusterCIDR != "" {
opts = fmt.Sprintf("%scluster-cidr: %s\n", opts, cluster.Spec.ClusterCIDR)
opts = opts + "cluster-cidr: " + cluster.Spec.ClusterCIDR + "\n"
}
if cluster.Spec.ServiceCIDR != "" {
opts = fmt.Sprintf("%sservice-cidr: %s\n", opts, cluster.Spec.ServiceCIDR)
opts = opts + "service-cidr: " + cluster.Spec.ServiceCIDR + "\n"
}
if cluster.Spec.ClusterDNS != "" {
opts = fmt.Sprintf("%scluster-dns: %s\n", opts, cluster.Spec.ClusterDNS)
opts = opts + "cluster-dns: " + cluster.Spec.ClusterDNS + "\n"
}
return opts
}

View File

@@ -2,7 +2,6 @@ package cluster
import (
"context"
"fmt"
"github.com/galal-hussein/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/galal-hussein/k3k/pkg/controller/cluster/agent"
@@ -24,8 +23,8 @@ import (
)
const (
ClusterController = "k3k-cluster-controller"
ClusterFinalizerName = "cluster.k3k.io/finalizer"
clusterController = "k3k-cluster-controller"
clusterFinalizerName = "cluster.k3k.io/finalizer"
)
type ClusterReconciler struct {
@@ -34,94 +33,182 @@ type ClusterReconciler struct {
}
// Add adds a new controller to the manager
func Add(mgr manager.Manager) error {
func Add(ctx context.Context, mgr manager.Manager) error {
// initialize a new Reconciler
reconciler := ClusterReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}
// create a new controller and add it to the manager
//this can be replaced by the new builder functionality in controller-runtime
controller, err := controller.New(ClusterController, mgr, controller.Options{
Reconciler: &reconciler,
MaxConcurrentReconciles: 1,
})
clusterSubnets, err := generateSubnets(defaultClusterCIDR)
if err != nil {
return err
}
if err := controller.Watch(&source.Kind{Type: &v1alpha1.Cluster{}},
&handler.EnqueueRequestForObject{}); err != nil {
var clusterSubnetAllocations []v1alpha1.Allocation
for _, cs := range clusterSubnets {
clusterSubnetAllocations = append(clusterSubnetAllocations, v1alpha1.Allocation{
IPNet: cs,
})
}
cidrClusterPool := v1alpha1.CIDRAllocationPool{
ObjectMeta: metav1.ObjectMeta{
Name: cidrAllocationClusterPoolName,
},
Spec: v1alpha1.CIDRAllocationPoolSpec{
DefaultClusterCIDR: defaultClusterCIDR,
},
Status: v1alpha1.CIDRAllocationPoolStatus{
Pool: clusterSubnetAllocations,
},
}
if err := reconciler.Client.Create(ctx, &cidrClusterPool); err != nil {
if !apierrors.IsConflict(err) {
// return nil since the resource has
// already been created
return nil
}
return err
}
return nil
clusterServiceSubnets, err := generateSubnets(defaultClusterServiceCIDR)
if err != nil {
return err
}
var clusterServiceSubnetAllocations []v1alpha1.Allocation
for _, ss := range clusterServiceSubnets {
clusterServiceSubnetAllocations = append(clusterServiceSubnetAllocations, v1alpha1.Allocation{
IPNet: ss,
})
}
cidrServicePool := v1alpha1.CIDRAllocationPool{
ObjectMeta: metav1.ObjectMeta{
Name: cidrAllocationServicePoolName,
},
Spec: v1alpha1.CIDRAllocationPoolSpec{
DefaultClusterCIDR: defaultClusterCIDR,
},
Status: v1alpha1.CIDRAllocationPoolStatus{
Pool: clusterServiceSubnetAllocations,
},
}
if err := reconciler.Client.Create(ctx, &cidrServicePool); err != nil {
if !apierrors.IsConflict(err) {
// return nil since the resource has
// already been created
return nil
}
return err
}
// create a new controller and add it to the manager
//this can be replaced by the new builder functionality in controller-runtime
controller, err := controller.New(clusterController, mgr, controller.Options{
Reconciler: &reconciler,
MaxConcurrentReconciles: 1,
})
if err != nil {
return err
}
return controller.Watch(&source.Kind{Type: &v1alpha1.Cluster{}}, &handler.EnqueueRequestForObject{})
}
func (r *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
cluster := &v1alpha1.Cluster{}
func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
var cluster v1alpha1.Cluster
if err := r.Client.Get(ctx, req.NamespacedName, cluster); err != nil {
if err := c.Client.Get(ctx, req.NamespacedName, &cluster); err != nil {
return reconcile.Result{}, client.IgnoreNotFound(err)
}
if cluster.DeletionTimestamp.IsZero() {
if !controllerutil.ContainsFinalizer(cluster, ClusterFinalizerName) {
controllerutil.AddFinalizer(cluster, ClusterFinalizerName)
if err := r.Client.Update(ctx, cluster); err != nil {
if !controllerutil.ContainsFinalizer(&cluster, clusterFinalizerName) {
controllerutil.AddFinalizer(&cluster, clusterFinalizerName)
if err := c.Client.Update(ctx, &cluster); err != nil {
return reconcile.Result{}, err
}
}
// we create a namespace for each new cluster
ns := &v1.Namespace{}
if err := r.Client.Get(ctx, client.ObjectKey{Name: util.ClusterNamespace(cluster)}, ns); err != nil {
var ns v1.Namespace
objKey := client.ObjectKey{
Name: util.ClusterNamespace(&cluster),
}
if err := c.Client.Get(ctx, objKey, &ns); err != nil {
if !apierrors.IsNotFound(err) {
return reconcile.Result{},
util.WrapErr(fmt.Sprintf("failed to get cluster namespace %s", util.ClusterNamespace(cluster)), err)
return reconcile.Result{}, util.WrapErr("failed to get cluster namespace "+util.ClusterNamespace(&cluster), err)
}
}
klog.Infof("enqueue cluster [%s]", cluster.Name)
return reconcile.Result{}, r.createCluster(ctx, cluster)
return reconcile.Result{}, c.createCluster(ctx, &cluster)
}
if controllerutil.ContainsFinalizer(cluster, ClusterFinalizerName) {
if controllerutil.ContainsFinalizer(&cluster, clusterFinalizerName) {
// TODO: handle CIDR deletion
if err := c.releaseCIDR(ctx, cluster.Status.ClusterCIDR, cluster.Name); err != nil {
return reconcile.Result{}, err
}
// remove our finalizer from the list and update it.
controllerutil.RemoveFinalizer(cluster, ClusterFinalizerName)
if err := r.Client.Update(ctx, cluster); err != nil {
controllerutil.RemoveFinalizer(&cluster, clusterFinalizerName)
if err := c.Client.Update(ctx, &cluster); err != nil {
return reconcile.Result{}, err
}
}
klog.Infof("deleting cluster [%s]", cluster.Name)
return reconcile.Result{}, nil
}
func (r *ClusterReconciler) createCluster(ctx context.Context, cluster *v1alpha1.Cluster) error {
func (c *ClusterReconciler) createCluster(ctx context.Context, cluster *v1alpha1.Cluster) error {
// create a new namespace for the cluster
if err := r.createNamespace(ctx, cluster); err != nil {
if err := c.createNamespace(ctx, cluster); err != nil {
return util.WrapErr("failed to create ns", err)
}
serviceIP, err := r.createClusterService(ctx, cluster)
if cluster.Spec.ClusterCIDR == "" && cluster.Status.ClusterCIDR == "" {
clusterCIDR, err := c.nextCIDR(ctx, cidrAllocationClusterPoolName, cluster.Name)
if err != nil {
return err
}
cluster.Status.ClusterCIDR = clusterCIDR.String()
}
if cluster.Spec.ServiceCIDR == "" && cluster.Status.ServiceCIDR == "" {
serviceCIDR, err := c.nextCIDR(ctx, cidrAllocationServicePoolName, cluster.Name)
if err != nil {
return err
}
cluster.Status.ServiceCIDR = serviceCIDR.String()
}
serviceIP, err := c.createClusterService(ctx, cluster)
if err != nil {
return util.WrapErr("failed to create cluster service", err)
}
if err := r.createClusterConfigs(ctx, cluster, serviceIP); err != nil {
if err := c.createClusterConfigs(ctx, cluster, serviceIP); err != nil {
return util.WrapErr("failed to create cluster configs", err)
}
if err := r.createDeployments(ctx, cluster); err != nil {
if err := c.createDeployments(ctx, cluster); err != nil {
return util.WrapErr("failed to create servers and agents deployment", err)
}
if cluster.Spec.Expose.Ingress.Enabled {
serverIngress, err := server.Ingress(ctx, cluster, r.Client)
serverIngress, err := server.Ingress(ctx, cluster, c.Client)
if err != nil {
return util.WrapErr("failed to create ingress object", err)
}
if err := r.Client.Create(ctx, serverIngress); err != nil {
if err := c.Client.Create(ctx, serverIngress); err != nil {
if !apierrors.IsAlreadyExists(err) {
return util.WrapErr("failed to create server ingress", err)
}
@@ -132,25 +219,28 @@ func (r *ClusterReconciler) createCluster(ctx context.Context, cluster *v1alpha1
if err != nil {
return util.WrapErr("failed to generate new kubeconfig", err)
}
if err := r.Client.Create(ctx, kubeconfigSecret); err != nil {
if err := c.Client.Create(ctx, kubeconfigSecret); err != nil {
if !apierrors.IsAlreadyExists(err) {
return util.WrapErr("failed to create kubeconfig secret", err)
}
}
return nil
return c.Client.Update(ctx, cluster)
}
func (r *ClusterReconciler) createNamespace(ctx context.Context, cluster *v1alpha1.Cluster) error {
func (c *ClusterReconciler) createNamespace(ctx context.Context, cluster *v1alpha1.Cluster) error {
// create a new namespace for the cluster
namespace := &v1.Namespace{
namespace := v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: util.ClusterNamespace(cluster),
},
}
if err := controllerutil.SetControllerReference(cluster, namespace, r.Scheme); err != nil {
if err := controllerutil.SetControllerReference(cluster, &namespace, c.Scheme); err != nil {
return err
}
if err := r.Client.Create(ctx, namespace); err != nil {
if err := c.Client.Create(ctx, &namespace); err != nil {
if !apierrors.IsAlreadyExists(err) {
return util.WrapErr("failed to create ns", err)
}
@@ -159,18 +249,18 @@ func (r *ClusterReconciler) createNamespace(ctx context.Context, cluster *v1alph
return nil
}
func (r *ClusterReconciler) createClusterConfigs(ctx context.Context, cluster *v1alpha1.Cluster, serviceIP string) error {
func (c *ClusterReconciler) createClusterConfigs(ctx context.Context, cluster *v1alpha1.Cluster, serviceIP string) error {
// create init node config
initServerConfig, err := config.ServerConfig(cluster, true, serviceIP)
if err != nil {
return err
}
if err := controllerutil.SetControllerReference(cluster, initServerConfig, r.Scheme); err != nil {
if err := controllerutil.SetControllerReference(cluster, initServerConfig, c.Scheme); err != nil {
return err
}
if err := r.Client.Create(ctx, initServerConfig); err != nil {
if err := c.Client.Create(ctx, initServerConfig); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
@@ -181,10 +271,10 @@ func (r *ClusterReconciler) createClusterConfigs(ctx context.Context, cluster *v
if err != nil {
return err
}
if err := controllerutil.SetControllerReference(cluster, serverConfig, r.Scheme); err != nil {
if err := controllerutil.SetControllerReference(cluster, serverConfig, c.Scheme); err != nil {
return err
}
if err := r.Client.Create(ctx, serverConfig); err != nil {
if err := c.Client.Create(ctx, serverConfig); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
@@ -192,52 +282,54 @@ func (r *ClusterReconciler) createClusterConfigs(ctx context.Context, cluster *v
// create agents configuration
agentsConfig := config.AgentConfig(cluster, serviceIP)
if err := controllerutil.SetControllerReference(cluster, &agentsConfig, r.Scheme); err != nil {
if err := controllerutil.SetControllerReference(cluster, &agentsConfig, c.Scheme); err != nil {
return err
}
if err := r.Client.Create(ctx, &agentsConfig); err != nil {
if err := c.Client.Create(ctx, &agentsConfig); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
}
return nil
}
func (r *ClusterReconciler) createClusterService(ctx context.Context, cluster *v1alpha1.Cluster) (string, error) {
func (c *ClusterReconciler) createClusterService(ctx context.Context, cluster *v1alpha1.Cluster) (string, error) {
// create cluster service
clusterService := server.Service(cluster)
if err := controllerutil.SetControllerReference(cluster, clusterService, r.Scheme); err != nil {
if err := controllerutil.SetControllerReference(cluster, clusterService, c.Scheme); err != nil {
return "", err
}
if err := r.Client.Create(ctx, clusterService); err != nil {
if err := c.Client.Create(ctx, clusterService); err != nil {
if !apierrors.IsAlreadyExists(err) {
return "", err
}
}
service := v1.Service{}
if err := r.Client.Get(ctx,
client.ObjectKey{
Namespace: util.ClusterNamespace(cluster),
Name: "k3k-server-service"},
&service); err != nil {
var service v1.Service
objKey := client.ObjectKey{
Namespace: util.ClusterNamespace(cluster),
Name: "k3k-server-service",
}
if err := c.Client.Get(ctx, objKey, &service); err != nil {
return "", err
}
return service.Spec.ClusterIP, nil
}
func (r *ClusterReconciler) createDeployments(ctx context.Context, cluster *v1alpha1.Cluster) error {
func (c *ClusterReconciler) createDeployments(ctx context.Context, cluster *v1alpha1.Cluster) error {
// create deployment for the init server
// the init deployment must have only 1 replica
initServerDeployment := server.Server(cluster, true)
if err := controllerutil.SetControllerReference(cluster, initServerDeployment, r.Scheme); err != nil {
if err := controllerutil.SetControllerReference(cluster, initServerDeployment, c.Scheme); err != nil {
return err
}
if err := r.Client.Create(ctx, initServerDeployment); err != nil {
if err := c.Client.Create(ctx, initServerDeployment); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
@@ -246,22 +338,22 @@ func (r *ClusterReconciler) createDeployments(ctx context.Context, cluster *v1al
// create deployment for the rest of the servers
serversDeployment := server.Server(cluster, false)
if err := controllerutil.SetControllerReference(cluster, serversDeployment, r.Scheme); err != nil {
if err := controllerutil.SetControllerReference(cluster, serversDeployment, c.Scheme); err != nil {
return err
}
if err := r.Client.Create(ctx, serversDeployment); err != nil {
if err := c.Client.Create(ctx, serversDeployment); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
}
agentsDeployment := agent.Agent(cluster)
if err := controllerutil.SetControllerReference(cluster, agentsDeployment, r.Scheme); err != nil {
if err := controllerutil.SetControllerReference(cluster, agentsDeployment, c.Scheme); err != nil {
return err
}
if err := r.Client.Create(ctx, agentsDeployment); err != nil {
if err := c.Client.Create(ctx, agentsDeployment); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}

View File

@@ -8,7 +8,6 @@ import (
"encoding/base64"
"encoding/json"
"fmt"
"io"
"net/http"
"time"
@@ -48,15 +47,14 @@ type content struct {
func GenerateNewKubeConfig(ctx context.Context, cluster *v1alpha1.Cluster, ip string) (*v1.Secret, error) {
token := cluster.Spec.Token
bootstrap := &controlRuntimeBootstrap{}
err := retry.OnError(retry.DefaultBackoff, func(err error) bool {
var bootstrap *controlRuntimeBootstrap
if err := retry.OnError(retry.DefaultBackoff, func(err error) bool {
return true
}, func() error {
var err error
bootstrap, err = requestBootstrap(token, ip)
return err
})
if err != nil {
}); err != nil {
return nil, err
}
@@ -65,10 +63,8 @@ func GenerateNewKubeConfig(ctx context.Context, cluster *v1alpha1.Cluster, ip st
}
adminCert, adminKey, err := createClientCertKey(
adminCommonName,
[]string{user.SystemPrivilegedGroup},
nil,
[]x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
adminCommonName, []string{user.SystemPrivilegedGroup},
nil, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
bootstrap.ClientCA.Content,
bootstrap.ClientCAKey.Content)
if err != nil {
@@ -109,7 +105,7 @@ func requestBootstrap(token, serverIP string) (*controlRuntimeBootstrap, error)
Timeout: 5 * time.Second,
}
req, err := http.NewRequest("GET", url, nil)
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
return nil, err
}
@@ -120,28 +116,16 @@ func requestBootstrap(token, serverIP string) (*controlRuntimeBootstrap, error)
return nil, err
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
runtimeBootstrap := controlRuntimeBootstrap{}
if err != nil {
return nil, err
}
if err := json.Unmarshal(body, &runtimeBootstrap); err != nil {
var runtimeBootstrap controlRuntimeBootstrap
if err := json.NewDecoder(resp.Body).Decode(&runtimeBootstrap); err != nil {
return nil, err
}
return &runtimeBootstrap, nil
}
func createClientCertKey(
commonName string,
organization []string,
altNames *certutil.AltNames,
extKeyUsage []x509.ExtKeyUsage,
caCert,
caKey string) ([]byte, []byte, error) {
func createClientCertKey(commonName string, organization []string, altNames *certutil.AltNames, extKeyUsage []x509.ExtKeyUsage, caCert, caKey string) ([]byte, []byte, error) {
caKeyPEM, err := certutil.ParsePrivateKeyPEM([]byte(caKey))
if err != nil {
return nil, nil, err
@@ -152,12 +136,12 @@ func createClientCertKey(
return nil, nil, err
}
keyBytes, err := generateKey()
b, err := generateKey()
if err != nil {
return nil, nil, err
}
key, err := certutil.ParsePrivateKeyPEM(keyBytes)
key, err := certutil.ParsePrivateKeyPEM(b)
if err != nil {
return nil, nil, err
}
@@ -175,7 +159,7 @@ func createClientCertKey(
return nil, nil, err
}
return append(certutil.EncodeCertPEM(cert), certutil.EncodeCertPEM(caCertPEM[0])...), keyBytes, nil
return append(certutil.EncodeCertPEM(cert), certutil.EncodeCertPEM(caCertPEM[0])...), b, nil
}
func generateKey() (data []byte, err error) {
@@ -183,6 +167,7 @@ func generateKey() (data []byte, err error) {
if err != nil {
return nil, fmt.Errorf("error generating key: %v", err)
}
return generatedData, nil
}
@@ -210,6 +195,7 @@ func kubeconfig(url string, serverCA, clientCert, clientKey []byte) ([]byte, err
if err != nil {
return nil, err
}
return kubeconfig, nil
}
@@ -225,23 +211,27 @@ func decodeBootstrap(bootstrap *controlRuntimeBootstrap) error {
return err
}
bootstrap.ClientCA.Content = string(decoded)
//client-ca-key
decoded, err = base64.StdEncoding.DecodeString(bootstrap.ClientCAKey.Content)
if err != nil {
return err
}
bootstrap.ClientCAKey.Content = string(decoded)
//server-ca
decoded, err = base64.StdEncoding.DecodeString(bootstrap.ServerCA.Content)
if err != nil {
return err
}
bootstrap.ServerCA.Content = string(decoded)
//server-ca-key
decoded, err = base64.StdEncoding.DecodeString(bootstrap.ServerCAKey.Content)
if err != nil {
return err
}
bootstrap.ServerCAKey.Content = string(decoded)
return nil
}

View File

@@ -24,6 +24,7 @@ func Server(cluster *v1alpha1.Cluster, init bool) *apps.Deployment {
if init {
replicas = 1
}
return &apps.Deployment{
TypeMeta: metav1.TypeMeta{
Kind: "Deployment",
@@ -58,6 +59,7 @@ func Server(cluster *v1alpha1.Cluster, init bool) *apps.Deployment {
func serverPodSpec(image, name string, args []string) v1.PodSpec {
privileged := true
return v1.PodSpec{
Volumes: []v1.Volume{
{

View File

@@ -10,16 +10,16 @@ import (
)
const (
NamespacePrefix = "k3k-"
K3SImageName = "rancher/k3s"
namespacePrefix = "k3k-"
k3SImageName = "rancher/k3s"
)
func ClusterNamespace(cluster *v1alpha1.Cluster) string {
return NamespacePrefix + cluster.Name
return namespacePrefix + cluster.Name
}
func K3SImage(cluster *v1alpha1.Cluster) string {
return K3SImageName + ":" + cluster.Spec.Version
return k3SImageName + ":" + cluster.Spec.Version
}
func WrapErr(errString string, err error) error {
@@ -27,24 +27,10 @@ func WrapErr(errString string, err error) error {
return err
}
// return all the nodes external addresses, if not found then return internal addresses
func Addresses(ctx context.Context, client client.Client) ([]string, error) {
addresses := []string{}
nodeList := v1.NodeList{}
if err := client.List(ctx, &nodeList); err != nil {
return nil, err
}
func nodeAddress(node *v1.Node) string {
var externalIP string
var internalIP string
for _, node := range nodeList.Items {
addresses = append(addresses, getNodeAddress(&node))
}
return addresses, nil
}
func getNodeAddress(node *v1.Node) string {
externalIP := ""
internalIP := ""
for _, ip := range node.Status.Addresses {
if ip.Type == "ExternalIP" && ip.Address != "" {
externalIP = ip.Address
@@ -59,3 +45,19 @@ func getNodeAddress(node *v1.Node) string {
return internalIP
}
// return all the nodes external addresses, if not found then return internal addresses
func Addresses(ctx context.Context, client client.Client) ([]string, error) {
var nodeList v1.NodeList
if err := client.List(ctx, &nodeList); err != nil {
return nil, err
}
addresses := make([]string, len(nodeList.Items))
for _, node := range nodeList.Items {
addresses = append(addresses, nodeAddress(&node))
}
return addresses, nil
}

10
pkg/version/version.go Normal file
View File

@@ -0,0 +1,10 @@
package version
import "strings"
var (
Program = "k3k"
ProgramUpper = strings.ToUpper(Program)
Version = "dev"
GitCommit = "HEAD"
)

View File

@@ -1,5 +1,5 @@
#!/bin/bash
set -e
set -ex
source $(dirname $0)/version
@@ -16,3 +16,11 @@ if [ "$CROSS" = "true" ] && [ "$ARCH" = "amd64" ]; then
GOOS=darwin go build -ldflags "$LINKFLAGS" -o bin/k3k-darwin
GOOS=windows go build -ldflags "$LINKFLAGS" -o bin/k3k-windows
fi
# build k3kcli
CGO_ENABLED=0 go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3kcli ./cli
if [ "$CROSS" = "true" ] && [ "$ARCH" = "amd64" ]; then
GOOS=darwin go build -ldflags "$LINKFLAGS" -o bin/k3kcli-darwin ./cli
GOOS=windows go build -ldflags "$LINKFLAGS" -o bin/k3kcli-windows ./cli
fi

View File

@@ -7,6 +7,7 @@ cd $(dirname $0)/..
mkdir -p dist/artifacts
cp bin/k3k dist/artifacts/k3k${SUFFIX}
cp bin/k3kcli dist/artifacts/k3kcli${SUFFIX}
IMAGE=${REPO}/k3k:${TAG}
DOCKERFILE=package/Dockerfile

View File

@@ -20,7 +20,7 @@ fi
SUFFIX="-${ARCH}"
TAG=${TAG:-${VERSION}${SUFFIX}}
REPO=${REPO:-galal-hussein}
REPO=${REPO:-husseingalal}
if echo $TAG | grep -q dirty; then
TAG=dev