Compare commits

14 Commits

Author SHA1 Message Date
Hussein Galal
ba35d12124 Cluster spec update (#90)
* Remove unused functions

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* enable cluster server and agent update

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-01-25 06:37:59 +02:00
Hussein Galal
6fc22df6bc Cluster type validations (#89)
* Cluster type validations

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Cluster type validations

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-01-12 23:09:30 +02:00
Hussein Galal
c92f722122 Add delete subcommand (#88)
* Add delete subcommand

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add delete subcommand

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-01-11 02:36:12 +02:00
Hussein Galal
5e141fe98e Add kubeconfig subcommand (#87)
* Add kubeconfig subcommand

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add kubeconfig subcommand

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add kubeconfig subcommand

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add kubeconfig subcommand

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-01-11 00:57:46 +02:00
Hussein Galal
4b2308e709 Update chart to v0.1.2-r1 (#82)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-01-06 07:38:54 +02:00
Hussein Galal
3cdcb04e1a Add validation for system cluster name for both controller and cli (#81)
* Add validation for system cluster name for both controller and cli

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add validation for system cluster name for both controller and cli

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add validation for system cluster name for both controller and cli

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-01-06 02:15:20 +02:00
Hussein Galal
fedfa109b5 Fix append to empty slice (#80)
* Fix append to empty slice

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix initialization of addresses slice

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-01-04 01:49:48 +02:00
Hussein Galal
99d043f2ee fix chart releases (#79)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-01-03 02:55:09 +02:00
Hussein Galal
57ed675a7f fix chart releases (#78)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-01-03 02:49:05 +02:00
Hussein Galal
7c9060c394 fix chart release (#77)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-01-03 02:37:08 +02:00
Hussein Galal
a104aacf5f Add github config mail and username for pushing k3k release (#76)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-01-03 02:24:46 +02:00
Hussein Galal
6346b06eb3 Add github config mail and username for pushing k3k release (#75)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-01-03 02:08:10 +02:00
Hussein Galal
6fd745f268 Fix chart release (#74)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-01-03 01:53:26 +02:00
Hussein Galal
1258fb6d58 Upgrade chart and fix manifest (#73)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2024-01-03 00:03:08 +02:00
21 changed files with 602 additions and 328 deletions

@@ -20,9 +20,9 @@ steps:
- name: docker
path: /var/run/docker.sock
when:
branch:
ref:
exclude:
- k3k-chart
- refs/tags/chart-*
- name: package-chart
image: rancher/dapper:v0.6.0
@@ -35,26 +35,50 @@ steps:
- name: docker
path: /var/run/docker.sock
when:
branch:
- k3k-chart
ref:
- refs/tags/chart-*
instance:
- drone-publish.rancher.io
event:
- tag
- name: release-chart
image: plugins/github-release
settings:
api_key:
from_secret: github_token
checksum:
- sha256
checksum_file: CHECKSUMsum.txt
checksum_flatten: true
files:
- "deploy/*"
when:
instance:
- drone-publish.rancher.io
ref:
include:
- refs/tags/chart-*
event:
- tag
- name: index-chart
image: rancher/dapper:v0.6.0
environment:
GITHUB_TOKEN:
from_secret: github_token
commands:
- dapper release-chart
- dapper index-chart
volumes:
- name: docker
path: /var/run/docker.sock
when:
branch:
- k3k-chart
ref:
- refs/tags/chart-*
instance:
- drone-publish.rancher.io
event:
- tag
- name: github_binary_release
image: plugins/github-release
@@ -72,13 +96,13 @@ steps:
instance:
- drone-publish.rancher.io
ref:
- refs/head/master
- refs/tags/*
include:
- refs/head/master
- refs/tags/*
exclude:
- refs/tags/chart-*
event:
- tag
branch:
exclude:
- k3k-chart
- name: docker-publish
image: plugins/docker
@@ -93,13 +117,13 @@ steps:
instance:
- drone-publish.rancher.io
ref:
- refs/head/master
- refs/tags/*
include:
- refs/head/master
- refs/tags/*
exclude:
- refs/tags/chart-*
event:
- tag
branch:
exclude:
- k3k-chart
volumes:
- name: docker
@@ -129,10 +153,10 @@ steps:
instance:
- drone-publish.rancher.io
ref:
include:
- refs/head/master
- refs/tags/*
branch:
exclude:
- k3k-chart
- refs/tags/chart-*
depends_on:
- amd64

.gitignore

@@ -4,4 +4,5 @@
/dist
*.swp
.idea
.vscode/
__debug*

@@ -2,5 +2,5 @@ apiVersion: v2
name: k3k
description: A Helm chart for K3K
type: application
version: 0.1.0-r1
appVersion: 0.0.0-alpha6
version: 0.1.2-r1
appVersion: 0.1.1

@@ -21,16 +21,34 @@ spec:
type: string
servers:
type: integer
x-kubernetes-validations:
- message: cluster must have at least one server
rule: self >= 1
agents:
type: integer
x-kubernetes-validations:
- message: invalid value for agents
rule: self >= 0
token:
type: string
x-kubernetes-validations:
- message: token is immutable
rule: self == oldSelf
clusterCIDR:
type: string
x-kubernetes-validations:
- message: clusterCIDR is immutable
rule: self == oldSelf
serviceCIDR:
type: string
x-kubernetes-validations:
- message: serviceCIDR is immutable
rule: self == oldSelf
clusterDNS:
type: string
x-kubernetes-validations:
- message: clusterDNS is immutable
rule: self == oldSelf
serverArgs:
type: array
items:

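The x-kubernetes-validations entries added above are CEL expressions the API server evaluates on writes; rules referencing oldSelf only apply on updates, which makes `self == oldSelf` the standard idiom for an immutable field. A rough sketch of the same rules using the apiextensions v1 Go types instead of raw YAML (field names mirror the diff, the rest is illustrative):

package crd

import (
	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
)

// serversSchema mirrors the `servers` property: an integer that must be >= 1.
var serversSchema = apiextensionsv1.JSONSchemaProps{
	Type: "integer",
	XValidations: apiextensionsv1.ValidationRules{
		{Rule: "self >= 1", Message: "cluster must have at least one server"},
	},
}

// tokenSchema mirrors the `token` property: comparing the new value against
// oldSelf rejects any update that changes it after creation.
var tokenSchema = apiextensionsv1.JSONSchemaProps{
	Type: "string",
	XValidations: apiextensionsv1.ValidationRules{
		{Rule: "self == oldSelf", Message: "token is immutable"},
	},
}
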
@@ -5,7 +5,7 @@ image:
repository: rancher/k3k
pullPolicy: Always
# Overrides the image tag whose default is the chart appVersion.
tag: "v0.0.0-alpha6"
tag: "v0.1.1"
imagePullSecrets: []
nameOverride: ""

@@ -5,21 +5,29 @@ import (
"github.com/urfave/cli"
)
var clusterSubcommands = []cli.Command{
var subcommands = []cli.Command{
{
Name: "create",
Usage: "Create new cluster",
SkipFlagParsing: false,
SkipArgReorder: true,
Action: createCluster,
Action: create,
Flags: append(cmds.CommonFlags, clusterCreateFlags...),
},
{
Name: "delete",
Usage: "Delete an existing cluster",
SkipFlagParsing: false,
SkipArgReorder: true,
Action: delete,
Flags: append(cmds.CommonFlags, clusterDeleteFlags...),
},
}
func NewClusterCommand() cli.Command {
func NewCommand() cli.Command {
return cli.Command{
Name: "cluster",
Usage: "cluster command",
Subcommands: clusterSubcommands,
Subcommands: subcommands,
}
}

@@ -3,7 +3,6 @@ package cluster
import (
"context"
"errors"
"fmt"
"net/url"
"os"
"path/filepath"
@@ -12,20 +11,19 @@ import (
"github.com/rancher/k3k/cli/cmds"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/controller/cluster/server"
"github.com/rancher/k3k/pkg/controller/kubeconfig"
"github.com/rancher/k3k/pkg/controller/util"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/authentication/user"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -120,7 +118,7 @@ var (
}
)
func createCluster(clx *cli.Context) error {
func create(clx *cli.Context) error {
ctx := context.Background()
if err := validateCreateFlags(clx); err != nil {
return err
@@ -173,11 +171,16 @@ func createCluster(clx *cli.Context) error {
}
logrus.Infof("Extracting Kubeconfig for [%s] cluster", name)
cfg := &kubeconfig.KubeConfig{
CN: util.AdminCommonName,
ORG: []string{user.SystemPrivilegedGroup},
ExpiryDate: 0,
}
logrus.Infof("waiting for cluster to be available..")
var kubeconfig []byte
if err := retry.OnError(backoff, apierrors.IsNotFound, func() error {
kubeconfig, err = extractKubeconfig(ctx, ctrlClient, cluster, host[0])
kubeconfig, err = cfg.Extract(ctx, ctrlClient, cluster, host[0])
if err != nil {
logrus.Infof("waiting for cluster to be available: %v", err)
return err
}
return nil
@@ -210,6 +213,9 @@ func validateCreateFlags(clx *cli.Context) error {
if name == "" {
return errors.New("empty cluster name")
}
if name == cluster.ClusterInvalidName {
return errors.New("invalid cluster name")
}
if servers <= 0 {
return errors.New("invalid number of servers")
}
@@ -230,7 +236,6 @@ func newCluster(name, token string, servers, agents int32, clusterCIDR, serviceC
APIVersion: "k3k.io/v1alpha1",
},
Spec: v1alpha1.ClusterSpec{
Name: name,
Token: token,
Servers: &servers,
Agents: &agents,
@@ -246,83 +251,3 @@ func newCluster(name, token string, servers, agents int32, clusterCIDR, serviceC
},
}
}
func extractKubeconfig(ctx context.Context, client client.Client, cluster *v1alpha1.Cluster, serverIP string) ([]byte, error) {
nn := types.NamespacedName{
Name: cluster.Name + "-kubeconfig",
Namespace: util.ClusterNamespace(cluster),
}
var kubeSecret v1.Secret
if err := client.Get(ctx, nn, &kubeSecret); err != nil {
return nil, err
}
kubeconfig := kubeSecret.Data["kubeconfig.yaml"]
if kubeconfig == nil {
return nil, errors.New("empty kubeconfig")
}
nn = types.NamespacedName{
Name: "k3k-server-service",
Namespace: util.ClusterNamespace(cluster),
}
var k3kService v1.Service
if err := client.Get(ctx, nn, &k3kService); err != nil {
return nil, err
}
if k3kService.Spec.Type == v1.ServiceTypeNodePort {
nodePort := k3kService.Spec.Ports[0].NodePort
restConfig, err := clientcmd.RESTConfigFromKubeConfig(kubeconfig)
if err != nil {
return nil, err
}
hostURL := fmt.Sprintf("https://%s:%d", serverIP, nodePort)
restConfig.Host = hostURL
clientConfig := generateKubeconfigFromRest(restConfig)
b, err := clientcmd.Write(clientConfig)
if err != nil {
return nil, err
}
kubeconfig = b
}
return kubeconfig, nil
}
func generateKubeconfigFromRest(config *rest.Config) clientcmdapi.Config {
clusters := make(map[string]*clientcmdapi.Cluster)
clusters["default-cluster"] = &clientcmdapi.Cluster{
Server: config.Host,
CertificateAuthorityData: config.CAData,
}
contexts := make(map[string]*clientcmdapi.Context)
contexts["default-context"] = &clientcmdapi.Context{
Cluster: "default-cluster",
Namespace: "default",
AuthInfo: "default",
}
authinfos := make(map[string]*clientcmdapi.AuthInfo)
authinfos["default"] = &clientcmdapi.AuthInfo{
ClientCertificateData: config.CertData,
ClientKeyData: config.KeyData,
}
clientConfig := clientcmdapi.Config{
Kind: "Config",
APIVersion: "v1",
Clusters: clusters,
Contexts: contexts,
CurrentContext: "default-context",
AuthInfos: authinfos,
}
return clientConfig
}

@@ -1 +1,47 @@
package cluster
import (
"context"
"github.com/rancher/k3k/cli/cmds"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/controller-runtime/pkg/client"
)
var (
clusterDeleteFlags = []cli.Flag{
cli.StringFlag{
Name: "name",
Usage: "name of the cluster",
Destination: &name,
},
}
)
func delete(clx *cli.Context) error {
ctx := context.Background()
restConfig, err := clientcmd.BuildConfigFromFlags("", cmds.Kubeconfig)
if err != nil {
return err
}
ctrlClient, err := client.New(restConfig, client.Options{
Scheme: Scheme,
})
if err != nil {
return err
}
logrus.Infof("deleting [%s] cluster", name)
cluster := v1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
}
return ctrlClient.Delete(ctx, &cluster)
}

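The delete subcommand above leaves cleanup of the cluster's namespace and pods to the controller's finalizer. As a hedged variation, controller-runtime's client also accepts delete options; deleteClusterForeground below is a hypothetical helper showing how foreground cascading deletion would be requested with the same client:

package cluster

import (
	"context"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// deleteClusterForeground deletes the Cluster object and asks the API server
// to block garbage collection behind its dependents.
func deleteClusterForeground(ctx context.Context, c client.Client, name string) error {
	cluster := v1alpha1.Cluster{
		ObjectMeta: metav1.ObjectMeta{Name: name},
	}
	return c.Delete(ctx, &cluster, client.PropagationPolicy(metav1.DeletePropagationForeground))
}
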
@@ -0,0 +1,169 @@
package kubeconfig
import (
"context"
"net/url"
"os"
"path/filepath"
"strings"
"time"
"github.com/rancher/k3k/cli/cmds"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/kubeconfig"
"github.com/rancher/k3k/pkg/controller/util"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/authentication/user"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/client"
)
func init() {
_ = clientgoscheme.AddToScheme(Scheme)
_ = v1alpha1.AddToScheme(Scheme)
}
var (
Scheme = runtime.NewScheme()
name string
cn string
org cli.StringSlice
altNames cli.StringSlice
expirationDays int64
configName string
backoff = wait.Backoff{
Steps: 5,
Duration: 20 * time.Second,
Factor: 2,
Jitter: 0.1,
}
generateKubeconfigFlags = []cli.Flag{
cli.StringFlag{
Name: "name",
Usage: "cluster name",
Destination: &name,
},
cli.StringFlag{
Name: "config-name",
Usage: "the name of the generated kubeconfig file",
Destination: &configName,
},
cli.StringFlag{
Name: "cn",
Usage: "Common name (CN) of the generated certificates for the kubeconfig",
Destination: &cn,
Value: util.AdminCommonName,
},
cli.StringSliceFlag{
Name: "org",
Usage: "Organization name (ORG) of the generated certificates for the kubeconfig",
Value: &org,
},
cli.StringSliceFlag{
Name: "altNames",
Usage: "altNames of the generated certificates for the kubeconfig",
Value: &altNames,
},
cli.Int64Flag{
Name: "expiration-days",
Usage: "Expiration date of the certificates used for the kubeconfig",
Destination: &expirationDays,
Value: 356,
},
}
)
var subcommands = []cli.Command{
{
Name: "generate",
Usage: "Generate kubeconfig for clusters",
SkipFlagParsing: false,
SkipArgReorder: true,
Action: generate,
Flags: append(cmds.CommonFlags, generateKubeconfigFlags...),
},
}
func NewCommand() cli.Command {
return cli.Command{
Name: "kubeconfig",
Usage: "Manage kubeconfig for clusters",
Subcommands: subcommands,
}
}
func generate(clx *cli.Context) error {
var cluster v1alpha1.Cluster
ctx := context.Background()
restConfig, err := clientcmd.BuildConfigFromFlags("", cmds.Kubeconfig)
if err != nil {
return err
}
ctrlClient, err := client.New(restConfig, client.Options{
Scheme: Scheme,
})
if err != nil {
return err
}
clusterKey := types.NamespacedName{
Name: name,
}
if err := ctrlClient.Get(ctx, clusterKey, &cluster); err != nil {
return err
}
url, err := url.Parse(restConfig.Host)
if err != nil {
return err
}
host := strings.Split(url.Host, ":")
certAltNames := kubeconfig.AddSANs(altNames)
if org == nil {
org = cli.StringSlice{user.SystemPrivilegedGroup}
}
cfg := kubeconfig.KubeConfig{
CN: cn,
ORG: org,
ExpiryDate: time.Hour * 24 * time.Duration(expirationDays),
AltNames: certAltNames,
}
logrus.Infof("waiting for cluster to be available..")
var kubeconfig []byte
if err := retry.OnError(backoff, apierrors.IsNotFound, func() error {
kubeconfig, err = cfg.Extract(ctx, ctrlClient, &cluster, host[0])
if err != nil {
return err
}
return nil
}); err != nil {
return err
}
pwd, err := os.Getwd()
if err != nil {
return err
}
if configName == "" {
configName = cluster.Name + "-kubeconfig.yaml"
}
logrus.Infof(`You can start using the cluster with:
export KUBECONFIG=%s
kubectl cluster-info
`, filepath.Join(pwd, configName))
return os.WriteFile(configName, kubeconfig, 0644)
}

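generate waits for the virtual cluster's bootstrap secret with client-go's retry.OnError, retrying only while the error satisfies apierrors.IsNotFound. A minimal, self-contained sketch of that pattern (backoff scaled down to milliseconds; the CLI above uses 20-second steps):

package main

import (
	"fmt"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/util/retry"
)

func main() {
	backoff := wait.Backoff{Steps: 5, Duration: 20 * time.Millisecond, Factor: 2, Jitter: 0.1}

	attempts := 0
	err := retry.OnError(backoff, apierrors.IsNotFound, func() error {
		attempts++
		if attempts < 3 {
			// NotFound keeps the loop retrying; any other error aborts it.
			return apierrors.NewNotFound(schema.GroupResource{Resource: "secrets"}, "k3k-bootstrap")
		}
		return nil
	})
	fmt.Println(attempts, err) // 3 <nil>
}
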
@@ -5,6 +5,7 @@ import (
"github.com/rancher/k3k/cli/cmds"
"github.com/rancher/k3k/cli/cmds/cluster"
"github.com/rancher/k3k/cli/cmds/kubeconfig"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
@@ -18,7 +19,8 @@ const (
func main() {
app := cmds.NewApp()
app.Commands = []cli.Command{
cluster.NewClusterCommand(),
cluster.NewCommand(),
kubeconfig.NewCommand(),
}
app.Version = version + " (" + gitCommit + ")"

@@ -3,16 +3,4 @@ manifests:
- image: rancher/k3k:{{replace "+" "-" build.tag}}-amd64
platform:
architecture: amd64
os: linux
- image: rancher/k3k:{{replace "+" "-" build.tag}}-arm64
platform:
architecture: arm64
os: linux
- image: rancher/k3k:{{replace "+" "-" build.tag}}-windows-amd64
platform:
architecture: amd64
os: windows
- image: rancher/k3k:{{replace "+" "-" build.tag}}-s390x
platform:
architecture: s390x
os: linux

@@ -12,21 +12,20 @@ if [ $(git tag -l "$version") ]; then
exit 1
fi
# release the chart with artifacts
cr upload --token ${GITHUB_TOKEN} \
--release-name-template "chart-{{ .Version }}" \
--package-path ./deploy/ \
--git-repo k3k \
--skip-existing \
-o rancher
# update the index.yaml
cr index --token ${GITHUB_TOKEN} \
--release-name-template "chart-{{ .Version }}" \
--package-path ./deploy/ \
--index-path index.yaml \
--git-repo k3k \
-o rancher \
--push
-o rancher
# push to gh-pages
git config --global user.email "hussein.galal.ahmed.11@gmail.com"
git config --global user.name "galal-hussein"
git config --global url.https://${GITHUB_TOKEN}@github.com/.insteadOf https://github.com/
# push index.yaml to gh-pages
git add index.yaml
git commit -m "add chart-${CHART_TAG} to index.yaml"
git push --force --set-upstream origin HEAD:gh-pages

@@ -16,7 +16,6 @@ type Cluster struct {
}
type ClusterSpec struct {
Name string `json:"name"`
Version string `json:"version"`
Servers *int32 `json:"servers"`
Agents *int32 `json:"agents"`

@@ -7,6 +7,7 @@ import (
"errors"
"fmt"
"net/url"
"reflect"
"strings"
"time"
@@ -15,6 +16,8 @@ import (
"github.com/rancher/k3k/pkg/controller/cluster/agent"
"github.com/rancher/k3k/pkg/controller/cluster/config"
"github.com/rancher/k3k/pkg/controller/cluster/server"
"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
"github.com/rancher/k3k/pkg/controller/kubeconfig"
"github.com/rancher/k3k/pkg/controller/util"
"github.com/sirupsen/logrus"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
@@ -27,7 +30,7 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/retry"
"k8s.io/klog"
"sigs.k8s.io/controller-runtime/pkg/client"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
@@ -40,6 +43,7 @@ const (
clusterController = "k3k-cluster-controller"
clusterFinalizerName = "cluster.k3k.io/finalizer"
etcdPodFinalizerName = "etcdpod.k3k.io/finalizer"
ClusterInvalidName = "system"
maxConcurrentReconciles = 1
@@ -50,7 +54,7 @@ const (
)
type ClusterReconciler struct {
Client client.Client
Client ctrlruntimeclient.Client
Scheme *runtime.Scheme
}
@@ -95,18 +99,16 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
clusterName = s[1]
var cluster v1alpha1.Cluster
if err := c.Client.Get(ctx, types.NamespacedName{Name: clusterName}, &cluster); err != nil {
return reconcile.Result{}, util.LogAndReturnErr("failed to get cluster object", err)
if !apierrors.IsNotFound(err) {
return reconcile.Result{}, err
}
}
if *cluster.Spec.Servers == 1 {
klog.Infof("skipping request for etcd pod for cluster [%s] since it is not in HA mode", clusterName)
return reconcile.Result{}, nil
}
matchingLabels := client.MatchingLabels(map[string]string{"role": "server"})
listOpts := &client.ListOptions{Namespace: req.Namespace}
matchingLabels := ctrlruntimeclient.MatchingLabels(map[string]string{"role": "server"})
listOpts := &ctrlruntimeclient.ListOptions{Namespace: req.Namespace}
matchingLabels.ApplyToList(listOpts)
if err := c.Client.List(ctx, &podList, listOpts); err != nil {
return reconcile.Result{}, client.IgnoreNotFound(err)
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
for _, pod := range podList.Items {
klog.Infof("Handle etcd server pod [%s/%s]", pod.Namespace, pod.Name)
@@ -119,7 +121,7 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
}
if err := c.Client.Get(ctx, req.NamespacedName, &cluster); err != nil {
return reconcile.Result{}, client.IgnoreNotFound(err)
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
if cluster.DeletionTimestamp.IsZero() {
@@ -132,7 +134,7 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
// we create a namespace for each new cluster
var ns v1.Namespace
objKey := client.ObjectKey{
objKey := ctrlruntimeclient.ObjectKey{
Name: util.ClusterNamespace(&cluster),
}
if err := c.Client.Get(ctx, objKey, &ns); err != nil {
@@ -149,12 +151,12 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
}
// remove finalizer from the server pods and update them.
matchingLabels := client.MatchingLabels(map[string]string{"role": "server"})
listOpts := &client.ListOptions{Namespace: util.ClusterNamespace(&cluster)}
matchingLabels := ctrlruntimeclient.MatchingLabels(map[string]string{"role": "server"})
listOpts := &ctrlruntimeclient.ListOptions{Namespace: util.ClusterNamespace(&cluster)}
matchingLabels.ApplyToList(listOpts)
if err := c.Client.List(ctx, &podList, listOpts); err != nil {
return reconcile.Result{}, client.IgnoreNotFound(err)
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
for _, pod := range podList.Items {
if controllerutil.ContainsFinalizer(&pod, etcdPodFinalizerName) {
@@ -178,6 +180,10 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
}
func (c *ClusterReconciler) createCluster(ctx context.Context, cluster *v1alpha1.Cluster) error {
if err := c.validate(cluster); err != nil {
klog.Errorf("invalid change: %v", err)
return nil
}
s := server.New(cluster, c.Client)
if cluster.Spec.Persistence != nil {
@@ -239,12 +245,12 @@ func (c *ClusterReconciler) createCluster(ctx context.Context, cluster *v1alpha1
}
}
kubeconfigSecret, err := s.GenerateNewKubeConfig(ctx, serviceIP)
bootstrapSecret, err := bootstrap.Generate(ctx, cluster, serviceIP)
if err != nil {
return util.LogAndReturnErr("failed to generate new kubeconfig", err)
}
if err := c.Client.Create(ctx, kubeconfigSecret); err != nil {
if err := c.Client.Create(ctx, bootstrapSecret); err != nil {
if !apierrors.IsAlreadyExists(err) {
return util.LogAndReturnErr("failed to create kubeconfig secret", err)
}
@@ -333,7 +339,7 @@ func (c *ClusterReconciler) createClusterService(ctx context.Context, cluster *v
var service v1.Service
objKey := client.ObjectKey{
objKey := ctrlruntimeclient.ObjectKey{
Namespace: util.ClusterNamespace(cluster),
Name: "k3k-server-service",
}
@@ -364,10 +370,8 @@ func (c *ClusterReconciler) server(ctx context.Context, cluster *v1alpha1.Cluste
return err
}
if err := c.Client.Create(ctx, ServerStatefulSet); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
if err := c.ensure(ctx, ServerStatefulSet, false); err != nil {
return err
}
return nil
@@ -381,49 +385,12 @@ func (c *ClusterReconciler) agent(ctx context.Context, cluster *v1alpha1.Cluster
return err
}
if err := c.Client.Create(ctx, agentsDeployment); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
if err := c.ensure(ctx, agentsDeployment, false); err != nil {
return err
}
return nil
}
func serverData(serviceIP string, cluster *v1alpha1.Cluster) string {
return "cluster-init: true\nserver: https://" + serviceIP + ":6443" + serverOptions(cluster)
}
func initConfigData(cluster *v1alpha1.Cluster) string {
return "cluster-init: true\n" + serverOptions(cluster)
}
func serverOptions(cluster *v1alpha1.Cluster) string {
var opts string
// TODO: generate token if not found
if cluster.Spec.Token != "" {
opts = "token: " + cluster.Spec.Token + "\n"
}
if cluster.Status.ClusterCIDR != "" {
opts = opts + "cluster-cidr: " + cluster.Status.ClusterCIDR + "\n"
}
if cluster.Status.ServiceCIDR != "" {
opts = opts + "service-cidr: " + cluster.Status.ServiceCIDR + "\n"
}
if cluster.Spec.ClusterDNS != "" {
opts = opts + "cluster-dns: " + cluster.Spec.ClusterDNS + "\n"
}
if len(cluster.Spec.TLSSANs) > 0 {
opts = opts + "tls-san:\n"
for _, addr := range cluster.Spec.TLSSANs {
opts = opts + "- " + addr + "\n"
}
}
// TODO: Add extra args to the options
return opts
}
func agentConfig(cluster *v1alpha1.Cluster, serviceIP string) v1.Secret {
config := agentData(serviceIP, cluster.Spec.Token)
@@ -457,13 +424,15 @@ func (c *ClusterReconciler) handleServerPod(ctx context.Context, cluster v1alpha
}
// if etcd pod is marked for deletion then we need to remove it from the etcd member list before deletion
if !pod.DeletionTimestamp.IsZero() {
if cluster.Status.Persistence.Type != server.EphermalNodesType {
// check if cluster is deleted then remove the finalizer from the pod
if cluster.Name == "" {
if controllerutil.ContainsFinalizer(pod, etcdPodFinalizerName) {
controllerutil.RemoveFinalizer(pod, etcdPodFinalizerName)
if err := c.Client.Update(ctx, pod); err != nil {
return err
}
}
return nil
}
tlsConfig, err := c.getETCDTLS(&cluster)
if err != nil {
@@ -535,18 +504,18 @@ func (c *ClusterReconciler) getETCDTLS(cluster *v1alpha1.Cluster) (*tls.Config,
klog.Infof("generating etcd TLS client certificate for cluster [%s]", cluster.Name)
token := cluster.Spec.Token
endpoint := "k3k-server-service." + util.ClusterNamespace(cluster)
var bootstrap *server.ControlRuntimeBootstrap
var b *bootstrap.ControlRuntimeBootstrap
if err := retry.OnError(retry.DefaultBackoff, func(err error) bool {
return true
}, func() error {
var err error
bootstrap, err = server.DecodedBootstrap(token, endpoint)
b, err = bootstrap.DecodedBootstrap(token, endpoint)
return err
}); err != nil {
return nil, err
}
etcdCert, etcdKey, err := server.CreateClientCertKey("etcd-client", nil, nil, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, bootstrap.ETCDServerCA.Content, bootstrap.ETCDServerCAKey.Content)
etcdCert, etcdKey, err := kubeconfig.CreateClientCertKey("etcd-client", nil, nil, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, 0, b.ETCDServerCA.Content, b.ETCDServerCAKey.Content)
if err != nil {
return nil, err
}
@@ -555,7 +524,7 @@ func (c *ClusterReconciler) getETCDTLS(cluster *v1alpha1.Cluster) (*tls.Config,
return nil, err
}
// create rootCA CertPool
cert, err := certutil.ParseCertsPEM([]byte(bootstrap.ETCDServerCA.Content))
cert, err := certutil.ParseCertsPEM([]byte(b.ETCDServerCA.Content))
if err != nil {
return nil, err
}
@@ -567,3 +536,48 @@ func (c *ClusterReconciler) getETCDTLS(cluster *v1alpha1.Cluster) (*tls.Config,
Certificates: []tls.Certificate{clientCert},
}, nil
}
func (c *ClusterReconciler) validate(cluster *v1alpha1.Cluster) error {
if cluster.Name == ClusterInvalidName {
return errors.New("invalid cluster name " + cluster.Name + " no action will be taken")
}
return nil
}
func (c *ClusterReconciler) ensure(ctx context.Context, obj ctrlruntimeclient.Object, requiresRecreate bool) error {
exists := true
existingObject := obj.DeepCopyObject().(ctrlruntimeclient.Object)
if err := c.Client.Get(ctx, types.NamespacedName{Namespace: obj.GetNamespace(), Name: obj.GetName()}, existingObject); err != nil {
if !apierrors.IsNotFound(err) {
return fmt.Errorf("failed to get Object(%T): %w", existingObject, err)
}
exists = false
}
if !exists {
// if not exists create object
if err := c.Client.Create(ctx, obj); err != nil {
return err
}
return nil
}
// if exists then apply udpate or recreate if necessary
if reflect.DeepEqual(obj.(metav1.Object), existingObject.(metav1.Object)) {
return nil
}
if !requiresRecreate {
if err := c.Client.Update(ctx, obj); err != nil {
return err
}
} else {
// this handles object that needs recreation including configmaps and secrets
if err := c.Client.Delete(ctx, obj); err != nil {
return err
}
if err := c.Client.Create(ctx, obj); err != nil {
return err
}
}
return nil
}

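The new ensure helper is a hand-rolled create-or-update with an optional delete-and-recreate path for objects such as configmaps and secrets. For comparison, controller-runtime ships controllerutil.CreateOrUpdate, which covers the create/update half; ensureConfig below is a hypothetical sketch of the same flow for a ConfigMap:

package cluster

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// ensureConfig creates the ConfigMap if it is missing, otherwise updates it
// so Data matches the desired content; recreation is not handled here.
func ensureConfig(ctx context.Context, c client.Client, namespace, name, config string) error {
	cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace}}
	_, err := controllerutil.CreateOrUpdate(ctx, c, cm, func() error {
		cm.Data = map[string]string{"config.yaml": config} // desired state
		return nil
	})
	return err
}
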
@@ -1,34 +1,22 @@
package server
package bootstrap
import (
"context"
"crypto"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"encoding/json"
"fmt"
"net/http"
"time"
certutil "github.com/rancher/dynamiclistener/cert"
"github.com/rancher/k3k/pkg/controller/util"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/client-go/util/retry"
)
const (
adminCommonName = "system:admin"
port = 6443
)
type ControlRuntimeBootstrap struct {
ServerCA content
ServerCAKey content
ServerCA content `json:"serverCA"`
ServerCAKey content `json:"server"`
ClientCA content
ClientCAKey content
ETCDServerCA content
@@ -40,13 +28,11 @@ type content struct {
Content string
}
// GenerateNewKubeConfig generates the kubeconfig for the cluster:
// Generate generates the bootstrap for the cluster:
// 1- use the server token to get the bootstrap data from k3s
// 2- generate client admin cert/key
// 3- use the ca cert from the bootstrap data & admin cert/key to write a new kubeconfig
// 4- save the new kubeconfig as a secret
func (s *Server) GenerateNewKubeConfig(ctx context.Context, ip string) (*v1.Secret, error) {
token := s.cluster.Spec.Token
// 2- save the bootstrap data as a secret
func Generate(ctx context.Context, cluster *v1alpha1.Cluster, ip string) (*v1.Secret, error) {
token := cluster.Spec.Token
var bootstrap *ControlRuntimeBootstrap
if err := retry.OnError(retry.DefaultBackoff, func(err error) bool {
@@ -63,32 +49,21 @@ func (s *Server) GenerateNewKubeConfig(ctx context.Context, ip string) (*v1.Secr
return nil, err
}
adminCert, adminKey, err := CreateClientCertKey(
adminCommonName, []string{user.SystemPrivilegedGroup},
nil, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
bootstrap.ClientCA.Content,
bootstrap.ClientCAKey.Content)
bootstrapData, err := json.Marshal(bootstrap)
if err != nil {
return nil, err
}
url := fmt.Sprintf("https://%s:%d", ip, port)
kubeconfigData, err := kubeconfig(url, []byte(bootstrap.ServerCA.Content), adminCert, adminKey)
if err != nil {
return nil, err
}
return &v1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: s.cluster.Name + "-kubeconfig",
Namespace: util.ClusterNamespace(s.cluster),
Name: cluster.Name + "-bootstrap",
Namespace: "k3k-" + cluster.Name,
},
Data: map[string][]byte{
"kubeconfig.yaml": kubeconfigData,
"bootstrap": bootstrapData,
},
}, nil
@@ -126,80 +101,6 @@ func requestBootstrap(token, serverIP string) (*ControlRuntimeBootstrap, error)
return &runtimeBootstrap, nil
}
func CreateClientCertKey(commonName string, organization []string, altNames *certutil.AltNames, extKeyUsage []x509.ExtKeyUsage, caCert, caKey string) ([]byte, []byte, error) {
caKeyPEM, err := certutil.ParsePrivateKeyPEM([]byte(caKey))
if err != nil {
return nil, nil, err
}
caCertPEM, err := certutil.ParseCertsPEM([]byte(caCert))
if err != nil {
return nil, nil, err
}
b, err := generateKey()
if err != nil {
return nil, nil, err
}
key, err := certutil.ParsePrivateKeyPEM(b)
if err != nil {
return nil, nil, err
}
cfg := certutil.Config{
CommonName: commonName,
Organization: organization,
Usages: extKeyUsage,
}
if altNames != nil {
cfg.AltNames = *altNames
}
cert, err := certutil.NewSignedCert(cfg, key.(crypto.Signer), caCertPEM[0], caKeyPEM.(crypto.Signer))
if err != nil {
return nil, nil, err
}
return append(certutil.EncodeCertPEM(cert), certutil.EncodeCertPEM(caCertPEM[0])...), b, nil
}
func generateKey() (data []byte, err error) {
generatedData, err := certutil.MakeEllipticPrivateKeyPEM()
if err != nil {
return nil, fmt.Errorf("error generating key: %v", err)
}
return generatedData, nil
}
func kubeconfig(url string, serverCA, clientCert, clientKey []byte) ([]byte, error) {
config := clientcmdapi.NewConfig()
cluster := clientcmdapi.NewCluster()
cluster.CertificateAuthorityData = serverCA
cluster.Server = url
authInfo := clientcmdapi.NewAuthInfo()
authInfo.ClientCertificateData = clientCert
authInfo.ClientKeyData = clientKey
context := clientcmdapi.NewContext()
context.AuthInfo = "default"
context.Cluster = "default"
config.Clusters["default"] = cluster
config.AuthInfos["default"] = authInfo
config.Contexts["default"] = context
config.CurrentContext = "default"
kubeconfig, err := clientcmd.Write(*config)
if err != nil {
return nil, err
}
return kubeconfig, nil
}
func basicAuth(username, password string) string {
auth := username + ":" + password
return base64.StdEncoding.EncodeToString([]byte(auth))

@@ -15,6 +15,8 @@ const (
nginxSSLPassthroughAnnotation = "nginx.ingress.kubernetes.io/ssl-passthrough"
nginxBackendProtocolAnnotation = "nginx.ingress.kubernetes.io/backend-protocol"
nginxSSLRedirectAnnotation = "nginx.ingress.kubernetes.io/ssl-redirect"
serverPort = 6443
etcdPort = 2379
)
func (s *Server) Ingress(ctx context.Context, client client.Client) (*networkingv1.Ingress, error) {
@@ -22,7 +24,6 @@ func (s *Server) Ingress(ctx context.Context, client client.Client) (*networking
if err != nil {
return nil, err
}
ingressRules := s.ingressRules(addresses)
ingress := &networkingv1.Ingress{
TypeMeta: metav1.TypeMeta{
@@ -47,7 +48,6 @@ func (s *Server) Ingress(ctx context.Context, client client.Client) (*networking
func (s *Server) ingressRules(addresses []string) []networkingv1.IngressRule {
var ingressRules []networkingv1.IngressRule
pathTypePrefix := networkingv1.PathTypePrefix
for _, address := range addresses {
rule := networkingv1.IngressRule{
Host: s.cluster.Name + "." + address + wildcardDNS,
@@ -61,7 +61,7 @@ func (s *Server) ingressRules(addresses []string) []networkingv1.IngressRule {
Service: &networkingv1.IngressServiceBackend{
Name: "k3k-server-service",
Port: networkingv1.ServiceBackendPort{
Number: port,
Number: serverPort,
},
},
},

@@ -212,7 +212,7 @@ func (s *Server) StatefulServer(ctx context.Context, cluster *v1alpha1.Cluster)
replicas = *cluster.Spec.Servers
if cluster.Spec.Persistence.Type != EphermalNodesType {
if cluster.Spec.Persistence != nil && cluster.Spec.Persistence.Type != EphermalNodesType {
persistent = true
pvClaims = []v1.PersistentVolumeClaim{
{

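The added nil check matters because Persistence is a pointer field: dereferencing cluster.Spec.Persistence.Type while it is unset panics the controller. A minimal reproduction with abbreviated, illustrative types:

package main

import "fmt"

type Persistence struct{ Type string }

type Spec struct{ Persistence *Persistence }

func main() {
	s := Spec{} // optional field left unset, so Persistence is nil
	// Guard first, as the diff above now does:
	if s.Persistence != nil && s.Persistence.Type != "ephemeral" {
		fmt.Println("persistent")
	} else {
		fmt.Println("ephemeral or unset")
	}
	// Without the nil check, s.Persistence.Type would panic with
	// "invalid memory address or nil pointer dereference".
}
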
@@ -36,12 +36,12 @@ func (s *Server) Service(cluster *v1alpha1.Cluster) *v1.Service {
{
Name: "k3s-server-port",
Protocol: v1.ProtocolTCP,
Port: port,
Port: serverPort,
},
{
Name: "k3s-etcd-port",
Protocol: v1.ProtocolTCP,
Port: 2379,
Port: etcdPort,
},
},
},
@@ -70,12 +70,12 @@ func (s *Server) StatefulServerService(cluster *v1alpha1.Cluster) *v1.Service {
{
Name: "k3s-server-port",
Protocol: v1.ProtocolTCP,
Port: 6443,
Port: serverPort,
},
{
Name: "k3s-etcd-port",
Protocol: v1.ProtocolTCP,
Port: 2379,
Port: etcdPort,
},
},
},

@@ -0,0 +1,71 @@
package kubeconfig
import (
"crypto"
"crypto/x509"
"fmt"
"net"
"time"
certutil "github.com/rancher/dynamiclistener/cert"
)
func CreateClientCertKey(commonName string, organization []string, altNames *certutil.AltNames, extKeyUsage []x509.ExtKeyUsage, expiresAt time.Duration, caCert, caKey string) ([]byte, []byte, error) {
caKeyPEM, err := certutil.ParsePrivateKeyPEM([]byte(caKey))
if err != nil {
return nil, nil, err
}
caCertPEM, err := certutil.ParseCertsPEM([]byte(caCert))
if err != nil {
return nil, nil, err
}
b, err := generateKey()
if err != nil {
return nil, nil, err
}
key, err := certutil.ParsePrivateKeyPEM(b)
if err != nil {
return nil, nil, err
}
cfg := certutil.Config{
CommonName: commonName,
Organization: organization,
Usages: extKeyUsage,
ExpiresAt: expiresAt,
}
if altNames != nil {
cfg.AltNames = *altNames
}
cert, err := certutil.NewSignedCert(cfg, key.(crypto.Signer), caCertPEM[0], caKeyPEM.(crypto.Signer))
if err != nil {
return nil, nil, err
}
return append(certutil.EncodeCertPEM(cert), certutil.EncodeCertPEM(caCertPEM[0])...), b, nil
}
func generateKey() (data []byte, err error) {
generatedData, err := certutil.MakeEllipticPrivateKeyPEM()
if err != nil {
return nil, fmt.Errorf("error generating key: %v", err)
}
return generatedData, nil
}
func AddSANs(sans []string) certutil.AltNames {
var altNames certutil.AltNames
for _, san := range sans {
ip := net.ParseIP(san)
if ip == nil {
altNames.DNSNames = append(altNames.DNSNames, san)
} else {
altNames.IPs = append(altNames.IPs, ip)
}
}
return altNames
}

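AddSANs sorts user-supplied SANs by whether they parse as IP literals. A quick standalone illustration of the same branching (hypothetical inputs):

package main

import (
	"fmt"
	"net"
)

func main() {
	sans := []string{"10.0.0.5", "k3k.example.com"}
	var ips []net.IP
	var dnsNames []string
	for _, san := range sans {
		if ip := net.ParseIP(san); ip != nil {
			ips = append(ips, ip) // IP literal -> AltNames.IPs
		} else {
			dnsNames = append(dnsNames, san) // everything else -> AltNames.DNSNames
		}
	}
	fmt.Println(ips, dnsNames) // [10.0.0.5] [k3k.example.com]
}
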
@@ -0,0 +1,108 @@
package kubeconfig
import (
"context"
"crypto/x509"
"encoding/json"
"errors"
"fmt"
"time"
certutil "github.com/rancher/dynamiclistener/cert"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
"github.com/rancher/k3k/pkg/controller/util"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"sigs.k8s.io/controller-runtime/pkg/client"
)
type KubeConfig struct {
AltNames certutil.AltNames
CN string
ORG []string
ExpiryDate time.Duration
}
func (k *KubeConfig) Extract(ctx context.Context, client client.Client, cluster *v1alpha1.Cluster, hostServerIP string) ([]byte, error) {
nn := types.NamespacedName{
Name: cluster.Name + "-bootstrap",
Namespace: util.ClusterNamespace(cluster),
}
var bootstrapSecret v1.Secret
if err := client.Get(ctx, nn, &bootstrapSecret); err != nil {
return nil, err
}
bootstrapData := bootstrapSecret.Data["bootstrap"]
if bootstrapData == nil {
return nil, errors.New("empty bootstrap")
}
var bootstrap bootstrap.ControlRuntimeBootstrap
if err := json.Unmarshal(bootstrapData, &bootstrap); err != nil {
return nil, err
}
adminCert, adminKey, err := CreateClientCertKey(
k.CN, k.ORG,
&k.AltNames, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, k.ExpiryDate,
bootstrap.ClientCA.Content,
bootstrap.ClientCAKey.Content)
if err != nil {
return nil, err
}
// get the server service to extract the right IP
nn = types.NamespacedName{
Name: "k3k-server-service",
Namespace: util.ClusterNamespace(cluster),
}
var k3kService v1.Service
if err := client.Get(ctx, nn, &k3kService); err != nil {
return nil, err
}
url := fmt.Sprintf("https://%s:%d", k3kService.Spec.ClusterIP, util.ServerPort)
if k3kService.Spec.Type == v1.ServiceTypeNodePort {
nodePort := k3kService.Spec.Ports[0].NodePort
url = fmt.Sprintf("https://%s:%d", hostServerIP, nodePort)
}
kubeconfigData, err := kubeconfig(url, []byte(bootstrap.ServerCA.Content), adminCert, adminKey)
if err != nil {
return nil, err
}
return kubeconfigData, nil
}
func kubeconfig(url string, serverCA, clientCert, clientKey []byte) ([]byte, error) {
config := clientcmdapi.NewConfig()
cluster := clientcmdapi.NewCluster()
cluster.CertificateAuthorityData = serverCA
cluster.Server = url
authInfo := clientcmdapi.NewAuthInfo()
authInfo.ClientCertificateData = clientCert
authInfo.ClientKeyData = clientKey
context := clientcmdapi.NewContext()
context.AuthInfo = "default"
context.Cluster = "default"
config.Clusters["default"] = cluster
config.AuthInfos["default"] = authInfo
config.Contexts["default"] = context
config.CurrentContext = "default"
kubeconfig, err := clientcmd.Write(*config)
if err != nil {
return nil, err
}
return kubeconfig, nil
}

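The bytes Extract returns are a complete serialized kubeconfig, so they load directly into a client. A short sketch using standard client-go calls (nothing k3k-specific is assumed):

package kubeconfig

import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// clientsetFromKubeconfig turns the serialized kubeconfig produced above
// into a clientset pointed at the virtual cluster.
func clientsetFromKubeconfig(data []byte) (*kubernetes.Clientset, error) {
	restCfg, err := clientcmd.RESTConfigFromKubeConfig(data)
	if err != nil {
		return nil, err
	}
	return kubernetes.NewForConfig(restCfg)
}
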
@@ -12,6 +12,8 @@ import (
const (
namespacePrefix = "k3k-"
k3SImageName = "rancher/k3s"
AdminCommonName = "system:admin"
ServerPort = 6443
)
const (
@@ -58,8 +60,7 @@ func Addresses(ctx context.Context, client client.Client) ([]string, error) {
return nil, err
}
addresses := make([]string, len(nodeList.Items))
var addresses []string
for _, node := range nodeList.Items {
addresses = append(addresses, nodeAddress(&node))
}