remove some unused code and additional updates

Signed-off-by: Brian Downs <brian.downs@gmail.com>
This commit is contained in:
Brian Downs
2023-08-01 10:58:18 -07:00
parent 46965eb692
commit d32ce24d31
28 changed files with 47 additions and 280 deletions

View File

@@ -1,4 +1,4 @@
ARG GOLANG=rancher/hardened-build-base:v1.20.6b11
ARG GOLANG=rancher/hardened-build-base:v1.20.6b2
FROM ${GOLANG}
ARG DAPPER_HOST_ARCH
@@ -20,5 +20,5 @@ ENV DAPPER_DOCKER_SOCKET true
ENV HOME ${DAPPER_SOURCE}
WORKDIR ${DAPPER_SOURCE}
ENTRYPOINT ["./scripts/entry"]
ENTRYPOINT ["./ops/entry"]
CMD ["ci"]

View File

@@ -1,4 +1,4 @@
TARGETS := $(shell ls scripts)
TARGETS := $(shell ls ops)
.dapper:
@echo Downloading dapper

View File

@@ -17,11 +17,9 @@ var clusterSubcommands = []cli.Command{
}
func NewClusterCommand() cli.Command {
cmd := cli.Command{
return cli.Command{
Name: "cluster",
Usage: "cluster command",
Subcommands: clusterSubcommands,
}
return cmd
}

View File

@@ -148,9 +148,7 @@ func createCluster(clx *cli.Context) error {
return err
}
host := strings.Split(url.Host, ":")
cluster.Spec.TLSSANs = []string{
host[0],
}
cluster.Spec.TLSSANs = []string{host[0]}
if err := ctrlClient.Create(ctx, cluster); err != nil {
if apierrors.IsAlreadyExists(err) {
@@ -162,15 +160,13 @@ func createCluster(clx *cli.Context) error {
logrus.Infof("Extracting Kubeconfig for [%s] cluster", name)
var kubeconfig []byte
err = retry.OnError(backoff, apierrors.IsNotFound, func() error {
if err := retry.OnError(backoff, apierrors.IsNotFound, func() error {
kubeconfig, err = extractKubeconfig(ctx, ctrlClient, cluster, host[0])
if err != nil {
return err
}
return nil
})
if err != nil {
}); err != nil {
return err
}
@@ -178,11 +174,13 @@ func createCluster(clx *cli.Context) error {
if err != nil {
return err
}
logrus.Infof(`You can start using the cluster with:
export KUBECONFIG=%s
kubectl cluster-info
`, filepath.Join(pwd, cluster.Name+"-kubeconfig.yaml"))
return os.WriteFile(cluster.Name+"-kubeconfig.yaml", kubeconfig, 0644)
}
@@ -199,6 +197,7 @@ func validateCreateFlags(clx *cli.Context) error {
if cmds.Kubeconfig == "" && os.Getenv("KUBECONFIG") == "" {
return errors.New("empty kubeconfig")
}
return nil
}
@@ -230,6 +229,7 @@ func extractKubeconfig(ctx context.Context, client client.Client, cluster *v1alp
Name: cluster.Name + "-kubeconfig",
Namespace: util.ClusterNamespace(cluster),
}
var kubeSecret v1.Secret
if err := client.Get(ctx, nn, &kubeSecret); err != nil {
return nil, err
@@ -244,10 +244,12 @@ func extractKubeconfig(ctx context.Context, client client.Client, cluster *v1alp
Name: "k3k-server-service",
Namespace: util.ClusterNamespace(cluster),
}
var k3kService v1.Service
if err := client.Get(ctx, nn, &k3kService); err != nil {
return nil, err
}
if k3kService.Spec.Type == v1.ServiceTypeNodePort {
nodePort := k3kService.Spec.Ports[0].NodePort
@@ -266,6 +268,7 @@ func extractKubeconfig(ctx context.Context, client client.Client, cluster *v1alp
}
kubeconfig = b
}
return kubeconfig, nil
}
@@ -297,5 +300,6 @@ func generateKubeconfigFromRest(config *rest.Config) clientcmdapi.Config {
CurrentContext: "default-context",
AuthInfos: authinfos,
}
return clientConfig
}

View File

@@ -5,17 +5,22 @@ import (
"github.com/rancher/k3k/cli/cmds"
"github.com/rancher/k3k/cli/cmds/cluster"
"github.com/rancher/k3k/pkg/version"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
const (
program = "k3k"
version = "dev"
gitCommit = "HEAD"
)
func main() {
app := cmds.NewApp()
app.Commands = []cli.Command{
cluster.NewClusterCommand(),
}
app.Version = version.Version + " (" + version.GitCommit + ")"
app.Version = version + " (" + gitCommit + ")"
if err := app.Run(os.Args); err != nil {
logrus.Fatal(err)

View File

@@ -16,9 +16,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/manager"
)
var (
Scheme = runtime.NewScheme()
)
var Scheme = runtime.NewScheme()
func init() {
_ = clientgoscheme.AddToScheme(Scheme)

View File

@@ -5,10 +5,12 @@ source $(dirname $0)/version
cd $(dirname $0)/..
mkdir -p bin
mkdir -p bin deploy
if [ "$(uname)" = "Linux" ]; then
OTHER_LINKFLAGS="-extldflags -static -s"
fi
LINKFLAGS="-X github.com/rancher/k3k.Version=$VERSION"
LINKFLAGS="-X github.com/rancher/k3k.GitCommit=$COMMIT $LINKFLAGS"
CGO_ENABLED=0 go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3k

View File

View File

@@ -2,8 +2,8 @@
set -e
mkdir -p bin dist
if [ -e ./scripts/$1 ]; then
./scripts/"$@"
if [ -e ./ops/$1 ]; then
./ops/"$@"
else
exec "$@"
fi

View File

@@ -5,7 +5,7 @@ cd $(dirname $0)/..
go generate
source ./scripts/version
source ./ops/version
if [ -n "$DIRTY" ]; then
echo Git is dirty

View File

@@ -1,50 +0,0 @@
package addressallocator
import (
"context"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
)
const (
	// AddressAllocatorController is the name this controller is
	// registered under with the manager.
	AddressAllocatorController = "address-allocator-controller"
)

// AddressAllocatorReconciler reconciles Cluster objects in order to
// allocate cluster/service CIDRs (see Reconcile below).
type AddressAllocatorReconciler struct {
	Client client.Client   // API client used to read/write objects
	Scheme *runtime.Scheme // scheme shared with the manager
}
// Add adds a new controller to the manager. It registers an
// AddressAllocatorReconciler and watches v1alpha1.Cluster objects,
// enqueuing one reconcile request per cluster event.
func Add(mgr manager.Manager) error {
	// initialize a new Reconciler backed by the manager's client and scheme
	reconciler := AddressAllocatorReconciler{
		Client: mgr.GetClient(),
		Scheme: mgr.GetScheme(),
	}
	// MaxConcurrentReconciles is 1, so reconciles run strictly serially
	controller, err := controller.New(AddressAllocatorController, mgr, controller.Options{
		Reconciler:              &reconciler,
		MaxConcurrentReconciles: 1,
	})
	if err != nil {
		return err
	}
	// every Cluster add/update/delete enqueues a request for that object
	return controller.Watch(&source.Kind{Type: &v1alpha1.Cluster{}},
		&handler.EnqueueRequestForObject{})
}
// Reconcile will allocate cluster/service cidrs to new clusters.
// NOTE(review): currently a no-op — it always returns an empty result
// and a nil error; the allocation logic is not implemented here.
func (r *AddressAllocatorReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
	return reconcile.Result{}, nil
}

View File

@@ -1,129 +0,0 @@
package cluster
import (
"context"
"fmt"
"net"
"time"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/util"
"k8s.io/apimachinery/pkg/types"
)
const (
	// names of the CIDRAllocationPool objects for cluster and
	// service address allocation respectively
	cidrAllocationClusterPoolName = "k3k-cluster-cidr-allocation-pool"
	cidrAllocationServicePoolName = "k3k-service-cidr-allocation-pool"
	// CIDRs used when a cluster does not specify its own
	defaultClusterCIDR        = "10.44.0.0/16"
	defaultClusterServiceCIDR = "10.45.0.0/16"
)
// determineOctet reports which IPv4 octet (1-4) a subnet mask of mb
// bits addresses; mask sizes beyond 32 bits yield 0.
func determineOctet(mb int) uint8 {
	if mb <= 8 {
		return 1
	}
	if mb <= 16 {
		return 2
	}
	if mb <= 24 {
		return 3
	}
	if mb <= 32 {
		return 4
	}
	return 0
}

// generateSubnets lists the subnets that follow the given CIDR,
// produced by repeatedly incrementing the octet selected by the mask
// length until that octet would reach 255.
func generateSubnets(cidr string) ([]string, error) {
	_, ipNet, err := net.ParseCIDR(cidr)
	if err != nil {
		return nil, err
	}
	maskBits, _ := ipNet.Mask.Size()
	idx := determineOctet(maskBits) - 1
	addr := ipNet.IP.To4()

	var subnets []string
	// the loop counter starts at the octet's current value, so the
	// number of generated subnets shrinks as the base address grows
	for count := addr[idx]; count < 254; count++ {
		addr[idx]++
		subnets = append(subnets, fmt.Sprintf("%s/%d", addr, maskBits))
	}
	return subnets, nil
}
// nextCIDR retrieves the next available CIDR address from the given pool.
// If clusterName already holds an allocation it is returned unchanged;
// otherwise the first free slot is claimed for the cluster and the pool
// object is persisted. Returns (nil, nil) when the pool is exhausted —
// callers must handle a nil *net.IPNet with a nil error.
func (c *ClusterReconciler) nextCIDR(ctx context.Context, cidrAllocationPoolName, clusterName string) (*net.IPNet, error) {
	var cidrPool v1alpha1.CIDRAllocationPool
	// pools are looked up by name only (no namespace set)
	nn := types.NamespacedName{
		Name: cidrAllocationPoolName,
	}
	if err := c.Client.Get(ctx, nn, &cidrPool); err != nil {
		return nil, util.WrapErr("failed to get cidrpool", err)
	}
	var ipNet *net.IPNet
	// fast path: reuse an allocation previously issued to this cluster
	for _, pool := range cidrPool.Status.Pool {
		if pool.ClusterName == clusterName {
			_, ipn, err := net.ParseCIDR(pool.IPNet)
			if err != nil {
				return nil, util.WrapErr("failed to parse cidr", err)
			}
			return ipn, nil
		}
	}
	// otherwise claim the first unowned, never-issued slot
	for i := 0; i < len(cidrPool.Status.Pool); i++ {
		if cidrPool.Status.Pool[i].ClusterName == "" && cidrPool.Status.Pool[i].Issued == 0 {
			cidrPool.Status.Pool[i].ClusterName = clusterName
			cidrPool.Status.Pool[i].Issued = time.Now().Unix()
			_, ipn, err := net.ParseCIDR(cidrPool.Status.Pool[i].IPNet)
			if err != nil {
				return nil, util.WrapErr("failed to parse cidr", err)
			}
			// NOTE(review): the claim is recorded in Status, but this
			// writes via Update (spec) rather than Status().Update —
			// confirm the status change actually persists.
			if err := c.Client.Update(ctx, &cidrPool); err != nil {
				return nil, util.WrapErr("failed to update cidr pool", err)
			}
			ipNet = ipn
			break
		}
	}
	return ipNet, nil
}
// releaseCIDR updates the given CIDR pool by marking the address as available.
// Every slot owned by clusterName has its owner and issue timestamp reset.
func (c *ClusterReconciler) releaseCIDR(ctx context.Context, cidrAllocationPoolName, clusterName string) error {
	var cidrPool v1alpha1.CIDRAllocationPool
	// pools are looked up by name only (no namespace set)
	nn := types.NamespacedName{
		Name: cidrAllocationPoolName,
	}
	if err := c.Client.Get(ctx, nn, &cidrPool); err != nil {
		return err
	}
	for i := 0; i < len(cidrPool.Status.Pool); i++ {
		if cidrPool.Status.Pool[i].ClusterName == clusterName {
			cidrPool.Status.Pool[i].ClusterName = ""
			cidrPool.Status.Pool[i].Issued = 0
		}
		// NOTE(review): this Status update runs once per pool slot, not
		// once per release — consider hoisting it below the loop.
		if err := c.Client.Status().Update(ctx, &cidrPool); err != nil {
			return err
		}
	}
	return nil
}

View File

@@ -9,8 +9,8 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func AgentConfig(cluster *v1alpha1.Cluster, serviceIP string) v1.Secret {
config := agentConfigData(serviceIP, cluster.Spec.Token)
func Agent(cluster *v1alpha1.Cluster, serviceIP string) v1.Secret {
config := agentData(serviceIP, cluster.Spec.Token)
return v1.Secret{
TypeMeta: metav1.TypeMeta{
@@ -27,7 +27,7 @@ func AgentConfig(cluster *v1alpha1.Cluster, serviceIP string) v1.Secret {
}
}
func agentConfigData(serviceIP, token string) string {
func agentData(serviceIP, token string) string {
return fmt.Sprintf(`server: https://%s:6443
token: %s`, serviceIP, token)
}

View File

@@ -7,13 +7,13 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func ServerConfig(cluster *v1alpha1.Cluster, init bool, serviceIP string) (*v1.Secret, error) {
func Server(cluster *v1alpha1.Cluster, init bool, serviceIP string) (*v1.Secret, error) {
name := "k3k-server-config"
if init {
name = "k3k-init-server-config"
}
config := serverConfigData(serviceIP, cluster)
config := serverData(serviceIP, cluster)
if init {
config = initConfigData(cluster)
}
@@ -32,7 +32,7 @@ func ServerConfig(cluster *v1alpha1.Cluster, init bool, serviceIP string) (*v1.S
}, nil
}
func serverConfigData(serviceIP string, cluster *v1alpha1.Cluster) string {
func serverData(serviceIP string, cluster *v1alpha1.Cluster) string {
return "cluster-init: true\nserver: https://" + serviceIP + ":6443" + serverOptions(cluster)
}

View File

@@ -102,16 +102,6 @@ func (c *ClusterReconciler) createCluster(ctx context.Context, cluster *v1alpha1
return util.WrapErr("failed to create ns", err)
}
cluster.Status.ClusterCIDR = cluster.Spec.ClusterCIDR
if cluster.Status.ClusterCIDR == "" {
cluster.Status.ClusterCIDR = defaultClusterCIDR
}
cluster.Status.ServiceCIDR = cluster.Spec.ServiceCIDR
if cluster.Status.ServiceCIDR == "" {
cluster.Status.ServiceCIDR = defaultClusterServiceCIDR
}
klog.Infof("creating cluster service")
serviceIP, err := c.createClusterService(ctx, cluster)
if err != nil {
@@ -177,7 +167,7 @@ func (c *ClusterReconciler) createNamespace(ctx context.Context, cluster *v1alph
func (c *ClusterReconciler) createClusterConfigs(ctx context.Context, cluster *v1alpha1.Cluster, serviceIP string) error {
// create init node config
initServerConfig, err := config.ServerConfig(cluster, true, serviceIP)
initServerConfig, err := config.Server(cluster, true, serviceIP)
if err != nil {
return err
}
@@ -193,7 +183,7 @@ func (c *ClusterReconciler) createClusterConfigs(ctx context.Context, cluster *v
}
// create servers configuration
serverConfig, err := config.ServerConfig(cluster, false, serviceIP)
serverConfig, err := config.Server(cluster, false, serviceIP)
if err != nil {
return err
}
@@ -207,7 +197,7 @@ func (c *ClusterReconciler) createClusterConfigs(ctx context.Context, cluster *v
}
// create agents configuration
agentsConfig := config.AgentConfig(cluster, serviceIP)
agentsConfig := config.Agent(cluster, serviceIP)
if err := controllerutil.SetControllerReference(cluster, &agentsConfig, c.Scheme); err != nil {
return err
}
@@ -289,30 +279,7 @@ func (c *ClusterReconciler) createDeployments(ctx context.Context, cluster *v1al
}
func (c *ClusterReconciler) createCIDRPools(ctx context.Context) error {
clusterSubnets, err := generateSubnets(defaultClusterCIDR)
if err != nil {
return err
}
var clusterSubnetAllocations []v1alpha1.Allocation
for _, cs := range clusterSubnets {
clusterSubnetAllocations = append(clusterSubnetAllocations, v1alpha1.Allocation{
IPNet: cs,
})
}
cidrClusterPool := v1alpha1.CIDRAllocationPool{
ObjectMeta: metav1.ObjectMeta{
Name: cidrAllocationClusterPoolName,
},
Spec: v1alpha1.CIDRAllocationPoolSpec{
DefaultClusterCIDR: defaultClusterCIDR,
},
Status: v1alpha1.CIDRAllocationPoolStatus{
Pool: clusterSubnetAllocations,
},
}
if err := c.Client.Create(ctx, &cidrClusterPool); err != nil {
if err := c.Client.Create(ctx, &v1alpha1.CIDRAllocationPool{}); err != nil {
if !apierrors.IsAlreadyExists(err) {
// a pre-existing pool is fine; any other
// creation error is propagated to the caller
@@ -320,35 +287,13 @@ func (c *ClusterReconciler) createCIDRPools(ctx context.Context) error {
}
}
clusterServiceSubnets, err := generateSubnets(defaultClusterServiceCIDR)
if err != nil {
return err
}
var clusterServiceSubnetAllocations []v1alpha1.Allocation
for _, ss := range clusterServiceSubnets {
clusterServiceSubnetAllocations = append(clusterServiceSubnetAllocations, v1alpha1.Allocation{
IPNet: ss,
})
}
cidrServicePool := v1alpha1.CIDRAllocationPool{
ObjectMeta: metav1.ObjectMeta{
Name: cidrAllocationServicePoolName,
},
Spec: v1alpha1.CIDRAllocationPoolSpec{
DefaultClusterCIDR: defaultClusterCIDR,
},
Status: v1alpha1.CIDRAllocationPoolStatus{
Pool: clusterServiceSubnetAllocations,
},
}
if err := c.Client.Create(ctx, &cidrServicePool); err != nil {
if err := c.Client.Create(ctx, &v1alpha1.CIDRAllocationPool{}); err != nil {
if !apierrors.IsAlreadyExists(err) {
// a pre-existing pool is fine; any other
// creation error is propagated to the caller
return err
}
}
return nil
}

View File

@@ -46,8 +46,9 @@ func Ingress(ctx context.Context, cluster *v1alpha1.Cluster, client client.Clien
}
func ingressRules(cluster *v1alpha1.Cluster, addresses []string) []networkingv1.IngressRule {
ingressRules := []networkingv1.IngressRule{}
var ingressRules []networkingv1.IngressRule
pathTypePrefix := networkingv1.PathTypePrefix
for _, address := range addresses {
rule := networkingv1.IngressRule{
Host: cluster.Name + "." + address + wildcardDNS,
@@ -72,6 +73,7 @@ func ingressRules(cluster *v1alpha1.Cluster, addresses []string) []networkingv1.
}
ingressRules = append(ingressRules, rule)
}
return ingressRules
}

View File

@@ -16,6 +16,7 @@ func Service(cluster *v1alpha1.Cluster) *v1.Service {
}
}
}
return &v1.Service{
TypeMeta: metav1.TypeMeta{
Kind: "Service",

View File

@@ -35,7 +35,8 @@ func nodeAddress(node *v1.Node) string {
if ip.Type == "ExternalIP" && ip.Address != "" {
externalIP = ip.Address
break
} else if ip.Type == "InternalIP" && ip.Address != "" {
}
if ip.Type == "InternalIP" && ip.Address != "" {
internalIP = ip.Address
}
}

View File

@@ -1,10 +0,0 @@
package version
import "strings"
var (
	// Program is the binary's name; ProgramUpper is its
	// uppercase form.
	Program      = "k3k"
	ProgramUpper = strings.ToUpper(Program)
	// Version and GitCommit are build-info placeholders —
	// presumably overridden at build time via -ldflags -X, but the
	// build script targets package github.com/rancher/k3k, not this
	// package path; verify the -X paths match before relying on it.
	Version   = "dev"
	GitCommit = "HEAD"
)