mirror of https://github.com/rancher/k3k.git
synced 2026-03-02 01:30:27 +00:00

Compare commits: v0.3.1-rc1 ... v0.3.2-rc1 (18 commits)
| Author | SHA1 | Date |
| --- | --- | --- |
|  | 7cb2399b89 |  |
|  | 90568f24b1 |  |
|  | 0843a9e313 |  |
|  | b58578788c |  |
|  | c4cc1e69cd |  |
|  | bd947c0fcb |  |
|  | b0b61f8d8e |  |
|  | 3281d54c6c |  |
|  | 853b0a7e05 |  |
|  | 28b15d2e92 |  |
|  | cad59c0494 |  |
|  | d0810af17c |  |
|  | 2b7202e676 |  |
|  | 4975b0b799 |  |
|  | 90d17cd6dd |  |
|  | 3e5e9c7965 |  |
|  | 1d027909ee |  |
|  | 6105402bf2 |  |
.github/ISSUE_TEMPLATE/bug_report.md (new file, vendored, 41 lines)

@@ -0,0 +1,41 @@
+---
+name: Bug report
+about: Create a report to help us improve
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+<!-- Thanks for helping us to improve K3K! We welcome all bug reports. Please fill out each area of the template so we can better help you. Comments like this will be hidden when you post but you can delete them if you wish. -->
+
+**Environmental Info:**
+Host Cluster Version:
+<!-- For example K3S v1.32.1+k3s1 or RKE2 v1.31.5+rke2r1 -->
+
+Node(s) CPU architecture, OS, and Version:
+<!-- Provide the output from "uname -a" on the node(s) -->
+
+Host Cluster Configuration:
+<!-- Provide some basic information on the cluster configuration. For example, "1 servers, 2 agents CNI: Flannel". -->
+
+K3K Cluster Configuration:
+<!-- Provide some basic information on the cluster configuration. For example, "3 servers, 2 agents". -->
+
+**Describe the bug:**
+<!-- A clear and concise description of what the bug is. -->
+
+**Steps To Reproduce:**
+- Created a cluster with `k3k create`:
+
+**Expected behavior:**
+<!-- A clear and concise description of what you expected to happen. -->
+
+**Actual behavior:**
+<!-- A clear and concise description of what actually happened. -->
+
+**Additional context / logs:**
+<!-- Add any other context and/or logs about the problem here. -->
+<!-- kubectl logs -n k3k-system -l app.kubernetes.io/instance=k3k -->
+<!-- $ kubectl logs -n <cluster-namespace> k3k-<cluster-name>-server-0 -->
+<!-- $ kubectl logs -n <cluster-namespace> -l cluster=<cluster-name>,mode=shared # in shared mode -->
Makefile (8 lines changed)

@@ -54,7 +54,7 @@ push-%:
 
 .PHONY: test
 test: ## Run all the tests
-	$(GINKGO) -v -r
+	$(GINKGO) -v -r --label-filter=$(label-filter)
 
 .PHONY: test-unit
 test-unit: ## Run the unit tests (skips the e2e)

@@ -87,10 +87,10 @@ lint: ## Find any linting issues in the project
 
 .PHONY: validate
 validate: build-crds docs ## Validate the project checking for any dependency or doc mismatch
 	$(GINKGO) unfocus
 	go mod tidy
-	git --no-pager diff go.mod go.sum
-	test -z "$(shell git status --porcelain)"
+	git status --porcelain
+	git --no-pager diff --exit-code
 
 .PHONY: install
 install: ## Install K3k with Helm on the targeted Kubernetes cluster
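The new `label-filter` variable is forwarded straight to Ginkgo's `--label-filter` flag, so the same target can now run any labeled subset of the suite. A minimal usage sketch; the `e2e` label name is an assumption for illustration, not something this diff defines:

```bash
# Run only specs carrying a given Ginkgo label (label name assumed)
make test label-filter=e2e

# Run everything except that label
make test label-filter='!e2e'
```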
@@ -2,5 +2,5 @@ apiVersion: v2
 name: k3k
 description: A Helm chart for K3K
 type: application
-version: 0.3.0-r1
-appVersion: v0.3.0
+version: 0.3.1-r2
+appVersion: v0.3.1
@@ -94,29 +94,6 @@ spec:
             x-kubernetes-validations:
             - message: clusterDNS is immutable
               rule: self == oldSelf
-          clusterLimit:
-            description: Limit defines resource limits for server/agent nodes.
-            properties:
-              serverLimit:
-                additionalProperties:
-                  anyOf:
-                  - type: integer
-                  - type: string
-                  pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
-                  x-kubernetes-int-or-string: true
-                description: ServerLimit specifies resource limits for server
-                  nodes.
-                type: object
-              workerLimit:
-                additionalProperties:
-                  anyOf:
-                  - type: integer
-                  - type: string
-                  pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
-                  x-kubernetes-int-or-string: true
-                description: WorkerLimit specifies resource limits for agent nodes.
-                type: object
-            type: object
           expose:
             description: |-
               Expose specifies options for exposing the API server.

@@ -225,6 +202,15 @@ spec:
               items:
                 type: string
               type: array
+            serverLimit:
+              additionalProperties:
+                anyOf:
+                - type: integer
+                - type: string
+                pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                x-kubernetes-int-or-string: true
+              description: ServerLimit specifies resource limits for server nodes.
+              type: object
             servers:
               default: 1
               description: |-

@@ -271,6 +257,15 @@ spec:
                 It should follow the K3s versioning convention (e.g., v1.28.2-k3s1).
                 If not specified, the Kubernetes version of the host node will be used.
               type: string
+            workerLimit:
+              additionalProperties:
+                anyOf:
+                - type: integer
+                - type: string
+                pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                x-kubernetes-int-or-string: true
+              description: WorkerLimit specifies resource limits for agent nodes.
+              type: object
           type: object
         status:
           description: Status reflects the observed state of the Cluster.
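With the nested `clusterLimit` block gone, `serverLimit` and `workerLimit` now sit directly under `spec` as plain Kubernetes resource lists. A hedged sketch of a manifest using the new layout (names and values are illustrative only):

```bash
cat <<EOF | kubectl apply -f -
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
  name: mycluster            # illustrative name
  namespace: k3k-mycluster
spec:
  servers: 1
  serverLimit:               # was spec.clusterLimit.serverLimit before this change
    cpu: "1"
    memory: 1Gi
  workerLimit:               # was spec.clusterLimit.workerLimit
    cpu: 500m
    memory: 512Mi
EOF
```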
@@ -14,7 +14,14 @@ spec:
     singular: clusterset
   scope: Namespaced
   versions:
-  - name: v1alpha1
+  - additionalPrinterColumns:
+    - jsonPath: .spec.displayName
+      name: Display Name
+      type: string
+    - jsonPath: .metadata.creationTimestamp
+      name: Age
+      type: date
+    name: v1alpha1
     schema:
       openAPIV3Schema:
         description: |-

@@ -42,10 +49,10 @@ spec:
           default: {}
           description: Spec defines the desired state of the ClusterSet.
           properties:
-            allowedNodeTypes:
+            allowedModeTypes:
               default:
               - shared
-              description: AllowedNodeTypes specifies the allowed cluster provisioning
+              description: AllowedModeTypes specifies the allowed cluster provisioning
                 modes. Defaults to [shared].
               items:
                 description: ClusterMode is the possible provisioning mode of a

@@ -59,30 +66,6 @@ spec:
               x-kubernetes-validations:
               - message: mode is immutable
                 rule: self == oldSelf
-            defaultLimits:
-              description: DefaultLimits specifies the default resource limits for
-                servers/agents when a cluster in the set doesn't provide any.
-              properties:
-                serverLimit:
-                  additionalProperties:
-                    anyOf:
-                    - type: integer
-                    - type: string
-                    pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
-                    x-kubernetes-int-or-string: true
-                  description: ServerLimit specifies resource limits for server
-                    nodes.
-                  type: object
-                workerLimit:
-                  additionalProperties:
-                    anyOf:
-                    - type: integer
-                    - type: string
-                    pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
-                    x-kubernetes-int-or-string: true
-                  description: WorkerLimit specifies resource limits for agent nodes.
-                  type: object
-              type: object
             defaultNodeSelector:
               additionalProperties:
                 type: string

@@ -97,15 +80,84 @@ spec:
               description: DisableNetworkPolicy indicates whether to disable the
                 creation of a default network policy for cluster isolation.
               type: boolean
-            maxLimits:
-              additionalProperties:
-                anyOf:
-                - type: integer
-                - type: string
-                pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
-                x-kubernetes-int-or-string: true
-              description: MaxLimits specifies the maximum resource limits that
-                apply to all clusters (server + agent) in the set.
+            displayName:
+              description: DisplayName is the human-readable name for the set.
+              type: string
+            limit:
+              description: |-
+                Limit specifies the LimitRange that will be applied to all pods within the ClusterSet
+                to set defaults and constraints (min/max)
+              properties:
+                limits:
+                  description: Limits is the list of LimitRangeItem objects that
+                    are enforced.
+                  items:
+                    description: LimitRangeItem defines a min/max usage limit for
+                      any resource that matches on kind.
+                    properties:
+                      default:
+                        additionalProperties:
+                          anyOf:
+                          - type: integer
+                          - type: string
+                          pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                          x-kubernetes-int-or-string: true
+                        description: Default resource requirement limit value by
+                          resource name if resource limit is omitted.
+                        type: object
+                      defaultRequest:
+                        additionalProperties:
+                          anyOf:
+                          - type: integer
+                          - type: string
+                          pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                          x-kubernetes-int-or-string: true
+                        description: DefaultRequest is the default resource requirement
+                          request value by resource name if resource request is
+                          omitted.
+                        type: object
+                      max:
+                        additionalProperties:
+                          anyOf:
+                          - type: integer
+                          - type: string
+                          pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                          x-kubernetes-int-or-string: true
+                        description: Max usage constraints on this kind by resource
+                          name.
+                        type: object
+                      maxLimitRequestRatio:
+                        additionalProperties:
+                          anyOf:
+                          - type: integer
+                          - type: string
+                          pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                          x-kubernetes-int-or-string: true
+                        description: MaxLimitRequestRatio if specified, the named
+                          resource must have a request and limit that are both non-zero
+                          where limit divided by request is less than or equal to
+                          the enumerated value; this represents the max burst for
+                          the named resource.
+                        type: object
+                      min:
+                        additionalProperties:
+                          anyOf:
+                          - type: integer
+                          - type: string
+                          pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                          x-kubernetes-int-or-string: true
+                        description: Min usage constraints on this kind by resource
+                          name.
+                        type: object
+                      type:
+                        description: Type of resource that this limit applies to.
+                        type: string
+                    required:
+                    - type
+                    type: object
+                  type: array
+              required:
+              - limits
+              type: object
             podSecurityAdmissionLevel:
               description: PodSecurityAdmissionLevel specifies the pod security

@@ -115,6 +167,70 @@ spec:
               - baseline
               - restricted
               type: string
+            quota:
+              description: Quota specifies the resource limits for clusters within
+                a clusterset.
+              properties:
+                hard:
+                  additionalProperties:
+                    anyOf:
+                    - type: integer
+                    - type: string
+                    pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                    x-kubernetes-int-or-string: true
+                  description: |-
+                    hard is the set of desired hard limits for each named resource.
+                    More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
+                  type: object
+                scopeSelector:
+                  description: |-
+                    scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota
+                    but expressed using ScopeSelectorOperator in combination with possible values.
+                    For a resource to match, both scopes AND scopeSelector (if specified in spec), must be matched.
+                  properties:
+                    matchExpressions:
+                      description: A list of scope selector requirements by scope
+                        of the resources.
+                      items:
+                        description: |-
+                          A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator
+                          that relates the scope name and values.
+                        properties:
+                          operator:
+                            description: |-
+                              Represents a scope's relationship to a set of values.
+                              Valid operators are In, NotIn, Exists, DoesNotExist.
+                            type: string
+                          scopeName:
+                            description: The name of the scope that the selector
+                              applies to.
+                            type: string
+                          values:
+                            description: |-
+                              An array of string values. If the operator is In or NotIn,
+                              the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                              the values array must be empty.
+                              This array is replaced during a strategic merge patch.
+                            items:
+                              type: string
+                            type: array
+                        required:
+                        - operator
+                        - scopeName
+                        type: object
+                      type: array
+                  type: object
+                  x-kubernetes-map-type: atomic
+                scopes:
+                  description: |-
+                    A collection of filters that must match each object tracked by a quota.
+                    If not specified, the quota matches all objects.
+                  items:
+                    description: A ResourceQuotaScope defines a filter that must
+                      match each object tracked by a quota
+                    type: string
+                  type: array
+              type: object
           type: object
         status:
           description: Status reflects the observed state of the ClusterSet.

@@ -206,6 +322,9 @@ spec:
         required:
         - spec
         type: object
+        x-kubernetes-validations:
+        - message: Name must match 'default'
+          rule: self.metadata.name == "default"
     served: true
     storage: true
     subresources:
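The new `additionalPrinterColumns` surface `.spec.displayName` and the object's age directly in `kubectl get` output, so the display name is visible without inspecting the spec:

```bash
# The "Display Name" and "Age" columns come from the new additionalPrinterColumns
kubectl get clustersets --all-namespaces
```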
@@ -4,13 +4,13 @@ import (
 	"github.com/urfave/cli/v2"
 )
 
-func NewClusterCommand() *cli.Command {
+func NewClusterCmd(appCtx *AppContext) *cli.Command {
 	return &cli.Command{
 		Name:  "cluster",
 		Usage: "cluster command",
 		Subcommands: []*cli.Command{
-			NewClusterCreateCmd(),
-			NewClusterDeleteCmd(),
+			NewClusterCreateCmd(appCtx),
+			NewClusterDeleteCmd(appCtx),
 		},
 	}
 }
@@ -3,9 +3,9 @@ package cmds
 import (
 	"context"
 	"errors"
+	"fmt"
 	"net/url"
-	"os"
-	"path/filepath"
+	"slices"
 	"strings"
 	"time"

@@ -17,12 +17,11 @@ import (
 	v1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/client-go/tools/clientcmd"
 	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
 	"k8s.io/client-go/util/retry"
 	"k8s.io/utils/ptr"
-	"sigs.k8s.io/controller-runtime/pkg/client"
 )

@@ -38,9 +37,10 @@ type CreateConfig struct {
 	version              string
 	mode                 string
 	kubeconfigServerHost string
+	clusterset           string
 }
 
-func NewClusterCreateCmd() *cli.Command {
+func NewClusterCreateCmd(appCtx *AppContext) *cli.Command {
 	createConfig := &CreateConfig{}
 	createFlags := NewCreateFlags(createConfig)

@@ -48,15 +48,16 @@ func NewClusterCreateCmd() *cli.Command {
 		Name:      "create",
 		Usage:     "Create new cluster",
 		UsageText: "k3kcli cluster create [command options] NAME",
-		Action:    createAction(config),
-		Flags:     append(CommonFlags, createFlags...),
+		Action:    createAction(appCtx, config),
+		Flags:     WithCommonFlags(appCtx, createFlags...),
 		HideHelpCommand: true,
 	}
 }
 
-func createAction(config *CreateConfig) cli.ActionFunc {
+func createAction(appCtx *AppContext, config *CreateConfig) cli.ActionFunc {
 	return func(clx *cli.Context) error {
 		ctx := context.Background()
+		client := appCtx.Client
 
 		if clx.NArg() != 1 {
 			return cli.ShowSubcommandHelp(clx)

@@ -67,16 +68,38 @@ func createAction(config *CreateConfig) cli.ActionFunc {
 			return errors.New("invalid cluster name")
 		}
 
-		restConfig, err := clientcmd.BuildConfigFromFlags("", Kubeconfig)
-		if err != nil {
-			return err
-		}
-
-		ctrlClient, err := client.New(restConfig, client.Options{
-			Scheme: Scheme,
-		})
-		if err != nil {
-			return err
-		}
+		namespace := appCtx.Namespace(name)
+
+		// if clusterset is set, use the namespace of the clusterset
+		if config.clusterset != "" {
+			namespace = appCtx.Namespace(config.clusterset)
+		}
+
+		if err := createNamespace(ctx, client, namespace); err != nil {
+			return err
+		}
+
+		// if clusterset is set, create the cluster set
+		if config.clusterset != "" {
+			namespace = appCtx.Namespace(config.clusterset)
+
+			clusterSet := &v1alpha1.ClusterSet{}
+			if err := client.Get(ctx, types.NamespacedName{Name: "default", Namespace: namespace}, clusterSet); err != nil {
+				if !apierrors.IsNotFound(err) {
+					return err
+				}
+
+				clusterSet, err = createClusterSet(ctx, client, namespace, v1alpha1.ClusterMode(config.mode), config.clusterset)
+				if err != nil {
+					return err
+				}
+			}
+
+			logrus.Infof("ClusterSet in namespace [%s] available", namespace)
+
+			if !slices.Contains(clusterSet.Spec.AllowedModeTypes, v1alpha1.ClusterMode(config.mode)) {
+				return fmt.Errorf("invalid '%s' Cluster mode. ClusterSet only allows %v", config.mode, clusterSet.Spec.AllowedModeTypes)
+			}
+		}
 
 		if strings.Contains(config.version, "+") {

@@ -86,25 +109,25 @@ func createAction(config *CreateConfig) cli.ActionFunc {
 		}
 
 		if config.token != "" {
-			logrus.Infof("Creating cluster token secret")
+			logrus.Info("Creating cluster token secret")
 
-			obj := k3kcluster.TokenSecretObj(config.token, name, Namespace())
+			obj := k3kcluster.TokenSecretObj(config.token, name, namespace)
 
-			if err := ctrlClient.Create(ctx, &obj); err != nil {
+			if err := client.Create(ctx, &obj); err != nil {
 				return err
 			}
 		}
 
-		logrus.Infof("Creating a new cluster [%s]", name)
+		logrus.Infof("Creating cluster [%s] in namespace [%s]", name, namespace)
 
-		cluster := newCluster(name, Namespace(), config)
+		cluster := newCluster(name, namespace, config)
 
 		cluster.Spec.Expose = &v1alpha1.ExposeConfig{
 			NodePort: &v1alpha1.NodePortConfig{},
 		}
 
 		// add Host IP address as an extra TLS-SAN to expose the k3k cluster
-		url, err := url.Parse(restConfig.Host)
+		url, err := url.Parse(appCtx.RestConfig.Host)
 		if err != nil {
 			return err
 		}

@@ -116,7 +139,7 @@ func createAction(config *CreateConfig) cli.ActionFunc {
 
 		cluster.Spec.TLSSANs = []string{host[0]}
 
-		if err := ctrlClient.Create(ctx, cluster); err != nil {
+		if err := client.Create(ctx, cluster); err != nil {
 			if apierrors.IsAlreadyExists(err) {
 				logrus.Infof("Cluster [%s] already exists", name)
 			} else {

@@ -140,29 +163,13 @@ func createAction(config *CreateConfig) cli.ActionFunc {
 		var kubeconfig *clientcmdapi.Config
 
 		if err := retry.OnError(availableBackoff, apierrors.IsNotFound, func() error {
-			kubeconfig, err = cfg.Extract(ctx, ctrlClient, cluster, host[0])
+			kubeconfig, err = cfg.Extract(ctx, client, cluster, host[0])
 			return err
 		}); err != nil {
 			return err
 		}
 
-		pwd, err := os.Getwd()
-		if err != nil {
-			return err
-		}
-
-		logrus.Infof(`You can start using the cluster with:
-
-	export KUBECONFIG=%s
-	kubectl cluster-info
-	`, filepath.Join(pwd, cluster.Name+"-kubeconfig.yaml"))
-
-		kubeconfigData, err := clientcmd.Write(*kubeconfig)
-		if err != nil {
-			return err
-		}
-
-		return os.WriteFile(cluster.Name+"-kubeconfig.yaml", kubeconfigData, 0644)
+		return writeKubeconfigFile(cluster, kubeconfig)
 	}
 }

@@ -94,5 +94,10 @@ func NewCreateFlags(config *CreateConfig) []cli.Flag {
 			Usage:       "override the kubeconfig server host",
 			Destination: &config.kubeconfigServerHost,
 		},
+		&cli.StringFlag{
+			Name:        "clusterset",
+			Usage:       "The clusterset to create the cluster in",
+			Destination: &config.clusterset,
+		},
 	}
 }
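Putting the new flag and namespace handling together: `--clusterset` places the cluster in the set's namespace, ensures a `default` ClusterSet exists there, and rejects modes the set does not allow. A usage sketch with illustrative names:

```bash
# Creates/reuses the clusterset namespace, ensures a "default" ClusterSet
# exists in it, then creates the cluster there (fails if the set's
# allowedModeTypes does not include the requested mode)
k3kcli cluster create --clusterset myset --mode shared mycluster
```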
@@ -6,56 +6,112 @@ import (
 
 	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
 	k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
+	"github.com/rancher/k3k/pkg/controller/cluster/agent"
 	"github.com/sirupsen/logrus"
 	"github.com/urfave/cli/v2"
+	v1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/client-go/tools/clientcmd"
-	"sigs.k8s.io/controller-runtime/pkg/client"
+	"k8s.io/apimachinery/pkg/types"
+	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 )
 
-func NewClusterDeleteCmd() *cli.Command {
+var keepData bool
+
+func NewClusterDeleteCmd(appCtx *AppContext) *cli.Command {
 	return &cli.Command{
-		Name:      "delete",
-		Usage:     "Delete an existing cluster",
-		UsageText: "k3kcli cluster delete [command options] NAME",
-		Action:    delete,
-		Flags:     CommonFlags,
+		Name:      "delete",
+		Usage:     "Delete an existing cluster",
+		UsageText: "k3kcli cluster delete [command options] NAME",
+		Action:    delete(appCtx),
+		Flags: WithCommonFlags(appCtx, &cli.BoolFlag{
+			Name:        "keep-data",
+			Usage:       "keeps persistence volumes created for the cluster after deletion",
+			Destination: &keepData,
+		}),
 		HideHelpCommand: true,
 	}
 }
 
-func delete(clx *cli.Context) error {
-	ctx := context.Background()
+func delete(appCtx *AppContext) cli.ActionFunc {
+	return func(clx *cli.Context) error {
+		ctx := context.Background()
+		client := appCtx.Client
 
-	if clx.NArg() != 1 {
-		return cli.ShowSubcommandHelp(clx)
-	}
+		if clx.NArg() != 1 {
+			return cli.ShowSubcommandHelp(clx)
+		}
 
-	name := clx.Args().First()
-	if name == k3kcluster.ClusterInvalidName {
-		return errors.New("invalid cluster name")
-	}
+		name := clx.Args().First()
+		if name == k3kcluster.ClusterInvalidName {
+			return errors.New("invalid cluster name")
+		}
 
-	restConfig, err := clientcmd.BuildConfigFromFlags("", Kubeconfig)
-	if err != nil {
-		return err
-	}
+		namespace := appCtx.Namespace(name)
 
-	ctrlClient, err := client.New(restConfig, client.Options{
-		Scheme: Scheme,
-	})
-	if err != nil {
-		return err
-	}
+		logrus.Infof("Deleting [%s] cluster in namespace [%s]", name, namespace)
 
-	logrus.Infof("deleting [%s] cluster", name)
-
-	cluster := v1alpha1.Cluster{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      name,
-			Namespace: Namespace(),
-		},
-	}
-
-	return ctrlClient.Delete(ctx, &cluster)
+		cluster := v1alpha1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      name,
+				Namespace: namespace,
+			},
+		}
+		// keep bootstrap secrets and tokens if --keep-data flag is passed
+		if keepData {
+			// skip removing tokenSecret
+			if err := RemoveOwnerReferenceFromSecret(ctx, k3kcluster.TokenSecretName(cluster.Name), client, cluster); err != nil {
+				return err
+			}
+
+			// skip removing webhook secret
+			if err := RemoveOwnerReferenceFromSecret(ctx, agent.WebhookSecretName(cluster.Name), client, cluster); err != nil {
+				return err
+			}
+		} else {
+			matchingLabels := ctrlclient.MatchingLabels(map[string]string{"cluster": cluster.Name, "role": "server"})
+			listOpts := ctrlclient.ListOptions{Namespace: cluster.Namespace}
+			matchingLabels.ApplyToList(&listOpts)
+			deleteOpts := &ctrlclient.DeleteAllOfOptions{ListOptions: listOpts}
+
+			if err := client.DeleteAllOf(ctx, &v1.PersistentVolumeClaim{}, deleteOpts); err != nil {
+				return ctrlclient.IgnoreNotFound(err)
+			}
+		}
+
+		if err := client.Delete(ctx, &cluster); err != nil {
+			return ctrlclient.IgnoreNotFound(err)
+		}
+
+		return nil
+	}
 }
+
+func RemoveOwnerReferenceFromSecret(ctx context.Context, name string, cl ctrlclient.Client, cluster v1alpha1.Cluster) error {
+	var secret v1.Secret
+
+	key := types.NamespacedName{
+		Name:      name,
+		Namespace: cluster.Namespace,
+	}
+
+	if err := cl.Get(ctx, key, &secret); err != nil {
+		if apierrors.IsNotFound(err) {
+			logrus.Warnf("%s secret is not found", name)
+			return nil
+		}
+
+		return err
+	}
+
+	if controllerutil.HasControllerReference(&secret) {
+		if err := controllerutil.RemoveOwnerReference(&cluster, &secret, cl.Scheme()); err != nil {
+			return err
+		}
+
+		return cl.Update(ctx, &secret)
+	}
+
+	return nil
+}
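Per the code above, `--keep-data` strips the cluster's owner references from the token and webhook Secrets so they survive deletion; without it, the server PVCs are removed alongside the cluster. Illustratively:

```bash
# Delete the cluster but keep its bootstrap secrets and persistent volumes
k3kcli cluster delete --keep-data mycluster

# Delete the cluster together with its server PVCs (default behavior)
k3kcli cluster delete mycluster
```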
cli/cmds/clusterset.go (new file, 16 lines)

@@ -0,0 +1,16 @@
+package cmds
+
+import (
+	"github.com/urfave/cli/v2"
+)
+
+func NewClusterSetCmd(appCtx *AppContext) *cli.Command {
+	return &cli.Command{
+		Name:  "clusterset",
+		Usage: "clusterset command",
+		Subcommands: []*cli.Command{
+			NewClusterSetCreateCmd(appCtx),
+			NewClusterSetDeleteCmd(appCtx),
+		},
+	}
+}
cli/cmds/clusterset_create.go (new file, 138 lines)

@@ -0,0 +1,138 @@
+package cmds
+
+import (
+	"context"
+	"errors"
+
+	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
+	k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
+	"github.com/sirupsen/logrus"
+	"github.com/urfave/cli/v2"
+	v1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+type ClusterSetCreateConfig struct {
+	mode        string
+	displayName string
+}
+
+func NewClusterSetCreateCmd(appCtx *AppContext) *cli.Command {
+	config := &ClusterSetCreateConfig{}
+
+	createFlags := []cli.Flag{
+		&cli.StringFlag{
+			Name:        "mode",
+			Usage:       "The allowed mode type of the clusterset",
+			Destination: &config.mode,
+			Value:       "shared",
+			Action: func(ctx *cli.Context, value string) error {
+				switch value {
+				case string(v1alpha1.VirtualClusterMode), string(v1alpha1.SharedClusterMode):
+					return nil
+				default:
+					return errors.New(`mode should be one of "shared" or "virtual"`)
+				}
+			},
+		},
+		&cli.StringFlag{
+			Name:        "display-name",
+			Usage:       "The display name of the clusterset",
+			Destination: &config.displayName,
+		},
+	}
+
+	return &cli.Command{
+		Name:            "create",
+		Usage:           "Create new clusterset",
+		UsageText:       "k3kcli clusterset create [command options] NAME",
+		Action:          clusterSetCreateAction(appCtx, config),
+		Flags:           WithCommonFlags(appCtx, createFlags...),
+		HideHelpCommand: true,
+	}
+}
+
+func clusterSetCreateAction(appCtx *AppContext, config *ClusterSetCreateConfig) cli.ActionFunc {
+	return func(clx *cli.Context) error {
+		ctx := context.Background()
+		client := appCtx.Client
+
+		if clx.NArg() != 1 {
+			return cli.ShowSubcommandHelp(clx)
+		}
+
+		name := clx.Args().First()
+		if name == k3kcluster.ClusterInvalidName {
+			return errors.New("invalid cluster name")
+		}
+
+		displayName := config.displayName
+		if displayName == "" {
+			displayName = name
+		}
+
+		// if both display name and namespace are set the name is ignored
+		if config.displayName != "" && appCtx.namespace != "" {
+			logrus.Warnf("Ignoring name [%s] because display name and namespace are set", name)
+		}
+
+		namespace := appCtx.Namespace(name)
+
+		if err := createNamespace(ctx, client, namespace); err != nil {
+			return err
+		}
+
+		_, err := createClusterSet(ctx, client, namespace, v1alpha1.ClusterMode(config.mode), displayName)
+
+		return err
+	}
+}
+
+func createNamespace(ctx context.Context, client client.Client, name string) error {
+	ns := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: name}}
+	if err := client.Get(ctx, types.NamespacedName{Name: name}, ns); err != nil {
+		if !apierrors.IsNotFound(err) {
+			return err
+		}
+
+		logrus.Infof(`Creating namespace [%s]`, name)
+
+		if err := client.Create(ctx, ns); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func createClusterSet(ctx context.Context, client client.Client, namespace string, mode v1alpha1.ClusterMode, displayName string) (*v1alpha1.ClusterSet, error) {
+	logrus.Infof("Creating clusterset in namespace [%s]", namespace)
+
+	clusterSet := &v1alpha1.ClusterSet{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "default",
+			Namespace: namespace,
+		},
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "ClusterSet",
+			APIVersion: "k3k.io/v1alpha1",
+		},
+		Spec: v1alpha1.ClusterSetSpec{
+			AllowedModeTypes: []v1alpha1.ClusterMode{mode},
+			DisplayName:      displayName,
+		},
+	}
+
+	if err := client.Create(ctx, clusterSet); err != nil {
+		if apierrors.IsAlreadyExists(err) {
+			logrus.Infof("ClusterSet in namespace [%s] already exists", namespace)
+		} else {
+			return nil, err
+		}
+	}
+
+	return clusterSet, nil
+}
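Usage of the new subcommand, per the flags above (names are illustrative; `--mode` accepts only `shared` or `virtual`):

```bash
# Creates namespace k3k-myset and a ClusterSet named "default" inside it
k3kcli clusterset create --mode virtual --display-name "My Set" myset
```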
cli/cmds/clusterset_delete.go (new file, 61 lines)

@@ -0,0 +1,61 @@
+package cmds
+
+import (
+	"context"
+	"errors"
+
+	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
+	k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
+	"github.com/sirupsen/logrus"
+	"github.com/urfave/cli/v2"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func NewClusterSetDeleteCmd(appCtx *AppContext) *cli.Command {
+	return &cli.Command{
+		Name:            "delete",
+		Usage:           "Delete an existing clusterset",
+		UsageText:       "k3kcli clusterset delete [command options] NAME",
+		Action:          clusterSetDeleteAction(appCtx),
+		Flags:           WithCommonFlags(appCtx),
+		HideHelpCommand: true,
+	}
+}
+
+func clusterSetDeleteAction(appCtx *AppContext) cli.ActionFunc {
+	return func(clx *cli.Context) error {
+		ctx := context.Background()
+		client := appCtx.Client
+
+		if clx.NArg() != 1 {
+			return cli.ShowSubcommandHelp(clx)
+		}
+
+		name := clx.Args().First()
+		if name == k3kcluster.ClusterInvalidName {
+			return errors.New("invalid cluster name")
+		}
+
+		namespace := appCtx.Namespace(name)
+
+		logrus.Infof("Deleting clusterset in namespace [%s]", namespace)
+
+		clusterSet := &v1alpha1.ClusterSet{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "default",
+				Namespace: namespace,
+			},
+		}
+
+		if err := client.Delete(ctx, clusterSet); err != nil {
+			if apierrors.IsNotFound(err) {
+				logrus.Warnf("ClusterSet not found in namespace [%s]", namespace)
+			} else {
+				return err
+			}
+		}
+
+		return nil
+	}
+}
@@ -20,7 +20,6 @@ import (
 	"k8s.io/client-go/tools/clientcmd"
 	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
 	"k8s.io/client-go/util/retry"
-	"sigs.k8s.io/controller-runtime/pkg/client"
 )
 
 var (

@@ -73,87 +72,88 @@ var (
 	}
 )
 
-var subcommands = []*cli.Command{
-	{
+func NewKubeconfigCmd(appCtx *AppContext) *cli.Command {
+	return &cli.Command{
+		Name:  "kubeconfig",
+		Usage: "Manage kubeconfig for clusters",
+		Subcommands: []*cli.Command{
+			NewKubeconfigGenerateCmd(appCtx),
+		},
+	}
+}
+
+func NewKubeconfigGenerateCmd(appCtx *AppContext) *cli.Command {
+	return &cli.Command{
 		Name:            "generate",
 		Usage:           "Generate kubeconfig for clusters",
 		SkipFlagParsing: false,
-		Action:          generate,
-		Flags:           append(CommonFlags, generateKubeconfigFlags...),
-	},
-}
-
-func NewKubeconfigCommand() *cli.Command {
-	return &cli.Command{
-		Name:        "kubeconfig",
-		Usage:       "Manage kubeconfig for clusters",
-		Subcommands: subcommands,
+		Action:          generate(appCtx),
+		Flags:           WithCommonFlags(appCtx, generateKubeconfigFlags...),
 	}
 }
 
-func generate(clx *cli.Context) error {
-	var cluster v1alpha1.Cluster
+func generate(appCtx *AppContext) cli.ActionFunc {
+	return func(clx *cli.Context) error {
+		ctx := context.Background()
+		client := appCtx.Client
 
-	ctx := context.Background()
+		clusterKey := types.NamespacedName{
+			Name:      name,
+			Namespace: appCtx.Namespace(name),
+		}
 
-	restConfig, err := clientcmd.BuildConfigFromFlags("", Kubeconfig)
-	if err != nil {
-		return err
-	}
+		var cluster v1alpha1.Cluster
 
-	ctrlClient, err := client.New(restConfig, client.Options{
-		Scheme: Scheme,
-	})
-	if err != nil {
-		return err
-	}
-
-	clusterKey := types.NamespacedName{
-		Name:      name,
-		Namespace: Namespace(),
-	}
-
-	if err := ctrlClient.Get(ctx, clusterKey, &cluster); err != nil {
-		return err
-	}
-
-	url, err := url.Parse(restConfig.Host)
-	if err != nil {
-		return err
-	}
-
-	host := strings.Split(url.Host, ":")
-	if kubeconfigServerHost != "" {
-		host = []string{kubeconfigServerHost}
-
-		if err := altNames.Set(kubeconfigServerHost); err != nil {
+		if err := client.Get(ctx, clusterKey, &cluster); err != nil {
 			return err
 		}
-	}
 
-	certAltNames := certs.AddSANs(altNames.Value())
+		url, err := url.Parse(appCtx.RestConfig.Host)
+		if err != nil {
+			return err
+		}
 
-	orgs := org.Value()
-	if orgs == nil {
-		orgs = []string{user.SystemPrivilegedGroup}
-	}
+		host := strings.Split(url.Host, ":")
+		if kubeconfigServerHost != "" {
+			host = []string{kubeconfigServerHost}
 
-	cfg := kubeconfig.KubeConfig{
-		CN:         cn,
-		ORG:        orgs,
-		ExpiryDate: time.Hour * 24 * time.Duration(expirationDays),
-		AltNames:   certAltNames,
-	}
+			if err := altNames.Set(kubeconfigServerHost); err != nil {
+				return err
+			}
+		}
 
-	logrus.Infof("waiting for cluster to be available..")
+		certAltNames := certs.AddSANs(altNames.Value())
 
-	var kubeconfig *clientcmdapi.Config
+		orgs := org.Value()
+		if orgs == nil {
+			orgs = []string{user.SystemPrivilegedGroup}
+		}
 
-	if err := retry.OnError(controller.Backoff, apierrors.IsNotFound, func() error {
-		kubeconfig, err = cfg.Extract(ctx, ctrlClient, &cluster, host[0])
-		return err
-	}); err != nil {
-		return err
+		cfg := kubeconfig.KubeConfig{
+			CN:         cn,
+			ORG:        orgs,
+			ExpiryDate: time.Hour * 24 * time.Duration(expirationDays),
+			AltNames:   certAltNames,
+		}
+
+		logrus.Infof("waiting for cluster to be available..")
+
+		var kubeconfig *clientcmdapi.Config
+
+		if err := retry.OnError(controller.Backoff, apierrors.IsNotFound, func() error {
+			kubeconfig, err = cfg.Extract(ctx, client, &cluster, host[0])
+			return err
+		}); err != nil {
+			return err
+		}
+
+		return writeKubeconfigFile(&cluster, kubeconfig)
+	}
+}
+
+func writeKubeconfigFile(cluster *v1alpha1.Cluster, kubeconfig *clientcmdapi.Config) error {
+	if configName == "" {
+		configName = cluster.Namespace + "-" + cluster.Name + "-kubeconfig.yaml"
 	}
 
 	pwd, err := os.Getwd()

@@ -161,11 +161,7 @@ func generate(clx *cli.Context) error {
 		return err
 	}
 
-	if configName == "" {
-		configName = cluster.Name + "-kubeconfig.yaml"
-	}
-
 	logrus.Infof(`You can start using the cluster with:
 
 	export KUBECONFIG=%s
 	kubectl cluster-info
cli/cmds/root.go (115 lines changed)

@@ -9,58 +9,51 @@ import (
 	"github.com/urfave/cli/v2"
 	"k8s.io/apimachinery/pkg/runtime"
 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )
 
-const (
-	defaultNamespace = "default"
-)
+type AppContext struct {
+	RestConfig *rest.Config
+	Client     client.Client
 
-var (
-	Scheme = runtime.NewScheme()
-
-	debug      bool
-	Kubeconfig string
-	namespace  string
-
-	CommonFlags = []cli.Flag{
-		&cli.StringFlag{
-			Name:        "kubeconfig",
-			EnvVars:     []string{"KUBECONFIG"},
-			Usage:       "kubeconfig path",
-			Destination: &Kubeconfig,
-			Value:       "$HOME/.kube/config",
-		},
-		&cli.StringFlag{
-			Name:        "namespace",
-			Usage:       "namespace to create the k3k cluster in",
-			Destination: &namespace,
-		},
-	}
-)
-
-func init() {
-	_ = clientgoscheme.AddToScheme(Scheme)
-	_ = v1alpha1.AddToScheme(Scheme)
-}
+	// Global flags
+	Debug      bool
+	Kubeconfig string
+	namespace  string
+}
 
 func NewApp() *cli.App {
+	appCtx := &AppContext{}
+
 	app := cli.NewApp()
 	app.Name = "k3kcli"
 	app.Usage = "CLI for K3K"
-	app.Flags = []cli.Flag{
-		&cli.BoolFlag{
-			Name:        "debug",
-			Usage:       "Turn on debug logs",
-			Destination: &debug,
-			EnvVars:     []string{"K3K_DEBUG"},
-		},
-	}
+	app.Flags = WithCommonFlags(appCtx)
 
 	app.Before = func(clx *cli.Context) error {
-		if debug {
+		if appCtx.Debug {
 			logrus.SetLevel(logrus.DebugLevel)
 		}
 
+		restConfig, err := loadRESTConfig(appCtx.Kubeconfig)
+		if err != nil {
+			return err
+		}
+
+		scheme := runtime.NewScheme()
+		_ = clientgoscheme.AddToScheme(scheme)
+		_ = v1alpha1.AddToScheme(scheme)
+
+		ctrlClient, err := client.New(restConfig, client.Options{Scheme: scheme})
+		if err != nil {
+			return err
+		}
+
+		appCtx.RestConfig = restConfig
+		appCtx.Client = ctrlClient
+
 		return nil
 	}

@@ -70,17 +63,55 @@ func NewApp() *cli.App {
 	}
 
 	app.Commands = []*cli.Command{
-		NewClusterCommand(),
-		NewKubeconfigCommand(),
+		NewClusterCmd(appCtx),
+		NewClusterSetCmd(appCtx),
+		NewKubeconfigCmd(appCtx),
 	}
 
 	return app
 }
 
-func Namespace() string {
-	if namespace == "" {
-		return defaultNamespace
+func (ctx *AppContext) Namespace(name string) string {
+	if ctx.namespace != "" {
+		return ctx.namespace
 	}
 
-	return namespace
+	return "k3k-" + name
 }
+
+func loadRESTConfig(kubeconfig string) (*rest.Config, error) {
+	loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
+	configOverrides := &clientcmd.ConfigOverrides{}
+
+	if kubeconfig != "" {
+		loadingRules.ExplicitPath = kubeconfig
+	}
+
+	kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides)
+
+	return kubeConfig.ClientConfig()
+}
+
+func WithCommonFlags(appCtx *AppContext, flags ...cli.Flag) []cli.Flag {
+	commonFlags := []cli.Flag{
+		&cli.BoolFlag{
+			Name:        "debug",
+			Usage:       "Turn on debug logs",
+			Destination: &appCtx.Debug,
+			EnvVars:     []string{"K3K_DEBUG"},
+		},
+		&cli.StringFlag{
+			Name:        "kubeconfig",
+			Usage:       "kubeconfig path",
+			Destination: &appCtx.Kubeconfig,
+			DefaultText: "$HOME/.kube/config or $KUBECONFIG if set",
+		},
+		&cli.StringFlag{
+			Name:        "namespace",
+			Usage:       "namespace to create the k3k cluster in",
+			Destination: &appCtx.namespace,
+		},
+	}
+
+	return append(commonFlags, flags...)
+}
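Note the changed default: `AppContext.Namespace(name)` now derives a per-cluster namespace (`k3k-<name>`) instead of falling back to `default`, unless `--namespace` is given explicitly. For example:

```bash
# Lands in namespace "k3k-mycluster" (derived default)
k3kcli cluster create mycluster

# Lands in namespace "team-a" (explicit override)
k3kcli cluster create --namespace team-a mycluster
```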
@@ -8,6 +8,8 @@ k3kcli
 
 ```
+   [--debug]
    [--kubeconfig]=[value]
+   [--namespace]=[value]
 ```
 
 **Usage**:

@@ -20,6 +22,10 @@ k3kcli [GLOBAL OPTIONS] command [COMMAND OPTIONS] [ARGUMENTS...]
 
 **--debug**: Turn on debug logs
 
+**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)
+
+**--namespace**="": namespace to create the k3k cluster in
+
 
 # COMMANDS

@@ -39,7 +45,11 @@ Create new cluster
 
 **--cluster-cidr**="": cluster CIDR
 
-**--kubeconfig**="": kubeconfig path (default: "$HOME/.kube/config")
+**--clusterset**="": The clusterset to create the cluster in
+
+**--debug**: Turn on debug logs
+
+**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)
 
 **--kubeconfig-server**="": override the kubeconfig server host

@@ -67,7 +77,43 @@ Delete an existing cluster
 
 >k3kcli cluster delete [command options] NAME
 
-**--kubeconfig**="": kubeconfig path (default: "$HOME/.kube/config")
+**--debug**: Turn on debug logs
+
+**--keep-data**: keeps persistence volumes created for the cluster after deletion
+
+**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)
 
 **--namespace**="": namespace to create the k3k cluster in
 
+## clusterset
+
+clusterset command
+
+### create
+
+Create new clusterset
+
+>k3kcli clusterset create [command options] NAME
+
+**--debug**: Turn on debug logs
+
+**--display-name**="": The display name of the clusterset
+
+**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)
+
+**--mode**="": The allowed mode type of the clusterset (default: "shared")
+
+**--namespace**="": namespace to create the k3k cluster in
+
+### delete
+
+Delete an existing clusterset
+
+>k3kcli clusterset delete [command options] NAME
+
+**--debug**: Turn on debug logs
+
+**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)
+
+**--namespace**="": namespace to create the k3k cluster in
+

@@ -85,9 +131,11 @@ Generate kubeconfig for clusters
 
 **--config-name**="": the name of the generated kubeconfig file
 
+**--debug**: Turn on debug logs
+
 **--expiration-days**="": Expiration date of the certificates used for the kubeconfig (default: 356)
 
-**--kubeconfig**="": kubeconfig path (default: "$HOME/.kube/config")
+**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)
 
 **--kubeconfig-server**="": override the kubeconfig server host
@@ -51,23 +51,6 @@ _Appears in:_
 | `spec` _[ClusterSpec](#clusterspec)_ | Spec defines the desired state of the Cluster. | \{ \} |  |
 
 
-#### ClusterLimit
-
-
-
-ClusterLimit defines resource limits for server and agent nodes.
-
-
-
-_Appears in:_
-- [ClusterSpec](#clusterspec)
-
-| Field | Description | Default | Validation |
-| --- | --- | --- | --- |
-| `serverLimit` _[ResourceList](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core)_ | ServerLimit specifies resource limits for server nodes. |  |  |
-| `workerLimit` _[ResourceList](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core)_ | WorkerLimit specifies resource limits for agent nodes. |  |  |
-
-
 #### ClusterList

@@ -124,12 +107,13 @@ _Appears in:_
 | `expose` _[ExposeConfig](#exposeconfig)_ | Expose specifies options for exposing the API server.<br />By default, it's only exposed as a ClusterIP. |  |  |
 | `nodeSelector` _object (keys:string, values:string)_ | NodeSelector specifies node labels to constrain where server/agent pods are scheduled.<br />In "shared" mode, this also applies to workloads. |  |  |
 | `priorityClass` _string_ | PriorityClass specifies the priorityClassName for server/agent pods.<br />In "shared" mode, this also applies to workloads. |  |  |
-| `clusterLimit` _[ClusterLimit](#clusterlimit)_ | Limit defines resource limits for server/agent nodes. |  |  |
 | `tokenSecretRef` _[SecretReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#secretreference-v1-core)_ | TokenSecretRef is a Secret reference containing the token used by worker nodes to join the cluster.<br />The Secret must have a "token" field in its data. |  |  |
 | `tlsSANs` _string array_ | TLSSANs specifies subject alternative names for the K3s server certificate. |  |  |
 | `serverArgs` _string array_ | ServerArgs specifies ordered key-value pairs for K3s server pods.<br />Example: ["--tls-san=example.com"] |  |  |
 | `agentArgs` _string array_ | AgentArgs specifies ordered key-value pairs for K3s agent pods.<br />Example: ["--node-name=my-agent-node"] |  |  |
 | `addons` _[Addon](#addon) array_ | Addons specifies secrets containing raw YAML to deploy on cluster startup. |  |  |
+| `serverLimit` _[ResourceList](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core)_ | ServerLimit specifies resource limits for server nodes. |  |  |
+| `workerLimit` _[ResourceList](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core)_ | WorkerLimit specifies resource limits for agent nodes. |  |  |
@@ -91,3 +91,64 @@ The required binaries for `envtest` are installed with [`setup-envtest`](https:/
 We are using Kubebuilder and `controller-gen` to build the needed CRDs. To generate the specs you can run `make build-crds`.
 
 Remember also to update the CRDs documentation running the `make docs` command.
+
+## How to install k3k on k3d
+
+This document provides a guide on how to install k3k on [k3d](https://k3d.io).
+
+### Installing k3d
+
+Since k3d uses Docker under the hood, we need to expose on the host the ports that we'll then use for the NodePort in virtual cluster creation.
+
+Create the k3d cluster as follows:
+
+```bash
+k3d cluster create k3k -p "30000-30010:30000-30010@server:0"
+```
+
+With this syntax, ports 30000 through 30010 are exposed on the host.
+
+### Install k3k
+
+Now install k3k as usual:
+
+```bash
+helm repo update
+helm install --namespace k3k-system --create-namespace k3k k3k/k3k --devel
+```
+
+### Create a virtual cluster
+
+Once the k3k controller is up and running, create a namespace for the first virtual cluster.
+
+```bash
+kubectl create ns k3k-mycluster
+```
+
+Then create the virtual cluster, exposing it through a NodePort on one of the ports set up in the previous step:
+
+```bash
+cat <<EOF | kubectl apply -f -
+apiVersion: k3k.io/v1alpha1
+kind: Cluster
+metadata:
+  name: mycluster
+  namespace: k3k-mycluster
+spec:
+  expose:
+    nodePort:
+      serverPort: 30001
+EOF
+```
+
+Check that the cluster is ready:
+
+```bash
+kubectl get po -n k3k-mycluster
+```
+
+Finally, fetch the kubeconfig to connect to the virtual cluster we just created:
+
+```bash
+k3kcli kubeconfig generate --name mycluster --namespace k3k-mycluster --kubeconfig-server localhost:30001
+```
@@ -4,7 +4,7 @@ metadata:
   name: clusterset-example
 # spec:
 #   disableNetworkPolicy: false
-#   allowedNodeTypes:
+#   allowedModeTypes:
 #   - "shared"
 #   - "virtual"
 #   podSecurityAdmissionLevel: "baseline"
go.mod (2 lines changed)

@@ -33,6 +33,7 @@ require (
 	k8s.io/client-go v0.29.11
 	k8s.io/component-base v0.29.11
 	k8s.io/component-helpers v0.29.11
+	k8s.io/kubectl v0.29.11
 	k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
 	sigs.k8s.io/controller-runtime v0.17.5
 )

@@ -207,7 +208,6 @@ require (
 	k8s.io/klog/v2 v2.130.1 // indirect
 	k8s.io/kms v0.29.11 // indirect
 	k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
-	k8s.io/kubectl v0.29.11 // indirect
 	oras.land/oras-go v1.2.5 // indirect
 	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect
 	sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
@@ -40,6 +40,7 @@ import (
 	"k8s.io/client-go/transport/spdy"
 	compbasemetrics "k8s.io/component-base/metrics"
 	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 	"sigs.k8s.io/controller-runtime/pkg/manager"
 )

@@ -398,6 +399,11 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
 		"virtual_namespace", pod.Namespace, "virtual_name", pod.Name,
 	)
 
+	// set ownerReference to the cluster object
+	if err := controllerutil.SetControllerReference(&cluster, tPod, p.HostClient.Scheme()); err != nil {
+		return err
+	}
+
 	return p.HostClient.Create(ctx, tPod)
 }
@@ -114,11 +114,6 @@ type ClusterSpec struct {
 	// +optional
 	PriorityClass string `json:"priorityClass,omitempty"`
 
-	// Limit defines resource limits for server/agent nodes.
-	//
-	// +optional
-	Limit *ClusterLimit `json:"clusterLimit,omitempty"`
-
 	// TokenSecretRef is a Secret reference containing the token used by worker nodes to join the cluster.
 	// The Secret must have a "token" field in its data.
 	//

@@ -146,6 +141,16 @@ type ClusterSpec struct {
 	//
 	// +optional
 	Addons []Addon `json:"addons,omitempty"`
+
+	// ServerLimit specifies resource limits for server nodes.
+	//
+	// +optional
+	ServerLimit v1.ResourceList `json:"serverLimit,omitempty"`
+
+	// WorkerLimit specifies resource limits for agent nodes.
+	//
+	// +optional
+	WorkerLimit v1.ResourceList `json:"workerLimit,omitempty"`
 }
 
 // ClusterMode is the possible provisioning mode of a Cluster.

@@ -175,15 +180,6 @@ const (
 	DynamicPersistenceMode = PersistenceMode("dynamic")
 )
 
-// ClusterLimit defines resource limits for server and agent nodes.
-type ClusterLimit struct {
-	// ServerLimit specifies resource limits for server nodes.
-	ServerLimit v1.ResourceList `json:"serverLimit,omitempty"`
-
-	// WorkerLimit specifies resource limits for agent nodes.
-	WorkerLimit v1.ResourceList `json:"workerLimit,omitempty"`
-}
-
 // Addon specifies a Secret containing YAML to be deployed on cluster startup.
 type Addon struct {
 	// SecretNamespace is the namespace of the Secret.

@@ -317,6 +313,9 @@ type ClusterList struct {
 // +kubebuilder:storageversion
 // +kubebuilder:subresource:status
 // +kubebuilder:object:root=true
+// +kubebuilder:validation:XValidation:rule="self.metadata.name == \"default\"",message="Name must match 'default'"
+// +kubebuilder:printcolumn:JSONPath=".spec.displayName",name=Display Name,type=string
+// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",name=Age,type=date
 
 // ClusterSet represents a group of virtual Kubernetes clusters managed by k3k.
 // It allows defining common configurations and constraints for the clusters within the set.

@@ -338,10 +337,21 @@ type ClusterSet struct {
 // ClusterSetSpec defines the desired state of a ClusterSet.
 type ClusterSetSpec struct {
 
-	// DefaultLimits specifies the default resource limits for servers/agents when a cluster in the set doesn't provide any.
+	// DisplayName is the human-readable name for the set.
 	//
 	// +optional
-	DefaultLimits *ClusterLimit `json:"defaultLimits,omitempty"`
+	DisplayName string `json:"displayName,omitempty"`
+
+	// Quota specifies the resource limits for clusters within a clusterset.
+	//
+	// +optional
+	Quota *v1.ResourceQuotaSpec `json:"quota,omitempty"`
+
+	// Limit specifies the LimitRange that will be applied to all pods within the ClusterSet
+	// to set defaults and constraints (min/max)
+	//
+	// +optional
+	Limit *v1.LimitRangeSpec `json:"limit,omitempty"`
 
 	// DefaultNodeSelector specifies the node selector that applies to all clusters (server + agent) in the set.
 	//

@@ -353,18 +363,13 @@ type ClusterSetSpec struct {
 	// +optional
 	DefaultPriorityClass string `json:"defaultPriorityClass,omitempty"`
 
-	// MaxLimits specifies the maximum resource limits that apply to all clusters (server + agent) in the set.
-	//
-	// +optional
-	MaxLimits v1.ResourceList `json:"maxLimits,omitempty"`
-
-	// AllowedNodeTypes specifies the allowed cluster provisioning modes. Defaults to [shared].
+	// AllowedModeTypes specifies the allowed cluster provisioning modes. Defaults to [shared].
 	//
 	// +kubebuilder:default={shared}
 	// +kubebuilder:validation:XValidation:message="mode is immutable",rule="self == oldSelf"
 	// +kubebuilder:validation:MinItems=1
 	// +optional
-	AllowedNodeTypes []ClusterMode `json:"allowedNodeTypes,omitempty"`
+	AllowedModeTypes []ClusterMode `json:"allowedModeTypes,omitempty"`
 
 	// DisableNetworkPolicy indicates whether to disable the creation of a default network policy for cluster isolation.
 	//
@@ -55,36 +55,6 @@ func (in *Cluster) DeepCopyObject() runtime.Object {
return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterLimit) DeepCopyInto(out *ClusterLimit) {
*out = *in
if in.ServerLimit != nil {
in, out := &in.ServerLimit, &out.ServerLimit
*out = make(v1.ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
if in.WorkerLimit != nil {
in, out := &in.WorkerLimit, &out.WorkerLimit
*out = make(v1.ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterLimit.
func (in *ClusterLimit) DeepCopy() *ClusterLimit {
if in == nil {
return nil
}
out := new(ClusterLimit)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterList) DeepCopyInto(out *ClusterList) {
*out = *in
@@ -182,16 +152,14 @@ func (in *ClusterSetList) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterSetSpec) DeepCopyInto(out *ClusterSetSpec) {
*out = *in
if in.MaxLimits != nil {
in, out := &in.MaxLimits, &out.MaxLimits
*out = make(v1.ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
if in.Quota != nil {
in, out := &in.Quota, &out.Quota
*out = new(v1.ResourceQuotaSpec)
(*in).DeepCopyInto(*out)
}
if in.DefaultLimits != nil {
in, out := &in.DefaultLimits, &out.DefaultLimits
*out = new(ClusterLimit)
if in.Limit != nil {
in, out := &in.Limit, &out.Limit
*out = new(v1.LimitRangeSpec)
(*in).DeepCopyInto(*out)
}
if in.DefaultNodeSelector != nil {
@@ -201,8 +169,8 @@ func (in *ClusterSetSpec) DeepCopyInto(out *ClusterSetSpec) {
(*out)[key] = val
}
}
if in.AllowedNodeTypes != nil {
in, out := &in.AllowedNodeTypes, &out.AllowedNodeTypes
if in.AllowedModeTypes != nil {
in, out := &in.AllowedModeTypes, &out.AllowedModeTypes
*out = make([]ClusterMode, len(*in))
copy(*out, *in)
}
@@ -260,6 +228,12 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
*out = new(int32)
**out = **in
}
in.Persistence.DeepCopyInto(&out.Persistence)
if in.Expose != nil {
in, out := &in.Expose, &out.Expose
*out = new(ExposeConfig)
(*in).DeepCopyInto(*out)
}
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = make(map[string]string, len(*in))
@@ -267,16 +241,16 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
(*out)[key] = val
}
}
if in.Limit != nil {
in, out := &in.Limit, &out.Limit
*out = new(ClusterLimit)
(*in).DeepCopyInto(*out)
}
if in.TokenSecretRef != nil {
in, out := &in.TokenSecretRef, &out.TokenSecretRef
*out = new(v1.SecretReference)
**out = **in
}
if in.TLSSANs != nil {
in, out := &in.TLSSANs, &out.TLSSANs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.ServerArgs != nil {
in, out := &in.ServerArgs, &out.ServerArgs
*out = make([]string, len(*in))
@@ -287,21 +261,24 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.TLSSANs != nil {
in, out := &in.TLSSANs, &out.TLSSANs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Addons != nil {
in, out := &in.Addons, &out.Addons
*out = make([]Addon, len(*in))
copy(*out, *in)
}
in.Persistence.DeepCopyInto(&out.Persistence)
if in.Expose != nil {
in, out := &in.Expose, &out.Expose
*out = new(ExposeConfig)
(*in).DeepCopyInto(*out)
if in.ServerLimit != nil {
in, out := &in.ServerLimit, &out.ServerLimit
*out = make(v1.ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
if in.WorkerLimit != nil {
in, out := &in.WorkerLimit, &out.WorkerLimit
*out = make(v1.ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
return
}

@@ -228,5 +228,12 @@ func (v *VirtualAgent) podSpec(image, name string, args []string, affinitySelect
},
}

// apply resource limits to the agent container if specified.
if v.cluster.Spec.WorkerLimit != nil {
podSpec.Containers[0].Resources = v1.ResourceRequirements{
Limits: v.cluster.Spec.WorkerLimit,
}
}

return podSpec
}

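Editor's note: the agent hunk above reads WorkerLimit directly off ClusterSpec, mirroring the ServerLimit handling shown further down for servers. Assuming the json tags keep the serverLimit/workerLimit names from the removed ClusterLimit struct, a Cluster using them might be sketched as:

apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
  name: mycluster
  namespace: team-a          # illustrative
spec:
  servers: 1
  serverLimit:               # assumed tag; applied to the server container
    cpu: "1"
    memory: 2Gi
  workerLimit:               # assumed tag; applied to the agent container
    cpu: 500m
    memory: 1Gi
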
@@ -11,11 +11,13 @@ import (

"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
"github.com/rancher/k3k/pkg/controller/cluster/server"
"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -121,6 +123,11 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request

// if there was an error during the reconciliation, return
if reconcilerErr != nil {
if errors.Is(reconcilerErr, bootstrap.ErrServerNotReady) {
log.Info("server not ready, requeueing")
return reconcile.Result{RequeueAfter: time.Second * 10}, nil
}

return reconcile.Result{}, reconcilerErr
}

@@ -202,6 +209,10 @@ func (c *ClusterReconciler) reconcileCluster(ctx context.Context, cluster *v1alp
}
}

if err := c.ensureNetworkPolicy(ctx, cluster); err != nil {
return err
}

service, err := c.ensureClusterService(ctx, cluster)
if err != nil {
return err
@@ -300,6 +311,84 @@ func (c *ClusterReconciler) createClusterConfigs(ctx context.Context, cluster *v
return nil
}

func (c *ClusterReconciler) ensureNetworkPolicy(ctx context.Context, cluster *v1alpha1.Cluster) error {
log := ctrl.LoggerFrom(ctx)
log.Info("ensuring network policy")

expectedNetworkPolicy := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: k3kcontroller.SafeConcatNameWithPrefix(cluster.Name),
Namespace: cluster.Namespace,
},
TypeMeta: metav1.TypeMeta{
Kind: "NetworkPolicy",
APIVersion: "networking.k8s.io/v1",
},
Spec: networkingv1.NetworkPolicySpec{
PolicyTypes: []networkingv1.PolicyType{
networkingv1.PolicyTypeIngress,
networkingv1.PolicyTypeEgress,
},
Ingress: []networkingv1.NetworkPolicyIngressRule{
{},
},
Egress: []networkingv1.NetworkPolicyEgressRule{
{
To: []networkingv1.NetworkPolicyPeer{
{
IPBlock: &networkingv1.IPBlock{
CIDR: "0.0.0.0/0",
Except: []string{cluster.Status.ClusterCIDR},
},
},
{
NamespaceSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"kubernetes.io/metadata.name": cluster.Namespace,
},
},
},
{
NamespaceSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"kubernetes.io/metadata.name": metav1.NamespaceSystem,
},
},
PodSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"k8s-app": "kube-dns",
},
},
},
},
},
},
},
}

currentNetworkPolicy := expectedNetworkPolicy.DeepCopy()
result, err := controllerutil.CreateOrUpdate(ctx, c.Client, currentNetworkPolicy, func() error {
if err := controllerutil.SetControllerReference(cluster, currentNetworkPolicy, c.Scheme); err != nil {
return err
}

currentNetworkPolicy.Spec = expectedNetworkPolicy.Spec

return nil
})

if err != nil {
return err
}

key := client.ObjectKeyFromObject(currentNetworkPolicy)
if result != controllerutil.OperationResultNone {
log.Info("cluster network policy updated", "key", key, "result", result)
}

return nil
}

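Editor's note: rendered as a manifest, the policy that ensureNetworkPolicy reconciles is roughly the sketch below. The name is derived via SafeConcatNameWithPrefix (shown here as a hypothetical k3k-mycluster) and the except CIDR comes from cluster.Status.ClusterCIDR; both values are illustrative.

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: k3k-mycluster        # hypothetical; SafeConcatNameWithPrefix(cluster.Name)
  namespace: team-a
spec:
  podSelector: {}            # selects every pod in the cluster namespace
  policyTypes: [Ingress, Egress]
  ingress:
    - {}                     # allow all ingress
  egress:
    - to:
        - ipBlock:
            cidr: 0.0.0.0/0
            except: ["10.42.0.0/16"]   # cluster.Status.ClusterCIDR, illustrative
        - namespaceSelector:
            matchLabels:
              kubernetes.io/metadata.name: team-a    # the cluster namespace
        - namespaceSelector:
            matchLabels:
              kubernetes.io/metadata.name: kube-system
          podSelector:
            matchLabels:
              k8s-app: kube-dns
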
func (c *ClusterReconciler) ensureClusterService(ctx context.Context, cluster *v1alpha1.Cluster) (*v1.Service, error) {
log := ctrl.LoggerFrom(ctx)
log.Info("ensuring cluster service")

@@ -6,11 +6,13 @@ import (
"time"

"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/cluster/server"
"sigs.k8s.io/controller-runtime/pkg/client"

corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"

@@ -18,7 +20,7 @@ import (
. "github.com/onsi/gomega"
)

var _ = Describe("Cluster Controller", func() {
var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), func() {

Context("creating a Cluster", func() {

@@ -55,7 +57,7 @@ var _ = Describe("Cluster Controller", func() {
Expect(cluster.Spec.Servers).To(Equal(ptr.To[int32](1)))
Expect(cluster.Spec.Version).To(BeEmpty())
// TOFIX
//Expect(cluster.Spec.Persistence.Type).To(Equal(v1alpha1.DynamicNodesType))
// Expect(cluster.Spec.Persistence.Type).To(Equal(v1alpha1.DynamicPersistenceMode))

serverVersion, err := k8s.DiscoveryClient.ServerVersion()
Expect(err).To(Not(HaveOccurred()))
@@ -70,9 +72,27 @@ var _ = Describe("Cluster Controller", func() {
WithTimeout(time.Second * 30).
WithPolling(time.Second).
Should(Equal(expectedHostVersion))

// check NetworkPolicy
expectedNetworkPolicy := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: k3kcontroller.SafeConcatNameWithPrefix(cluster.Name),
Namespace: cluster.Namespace,
},
}

err = k8sClient.Get(ctx, client.ObjectKeyFromObject(expectedNetworkPolicy), expectedNetworkPolicy)
Expect(err).To(Not(HaveOccurred()))

spec := expectedNetworkPolicy.Spec
Expect(spec.PolicyTypes).To(HaveLen(2))
Expect(spec.PolicyTypes).To(ContainElement(networkingv1.PolicyTypeEgress))
Expect(spec.PolicyTypes).To(ContainElement(networkingv1.PolicyTypeIngress))

Expect(spec.Ingress).To(Equal([]networkingv1.NetworkPolicyIngressRule{{}}))
})

When("exposing the cluster with nodePort and custom posrts", func() {
When("exposing the cluster with nodePort and custom ports", func() {
It("will have a NodePort service with the specified port exposed", func() {
cluster.Spec.Expose = &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{

@@ -8,6 +8,7 @@ import (
"errors"
"fmt"
"net/http"
"syscall"
"time"

"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
@@ -17,6 +18,8 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
)

var ErrServerNotReady = errors.New("server not ready")

type ControlRuntimeBootstrap struct {
ServerCA content `json:"serverCA"`
ServerCAKey content `json:"serverCAKey"`
@@ -68,6 +71,10 @@ func requestBootstrap(token, serverIP string) (*ControlRuntimeBootstrap, error)

resp, err := client.Do(req)
if err != nil {
if errors.Is(err, syscall.ECONNREFUSED) {
return nil, ErrServerNotReady
}

return nil, err
}
defer resp.Body.Close()

@@ -46,11 +46,6 @@ func New(cluster *v1alpha1.Cluster, client client.Client, token, mode string) *S
}

func (s *Server) podSpec(image, name string, persistent bool, startupCmd string) v1.PodSpec {
var limit v1.ResourceList
if s.cluster.Spec.Limit != nil && s.cluster.Spec.Limit.ServerLimit != nil {
limit = s.cluster.Spec.Limit.ServerLimit
}

podSpec := v1.PodSpec{
NodeSelector: s.cluster.Spec.NodeSelector,
PriorityClassName: s.cluster.Spec.PriorityClass,
@@ -118,9 +113,6 @@ func (s *Server) podSpec(image, name string, persistent bool, startupCmd string)
{
Name: name,
Image: image,
Resources: v1.ResourceRequirements{
Limits: limit,
},
Env: []v1.EnvVar{
{
Name: "POD_NAME",
@@ -220,6 +212,13 @@ func (s *Server) podSpec(image, name string, persistent bool, startupCmd string)
}
}

// apply resource limits to the server container if specified.
if s.cluster.Spec.ServerLimit != nil {
podSpec.Containers[0].Resources = v1.ResourceRequirements{
Limits: s.cluster.Spec.ServerLimit,
}
}

return podSpec
}


@@ -16,7 +16,6 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
@@ -32,7 +31,7 @@ const (
)

type ClusterSetReconciler struct {
Client ctrlruntimeclient.Client
Client client.Client
Scheme *runtime.Scheme
ClusterCIDR string
}
@@ -49,6 +48,7 @@ func Add(ctx context.Context, mgr manager.Manager, clusterCIDR string) error {
return ctrl.NewControllerManagedBy(mgr).
For(&v1alpha1.ClusterSet{}).
Owns(&networkingv1.NetworkPolicy{}).
Owns(&v1.ResourceQuota{}).
WithOptions(controller.Options{
MaxConcurrentReconciles: maxConcurrentReconciles,
}).
@@ -59,54 +59,33 @@ func Add(ctx context.Context, mgr manager.Manager, clusterCIDR string) error {
).
Watches(
&v1alpha1.Cluster{},
handler.EnqueueRequestsFromMapFunc(sameNamespaceEventHandler(reconciler)),
handler.EnqueueRequestsFromMapFunc(namespaceEventHandler(reconciler)),
).
Complete(&reconciler)
}

// namespaceEventHandler will enqueue reconciling requests for all the ClusterSets in the changed namespace
// namespaceEventHandler will enqueue a reconcile request for the ClusterSet in the given namespace
func namespaceEventHandler(reconciler ClusterSetReconciler) handler.MapFunc {
return func(ctx context.Context, obj client.Object) []reconcile.Request {
var (
requests []reconcile.Request
set v1alpha1.ClusterSetList
)
// if the object is a Namespace, use the name as the namespace
namespace := obj.GetName()

_ = reconciler.Client.List(ctx, &set, client.InNamespace(obj.GetName()))

for _, clusterSet := range set.Items {
requests = append(requests, reconcile.Request{
NamespacedName: types.NamespacedName{
Name: clusterSet.Name,
Namespace: obj.GetName(),
},
})
// if the object is a namespaced resource, use the namespace
if obj.GetNamespace() != "" {
namespace = obj.GetNamespace()
}

return requests
}
}

// sameNamespaceEventHandler will enqueue reconciling requests for all the ClusterSets in the changed namespace
func sameNamespaceEventHandler(reconciler ClusterSetReconciler) handler.MapFunc {
return func(ctx context.Context, obj client.Object) []reconcile.Request {
var (
requests []reconcile.Request
set v1alpha1.ClusterSetList
)

_ = reconciler.Client.List(ctx, &set, client.InNamespace(obj.GetNamespace()))

for _, clusterSet := range set.Items {
requests = append(requests, reconcile.Request{
NamespacedName: types.NamespacedName{
Name: clusterSet.Name,
Namespace: obj.GetNamespace(),
},
})
key := types.NamespacedName{
Name: "default",
Namespace: namespace,
}

return requests
var clusterSet v1alpha1.ClusterSet
if err := reconciler.Client.Get(ctx, key, &clusterSet); err != nil {
return nil
}

return []reconcile.Request{{NamespacedName: key}}
}
}

@@ -131,42 +110,56 @@ func (c *ClusterSetReconciler) Reconcile(ctx context.Context, req reconcile.Requ
return reconcile.Result{}, client.IgnoreNotFound(err)
}

if err := c.reconcileNetworkPolicy(ctx, &clusterSet); err != nil {
return reconcile.Result{}, err
orig := clusterSet.DeepCopy()

reconcilerErr := c.reconcileClusterSet(ctx, &clusterSet)

// update Status if needed
if !reflect.DeepEqual(orig.Status, clusterSet.Status) {
if err := c.Client.Status().Update(ctx, &clusterSet); err != nil {
return reconcile.Result{}, err
}
}

if err := c.reconcileNamespacePodSecurityLabels(ctx, &clusterSet); err != nil {
return reconcile.Result{}, err
// if there was an error during the reconciliation, return
if reconcilerErr != nil {
return reconcile.Result{}, reconcilerErr
}

if err := c.reconcileClusters(ctx, &clusterSet); err != nil {
return reconcile.Result{}, err
// update ClusterSet if needed
if !reflect.DeepEqual(orig.Spec, clusterSet.Spec) {
if err := c.Client.Update(ctx, &clusterSet); err != nil {
return reconcile.Result{}, err
}
}

// TODO: Add resource quota for clustersets
// if clusterSet.Spec.MaxLimits != nil {
// quota := v1.ResourceQuota{
// ObjectMeta: metav1.ObjectMeta{
// Name: "clusterset-quota",
// Namespace: clusterSet.Namespace,
// OwnerReferences: []metav1.OwnerReference{
// {
// UID: clusterSet.UID,
// Name: clusterSet.Name,
// APIVersion: clusterSet.APIVersion,
// Kind: clusterSet.Kind,
// },
// },
// },
// }
// quota.Spec.Hard = clusterSet.Spec.MaxLimits
// if err := c.Client.Create(ctx, &quota); err != nil {
// return reconcile.Result{}, fmt.Errorf("unable to create resource quota from cluster set: %w", err)
// }
// }
return reconcile.Result{}, nil
}

func (c *ClusterSetReconciler) reconcileClusterSet(ctx context.Context, clusterSet *v1alpha1.ClusterSet) error {
if err := c.reconcileNetworkPolicy(ctx, clusterSet); err != nil {
return err
}

if err := c.reconcileNamespacePodSecurityLabels(ctx, clusterSet); err != nil {
return err
}

if err := c.reconcileLimit(ctx, clusterSet); err != nil {
return err
}

if err := c.reconcileQuota(ctx, clusterSet); err != nil {
return err
}

if err := c.reconcileClusters(ctx, clusterSet); err != nil {
return err
}

return nil
}

func (c *ClusterSetReconciler) reconcileNetworkPolicy(ctx context.Context, clusterSet *v1alpha1.ClusterSet) error {
log := ctrl.LoggerFrom(ctx)
log.Info("reconciling NetworkPolicy")
@@ -315,7 +308,7 @@ func (c *ClusterSetReconciler) reconcileClusters(ctx context.Context, clusterSet
log.Info("reconciling Clusters")

var clusters v1alpha1.ClusterList
if err := c.Client.List(ctx, &clusters, ctrlruntimeclient.InNamespace(clusterSet.Namespace)); err != nil {
if err := c.Client.List(ctx, &clusters, client.InNamespace(clusterSet.Namespace)); err != nil {
return err
}

@@ -340,3 +333,98 @@ func (c *ClusterSetReconciler) reconcileClusters(ctx context.Context, clusterSet

return err
}

func (c *ClusterSetReconciler) reconcileQuota(ctx context.Context, clusterSet *v1alpha1.ClusterSet) error {
if clusterSet.Spec.Quota == nil {
// check if the ResourceQuota object exists and delete it.
var toDeleteResourceQuota v1.ResourceQuota

key := types.NamespacedName{
Name: k3kcontroller.SafeConcatNameWithPrefix(clusterSet.Name),
Namespace: clusterSet.Namespace,
}

if err := c.Client.Get(ctx, key, &toDeleteResourceQuota); err != nil {
return client.IgnoreNotFound(err)
}

return c.Client.Delete(ctx, &toDeleteResourceQuota)
}

// create/update resource Quota
resourceQuota := resourceQuota(clusterSet)

if err := ctrl.SetControllerReference(clusterSet, &resourceQuota, c.Scheme); err != nil {
return err
}

if err := c.Client.Create(ctx, &resourceQuota); err != nil {
if apierrors.IsAlreadyExists(err) {
return c.Client.Update(ctx, &resourceQuota)
}
}

return nil
}

func resourceQuota(clusterSet *v1alpha1.ClusterSet) v1.ResourceQuota {
return v1.ResourceQuota{
ObjectMeta: metav1.ObjectMeta{
Name: k3kcontroller.SafeConcatNameWithPrefix(clusterSet.Name),
Namespace: clusterSet.Namespace,
},
TypeMeta: metav1.TypeMeta{
Kind: "ResourceQuota",
APIVersion: "v1",
},
Spec: *clusterSet.Spec.Quota,
}
}

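Editor's note: for the quota values used in the tests further down, reconcileQuota therefore materializes a ResourceQuota along these lines. The name is hypothetical (derived via SafeConcatNameWithPrefix) and the owner reference set by SetControllerReference is omitted for brevity.

apiVersion: v1
kind: ResourceQuota
metadata:
  name: k3k-default          # hypothetical; SafeConcatNameWithPrefix(clusterSet.Name)
  namespace: team-a
spec:
  hard:
    cpu: 800m
    memory: 1Gi
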
func (c *ClusterSetReconciler) reconcileLimit(ctx context.Context, clusterSet *v1alpha1.ClusterSet) error {
log := ctrl.LoggerFrom(ctx)
log.Info("Reconciling ClusterSet Limit")

// delete the LimitRange if spec.limit isn't specified.
if clusterSet.Spec.Limit == nil {
var toDeleteLimitRange v1.LimitRange

key := types.NamespacedName{
Name: k3kcontroller.SafeConcatNameWithPrefix(clusterSet.Name),
Namespace: clusterSet.Namespace,
}

if err := c.Client.Get(ctx, key, &toDeleteLimitRange); err != nil {
return client.IgnoreNotFound(err)
}

return c.Client.Delete(ctx, &toDeleteLimitRange)
}

limitRange := limitRange(clusterSet)
if err := ctrl.SetControllerReference(clusterSet, &limitRange, c.Scheme); err != nil {
return err
}

if err := c.Client.Create(ctx, &limitRange); err != nil {
if apierrors.IsAlreadyExists(err) {
return c.Client.Update(ctx, &limitRange)
}
}

return nil
}

func limitRange(clusterSet *v1alpha1.ClusterSet) v1.LimitRange {
return v1.LimitRange{
ObjectMeta: metav1.ObjectMeta{
Name: k3kcontroller.SafeConcatNameWithPrefix(clusterSet.Name),
Namespace: clusterSet.Namespace,
},
TypeMeta: metav1.TypeMeta{
Kind: "LimitRange",
APIVersion: "v1",
},
Spec: *clusterSet.Spec.Limit,
}
}

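Editor's note: likewise, the Limit spec is copied verbatim into a namespaced LimitRange; with the spec used in the tests further down it would render roughly as the sketch below (name hypothetical, as above).

apiVersion: v1
kind: LimitRange
metadata:
  name: k3k-default          # hypothetical; SafeConcatNameWithPrefix(clusterSet.Name)
  namespace: team-a
spec:
  limits:
    - type: Container
      defaultRequest:
        cpu: 500m
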
@@ -8,11 +8,11 @@ import (
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"

k3kcontroller "github.com/rancher/k3k/pkg/controller"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/ptr"

@@ -20,7 +20,7 @@ import (
. "github.com/onsi/gomega"
)

var _ = Describe("ClusterSet Controller", func() {
var _ = Describe("ClusterSet Controller", Label("controller"), Label("ClusterSet"), func() {

Context("creating a ClusterSet", func() {

@@ -29,34 +29,62 @@ var _ = Describe("ClusterSet Controller", func() {
)

BeforeEach(func() {
createdNS := &corev1.Namespace{ObjectMeta: v1.ObjectMeta{GenerateName: "ns-"}}
createdNS := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-"}}
err := k8sClient.Create(context.Background(), createdNS)
Expect(err).To(Not(HaveOccurred()))
namespace = createdNS.Name
})

When("created with a default spec", func() {
It("should have only the 'shared' allowedNodeTypes", func() {
It("should have only the 'shared' allowedModeTypes", func() {
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: namespace,
},
}

err := k8sClient.Create(ctx, clusterSet)
Expect(err).To(Not(HaveOccurred()))

allowedModeTypes := clusterSet.Spec.AllowedNodeTypes
allowedModeTypes := clusterSet.Spec.AllowedModeTypes
Expect(allowedModeTypes).To(HaveLen(1))
Expect(allowedModeTypes).To(ContainElement(v1alpha1.SharedClusterMode))
})

It("should not be able to create a cluster with a non 'default' name", func() {
err := k8sClient.Create(ctx, &v1alpha1.ClusterSet{
ObjectMeta: metav1.ObjectMeta{
Name: "another-name",
Namespace: namespace,
},
})
Expect(err).To(HaveOccurred())
})

It("should not be able to create two ClusterSets in the same namespace", func() {
err := k8sClient.Create(ctx, &v1alpha1.ClusterSet{
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: namespace,
},
})
Expect(err).To(Not(HaveOccurred()))

err = k8sClient.Create(ctx, &v1alpha1.ClusterSet{
ObjectMeta: metav1.ObjectMeta{
Name: "default-2",
Namespace: namespace,
},
})
Expect(err).To(HaveOccurred())
})

It("should create a NetworkPolicy", func() {
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: namespace,
},
}

@@ -118,9 +146,9 @@ var _ = Describe("ClusterSet Controller", func() {
When("created with DisableNetworkPolicy", func() {
It("should not create a NetworkPolicy if true", func() {
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSetSpec{
DisableNetworkPolicy: true,
@@ -147,9 +175,9 @@ var _ = Describe("ClusterSet Controller", func() {

It("should delete the NetworkPolicy if changed to false", func() {
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: namespace,
},
}

@@ -191,9 +219,9 @@ var _ = Describe("ClusterSet Controller", func() {

It("should recreate the NetworkPolicy if deleted", func() {
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: namespace,
},
}

@@ -242,12 +270,12 @@ var _ = Describe("ClusterSet Controller", func() {
When("created specifying the mode", func() {
It("should have the 'virtual' mode if specified", func() {
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSetSpec{
AllowedNodeTypes: []v1alpha1.ClusterMode{
AllowedModeTypes: []v1alpha1.ClusterMode{
v1alpha1.VirtualClusterMode,
},
},
@@ -256,19 +284,19 @@ var _ = Describe("ClusterSet Controller", func() {
err := k8sClient.Create(ctx, clusterSet)
Expect(err).To(Not(HaveOccurred()))

allowedModeTypes := clusterSet.Spec.AllowedNodeTypes
allowedModeTypes := clusterSet.Spec.AllowedModeTypes
Expect(allowedModeTypes).To(HaveLen(1))
Expect(allowedModeTypes).To(ContainElement(v1alpha1.VirtualClusterMode))
})

It("should have both modes if specified", func() {
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSetSpec{
AllowedNodeTypes: []v1alpha1.ClusterMode{
AllowedModeTypes: []v1alpha1.ClusterMode{
v1alpha1.SharedClusterMode,
v1alpha1.VirtualClusterMode,
},
@@ -278,7 +306,7 @@ var _ = Describe("ClusterSet Controller", func() {
err := k8sClient.Create(ctx, clusterSet)
Expect(err).To(Not(HaveOccurred()))

allowedModeTypes := clusterSet.Spec.AllowedNodeTypes
allowedModeTypes := clusterSet.Spec.AllowedModeTypes
Expect(allowedModeTypes).To(HaveLen(2))
Expect(allowedModeTypes).To(ContainElements(
v1alpha1.SharedClusterMode,
@@ -288,12 +316,12 @@ var _ = Describe("ClusterSet Controller", func() {

It("should fail for a non-existing mode", func() {
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSetSpec{
AllowedNodeTypes: []v1alpha1.ClusterMode{
AllowedModeTypes: []v1alpha1.ClusterMode{
v1alpha1.SharedClusterMode,
v1alpha1.VirtualClusterMode,
v1alpha1.ClusterMode("non-existing"),
@@ -315,9 +343,9 @@ var _ = Describe("ClusterSet Controller", func() {
)

clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSetSpec{
PodSecurityAdmissionLevel: &privileged,
@@ -327,7 +355,7 @@ var _ = Describe("ClusterSet Controller", func() {
err := k8sClient.Create(ctx, clusterSet)
Expect(err).To(Not(HaveOccurred()))

var ns corev1.Namespace
var ns v1.Namespace

// Check privileged

@@ -418,9 +446,9 @@ var _ = Describe("ClusterSet Controller", func() {
privileged := v1alpha1.PrivilegedPodSecurityAdmissionLevel

clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSetSpec{
PodSecurityAdmissionLevel: &privileged,
@@ -430,7 +458,7 @@ var _ = Describe("ClusterSet Controller", func() {
err := k8sClient.Create(ctx, clusterSet)
Expect(err).To(Not(HaveOccurred()))

var ns corev1.Namespace
var ns v1.Namespace

// wait a bit for the namespace to be updated
Eventually(func() bool {
@@ -469,9 +497,9 @@ var _ = Describe("ClusterSet Controller", func() {
When("a cluster in the same namespace is present", func() {
It("should update it if needed", func() {
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSetSpec{
DefaultPriorityClass: "foobar",
@@ -482,7 +510,7 @@ var _ = Describe("ClusterSet Controller", func() {
Expect(err).To(Not(HaveOccurred()))

cluster := &v1alpha1.Cluster{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
@@ -510,9 +538,9 @@ var _ = Describe("ClusterSet Controller", func() {

It("should update the nodeSelector", func() {
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSetSpec{
DefaultNodeSelector: map[string]string{"label-1": "value-1"},
@@ -523,7 +551,7 @@ var _ = Describe("ClusterSet Controller", func() {
Expect(err).To(Not(HaveOccurred()))

cluster := &v1alpha1.Cluster{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
@@ -551,9 +579,9 @@ var _ = Describe("ClusterSet Controller", func() {

It("should update the nodeSelector if changed", func() {
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSetSpec{
DefaultNodeSelector: map[string]string{"label-1": "value-1"},
@@ -564,7 +592,7 @@ var _ = Describe("ClusterSet Controller", func() {
Expect(err).To(Not(HaveOccurred()))

cluster := &v1alpha1.Cluster{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
@@ -622,9 +650,9 @@ var _ = Describe("ClusterSet Controller", func() {
When("a cluster in a different namespace is present", func() {
It("should not be update", func() {
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSetSpec{
DefaultPriorityClass: "foobar",
@@ -634,12 +662,12 @@ var _ = Describe("ClusterSet Controller", func() {
err := k8sClient.Create(ctx, clusterSet)
Expect(err).To(Not(HaveOccurred()))

namespace2 := &corev1.Namespace{ObjectMeta: v1.ObjectMeta{GenerateName: "ns-"}}
namespace2 := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-"}}
err = k8sClient.Create(ctx, namespace2)
Expect(err).To(Not(HaveOccurred()))

cluster := &v1alpha1.Cluster{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace2.Name,
},
@@ -666,5 +694,134 @@ var _ = Describe("ClusterSet Controller", func() {
Should(BeTrue())
})
})

When("created with ResourceQuota", func() {
It("should create resourceQuota if Quota is enabled", func() {
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSetSpec{
Quota: &v1.ResourceQuotaSpec{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("800m"),
v1.ResourceMemory: resource.MustParse("1Gi"),
},
},
},
}

err := k8sClient.Create(ctx, clusterSet)
Expect(err).To(Not(HaveOccurred()))

var resourceQuota v1.ResourceQuota
Eventually(func() error {
key := types.NamespacedName{
Name: k3kcontroller.SafeConcatNameWithPrefix(clusterSet.Name),
Namespace: namespace,
}

return k8sClient.Get(ctx, key, &resourceQuota)
}).
WithTimeout(time.Second * 10).
WithPolling(time.Second).
Should(BeNil())
Expect(resourceQuota.Spec.Hard.Cpu().String()).To(BeEquivalentTo("800m"))
Expect(resourceQuota.Spec.Hard.Memory().String()).To(BeEquivalentTo("1Gi"))
})

It("should delete the ResourceQuota if Quota is deleted", func() {
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSetSpec{
Quota: &v1.ResourceQuotaSpec{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("800m"),
v1.ResourceMemory: resource.MustParse("1Gi"),
},
},
},
}

err := k8sClient.Create(ctx, clusterSet)
Expect(err).To(Not(HaveOccurred()))

var resourceQuota v1.ResourceQuota

Eventually(func() error {
key := types.NamespacedName{
Name: k3kcontroller.SafeConcatNameWithPrefix(clusterSet.Name),
Namespace: namespace,
}
return k8sClient.Get(ctx, key, &resourceQuota)
}).
WithTimeout(time.Minute).
WithPolling(time.Second).
Should(BeNil())

clusterSet.Spec.Quota = nil
err = k8sClient.Update(ctx, clusterSet)
Expect(err).To(Not(HaveOccurred()))

// wait a bit for the resourceQuota to be deleted
Eventually(func() bool {
key := types.NamespacedName{
Name: k3kcontroller.SafeConcatNameWithPrefix(clusterSet.Name),
Namespace: namespace,
}
err := k8sClient.Get(ctx, key, &resourceQuota)
return apierrors.IsNotFound(err)
}).
WithTimeout(time.Second * 10).
WithPolling(time.Second).
Should(BeTrue())
})

It("should create resourceQuota if Quota is enabled", func() {
clusterSet := &v1alpha1.ClusterSet{
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSetSpec{
Limit: &v1.LimitRangeSpec{
Limits: []v1.LimitRangeItem{
{
Type: v1.LimitTypeContainer,
DefaultRequest: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("500m"),
},
},
},
},
},
}

err := k8sClient.Create(ctx, clusterSet)
Expect(err).To(Not(HaveOccurred()))

var limitRange v1.LimitRange

Eventually(func() error {
key := types.NamespacedName{
Name: k3kcontroller.SafeConcatNameWithPrefix(clusterSet.Name),
Namespace: namespace,
}
return k8sClient.Get(ctx, key, &limitRange)
}).
WithTimeout(time.Minute).
WithPolling(time.Second).
Should(BeNil())

// make sure that the default limit range has the default request values.
Expect(limitRange.Spec.Limits).ShouldNot(BeEmpty())
cpu := limitRange.Spec.Limits[0].DefaultRequest.Cpu().String()
Expect(cpu).To(BeEquivalentTo("500m"))
})
})
})
})

96
tests/cluster_network_test.go
Normal file
@@ -0,0 +1,96 @@
package k3k_test

import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)

var _ = When("two virtual clusters are installed", Label("e2e"), func() {

var (
cluster1 *VirtualCluster
cluster2 *VirtualCluster
)

BeforeEach(func() {
clusters := NewVirtualClusters(2)
cluster1 = clusters[0]
cluster2 = clusters[1]
})

AfterEach(func() {
DeleteNamespaces(cluster1.Cluster.Namespace, cluster2.Cluster.Namespace)
})

It("can create pods in each of them that are isolated", func() {

pod1Cluster1, pod1Cluster1IP := cluster1.NewNginxPod("")
pod2Cluster1, pod2Cluster1IP := cluster1.NewNginxPod("")
pod1Cluster2, pod1Cluster2IP := cluster2.NewNginxPod("")

var (
stdout string
stderr string
curlCmd string
err error
)

By("Checking that Pods can reach themselves")

curlCmd = "curl --no-progress-meter " + pod1Cluster1IP
stdout, _, err = cluster1.ExecCmd(pod1Cluster1, curlCmd)
Expect(err).To(Not(HaveOccurred()))
Expect(stdout).To(ContainSubstring("Welcome to nginx!"))

curlCmd = "curl --no-progress-meter " + pod2Cluster1IP
stdout, _, err = cluster1.ExecCmd(pod2Cluster1, curlCmd)
Expect(err).To(Not(HaveOccurred()))
Expect(stdout).To(ContainSubstring("Welcome to nginx!"))

curlCmd = "curl --no-progress-meter " + pod1Cluster2IP
stdout, _, err = cluster2.ExecCmd(pod1Cluster2, curlCmd)
Expect(err).To(Not(HaveOccurred()))
Expect(stdout).To(ContainSubstring("Welcome to nginx!"))

// Pods in the same Virtual Cluster should be able to reach each other
// Pod1 should be able to call Pod2, and vice versa

By("Checking that Pods in the same virtual clusters can reach each other")

curlCmd = "curl --no-progress-meter " + pod2Cluster1IP
stdout, _, err = cluster1.ExecCmd(pod1Cluster1, curlCmd)
Expect(err).To(Not(HaveOccurred()))
Expect(stdout).To(ContainSubstring("Welcome to nginx!"))

curlCmd = "curl --no-progress-meter " + pod1Cluster1IP
stdout, _, err = cluster1.ExecCmd(pod2Cluster1, curlCmd)
Expect(err).To(Not(HaveOccurred()))
Expect(stdout).To(ContainSubstring("Welcome to nginx!"))

By("Checking that Pods in the different virtual clusters cannot reach each other")

// Pods in Cluster 1 should not be able to reach the Pod in Cluster 2

curlCmd = "curl --no-progress-meter " + pod1Cluster2IP
_, stderr, err = cluster1.ExecCmd(pod1Cluster1, curlCmd)
Expect(err).Should(HaveOccurred())
Expect(stderr).To(ContainSubstring("Failed to connect"))

curlCmd = "curl --no-progress-meter " + pod1Cluster2IP
_, stderr, err = cluster1.ExecCmd(pod2Cluster1, curlCmd)
Expect(err).To(HaveOccurred())
Expect(stderr).To(ContainSubstring("Failed to connect"))

// Pod in Cluster 2 should not be able to reach Pods in Cluster 1

curlCmd = "curl --no-progress-meter " + pod1Cluster1IP
_, stderr, err = cluster2.ExecCmd(pod1Cluster2, curlCmd)
Expect(err).To(HaveOccurred())
Expect(stderr).To(ContainSubstring("Failed to connect"))

curlCmd = "curl --no-progress-meter " + pod2Cluster1IP
_, stderr, err = cluster2.ExecCmd(pod1Cluster2, curlCmd)
Expect(err).To(HaveOccurred())
Expect(stderr).To(ContainSubstring("Failed to connect"))
})
})
@@ -9,13 +9,12 @@ import (

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

var _ = When("k3k is installed", func() {
var _ = When("k3k is installed", Label("e2e"), func() {
It("is in Running status", func() {

// check that the controller is running
@@ -42,128 +41,44 @@ var _ = When("k3k is installed", func() {
})
})

var _ = When("a ephemeral cluster is installed", func() {
var _ = When("a ephemeral cluster is installed", Label("e2e"), func() {

var namespace string
var virtualCluster *VirtualCluster

BeforeEach(func() {
createdNS := &corev1.Namespace{ObjectMeta: v1.ObjectMeta{GenerateName: "ns-"}}
createdNS, err := k8s.CoreV1().Namespaces().Create(context.Background(), createdNS, v1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
namespace = createdNS.Name
virtualCluster = NewVirtualCluster()
})

AfterEach(func() {
DeleteNamespaces(virtualCluster.Cluster.Namespace)
})

It("can create a nginx pod", func() {
ctx := context.Background()

cluster := v1alpha1.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: "mycluster",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
TLSSANs: []string{hostIP},
Expose: &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{},
},
Persistence: v1alpha1.PersistenceConfig{
Type: v1alpha1.EphemeralPersistenceMode,
},
},
}

By(fmt.Sprintf("Creating virtual cluster %s/%s", cluster.Namespace, cluster.Name))
NewVirtualCluster(cluster)

By("Waiting to get a kubernetes client for the virtual cluster")
virtualK8sClient := NewVirtualK8sClient(cluster)

nginxPod := &corev1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "nginx",
Namespace: "default",
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{{
Name: "nginx",
Image: "nginx",
}},
},
}
nginxPod, err := virtualK8sClient.CoreV1().Pods(nginxPod.Namespace).Create(ctx, nginxPod, v1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))

// check that the nginx Pod is up and running in the host cluster
Eventually(func() bool {
//labelSelector := fmt.Sprintf("%s=%s", translate.ClusterNameLabel, cluster.Namespace)
podList, err := k8s.CoreV1().Pods(namespace).List(ctx, v1.ListOptions{})
Expect(err).To(Not(HaveOccurred()))

for _, pod := range podList.Items {
resourceName := pod.Annotations[translate.ResourceNameAnnotation]
resourceNamespace := pod.Annotations[translate.ResourceNamespaceAnnotation]

if resourceName == nginxPod.Name && resourceNamespace == nginxPod.Namespace {
fmt.Fprintf(GinkgoWriter,
"pod=%s resource=%s/%s status=%s\n",
pod.Name, resourceNamespace, resourceName, pod.Status.Phase,
)

return pod.Status.Phase == corev1.PodRunning
}
}

return false
}).
WithTimeout(time.Minute).
WithPolling(time.Second * 5).
Should(BeTrue())
_, _ = virtualCluster.NewNginxPod("")
})

It("regenerates the bootstrap secret after a restart", func() {
ctx := context.Background()

cluster := v1alpha1.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: "mycluster",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
TLSSANs: []string{hostIP},
Expose: &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{},
},
Persistence: v1alpha1.PersistenceConfig{
Type: v1alpha1.EphemeralPersistenceMode,
},
},
}

By(fmt.Sprintf("Creating virtual cluster %s/%s", cluster.Namespace, cluster.Name))
NewVirtualCluster(cluster)

By("Waiting to get a kubernetes client for the virtual cluster")
virtualK8sClient := NewVirtualK8sClient(cluster)

_, err := virtualK8sClient.DiscoveryClient.ServerVersion()
_, err := virtualCluster.Client.DiscoveryClient.ServerVersion()
Expect(err).To(Not(HaveOccurred()))

labelSelector := "cluster=" + cluster.Name + ",role=server"
serverPods, err := k8s.CoreV1().Pods(namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server"
serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))

Expect(len(serverPods.Items)).To(Equal(1))
serverPod := serverPods.Items[0]

fmt.Fprintf(GinkgoWriter, "deleting pod %s/%s\n", serverPod.Namespace, serverPod.Name)
err = k8s.CoreV1().Pods(namespace).Delete(ctx, serverPod.Name, v1.DeleteOptions{})
err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).Delete(ctx, serverPod.Name, v1.DeleteOptions{})
Expect(err).To(Not(HaveOccurred()))

By("Deleting server pod")

// check that the server pods restarted
Eventually(func() any {
serverPods, err = k8s.CoreV1().Pods(namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
serverPods, err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))
Expect(len(serverPods.Items)).To(Equal(1))
return serverPods.Items[0].DeletionTimestamp
@@ -177,7 +92,7 @@ var _ = When("a ephemeral cluster is installed", func() {
By("Using old k8s client configuration should fail")

Eventually(func() bool {
_, err = virtualK8sClient.DiscoveryClient.ServerVersion()
_, err = virtualCluster.Client.DiscoveryClient.ServerVersion()
var unknownAuthorityErr x509.UnknownAuthorityError
return errors.As(err, &unknownAuthorityErr)
}).
@@ -188,8 +103,8 @@ var _ = When("a ephemeral cluster is installed", func() {
By("Recover new config should succeed")

Eventually(func() error {
virtualK8sClient = NewVirtualK8sClient(cluster)
_, err = virtualK8sClient.DiscoveryClient.ServerVersion()
virtualCluster.Client, virtualCluster.RestConfig = NewVirtualK8sClientAndConfig(virtualCluster.Cluster)
_, err = virtualCluster.Client.DiscoveryClient.ServerVersion()
return err
}).
WithTimeout(time.Minute * 2).
@@ -200,126 +115,52 @@ var _ = When("a ephemeral cluster is installed", func() {

var _ = When("a dynamic cluster is installed", func() {
|
||||
|
||||
var namespace string
|
||||
var virtualCluster *VirtualCluster
|
||||
|
||||
BeforeEach(func() {
|
||||
createdNS := &corev1.Namespace{ObjectMeta: v1.ObjectMeta{GenerateName: "ns-"}}
|
||||
createdNS, err := k8s.CoreV1().Namespaces().Create(context.Background(), createdNS, v1.CreateOptions{})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
namespace = createdNS.Name
|
||||
namespace := NewNamespace()
|
||||
cluster := NewCluster(namespace.Name)
|
||||
cluster.Spec.Persistence.Type = v1alpha1.DynamicPersistenceMode
|
||||
CreateCluster(cluster)
|
||||
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
|
||||
|
||||
virtualCluster = &VirtualCluster{
|
||||
Cluster: cluster,
|
||||
RestConfig: restConfig,
|
||||
Client: client,
|
||||
}
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
DeleteNamespaces(virtualCluster.Cluster.Namespace)
|
||||
})
|
||||
|
||||
It("can create a nginx pod", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
cluster := v1alpha1.Cluster{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "mycluster",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1alpha1.ClusterSpec{
|
||||
TLSSANs: []string{hostIP},
|
||||
Expose: &v1alpha1.ExposeConfig{
|
||||
NodePort: &v1alpha1.NodePortConfig{},
|
||||
},
|
||||
Persistence: v1alpha1.PersistenceConfig{
|
||||
Type: v1alpha1.DynamicPersistenceMode,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
By(fmt.Sprintf("Creating virtual cluster %s/%s", cluster.Namespace, cluster.Name))
|
||||
NewVirtualCluster(cluster)
|
||||
|
||||
By("Waiting to get a kubernetes client for the virtual cluster")
|
||||
virtualK8sClient := NewVirtualK8sClient(cluster)
|
||||
|
||||
nginxPod := &corev1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "nginx",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{{
|
||||
Name: "nginx",
|
||||
Image: "nginx",
|
||||
}},
|
||||
},
|
||||
}
|
||||
nginxPod, err := virtualK8sClient.CoreV1().Pods(nginxPod.Namespace).Create(ctx, nginxPod, v1.CreateOptions{})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
// check that the nginx Pod is up and running in the host cluster
|
||||
Eventually(func() bool {
|
||||
//labelSelector := fmt.Sprintf("%s=%s", translate.ClusterNameLabel, cluster.Namespace)
|
||||
podList, err := k8s.CoreV1().Pods(namespace).List(ctx, v1.ListOptions{})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
for _, pod := range podList.Items {
|
||||
resourceName := pod.Annotations[translate.ResourceNameAnnotation]
|
||||
resourceNamespace := pod.Annotations[translate.ResourceNamespaceAnnotation]
|
||||
|
||||
if resourceName == nginxPod.Name && resourceNamespace == nginxPod.Namespace {
|
||||
fmt.Fprintf(GinkgoWriter,
|
||||
"pod=%s resource=%s/%s status=%s\n",
|
||||
pod.Name, resourceNamespace, resourceName, pod.Status.Phase,
|
||||
)
|
||||
|
||||
return pod.Status.Phase == corev1.PodRunning
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}).
|
||||
WithTimeout(time.Minute).
|
||||
WithPolling(time.Second * 5).
|
||||
Should(BeTrue())
|
||||
_, _ = virtualCluster.NewNginxPod("")
|
||||
})
|
||||
|
||||
It("use the same bootstrap secret after a restart", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
cluster := v1alpha1.Cluster{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "mycluster",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1alpha1.ClusterSpec{
|
||||
TLSSANs: []string{hostIP},
|
||||
Expose: &v1alpha1.ExposeConfig{
|
||||
NodePort: &v1alpha1.NodePortConfig{},
|
||||
},
|
||||
Persistence: v1alpha1.PersistenceConfig{
|
||||
Type: v1alpha1.DynamicPersistenceMode,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
By(fmt.Sprintf("Creating virtual cluster %s/%s", cluster.Namespace, cluster.Name))
|
||||
NewVirtualCluster(cluster)
|
||||
|
||||
By("Waiting to get a kubernetes client for the virtual cluster")
|
||||
virtualK8sClient := NewVirtualK8sClient(cluster)
|
||||
|
||||
_, err := virtualK8sClient.DiscoveryClient.ServerVersion()
|
||||
_, err := virtualCluster.Client.DiscoveryClient.ServerVersion()
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
labelSelector := "cluster=" + cluster.Name + ",role=server"
|
||||
serverPods, err := k8s.CoreV1().Pods(namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
|
||||
labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server"
|
||||
serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
Expect(len(serverPods.Items)).To(Equal(1))
|
||||
serverPod := serverPods.Items[0]
|
||||
|
||||
fmt.Fprintf(GinkgoWriter, "deleting pod %s/%s\n", serverPod.Namespace, serverPod.Name)
|
||||
err = k8s.CoreV1().Pods(namespace).Delete(ctx, serverPod.Name, v1.DeleteOptions{})
|
||||
err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).Delete(ctx, serverPod.Name, v1.DeleteOptions{})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
By("Deleting server pod")
|
||||
|
||||
// check that the server pods restarted
|
||||
Eventually(func() any {
|
||||
serverPods, err = k8s.CoreV1().Pods(namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
|
||||
serverPods, err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
Expect(len(serverPods.Items)).To(Equal(1))
|
||||
return serverPods.Items[0].DeletionTimestamp
|
||||
@@ -333,8 +174,7 @@ var _ = When("a dynamic cluster is installed", func() {
|
||||
By("Using old k8s client configuration should succeed")
|
||||
|
||||
Eventually(func() error {
|
||||
virtualK8sClient = NewVirtualK8sClient(cluster)
|
||||
_, err = virtualK8sClient.DiscoveryClient.ServerVersion()
|
||||
_, err = virtualCluster.Client.DiscoveryClient.ServerVersion()
|
||||
return err
|
||||
}).
|
||||
WithTimeout(2 * time.Minute).
|
||||
|
||||
@@ -1,29 +1,142 @@
package k3k_test

import (
	"bytes"
	"context"
	"fmt"
	"strings"
	"sync"
	"time"

	"github.com/rancher/k3k/k3k-kubelet/translate"
	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	"github.com/rancher/k3k/pkg/controller/certs"
	"github.com/rancher/k3k/pkg/controller/kubeconfig"
	corev1 "k8s.io/api/core/v1"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
	"k8s.io/client-go/tools/remotecommand"
	"k8s.io/kubectl/pkg/scheme"
	"k8s.io/utils/ptr"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

func NewVirtualCluster(cluster v1alpha1.Cluster) {
type VirtualCluster struct {
	Cluster    *v1alpha1.Cluster
	RestConfig *rest.Config
	Client     *kubernetes.Clientset
}

func NewVirtualCluster() *VirtualCluster {
	GinkgoHelper()

	namespace := NewNamespace()

	By(fmt.Sprintf("Creating new virtual cluster in namespace %s", namespace.Name))
	cluster := NewCluster(namespace.Name)
	CreateCluster(cluster)

	client, restConfig := NewVirtualK8sClientAndConfig(cluster)

	By(fmt.Sprintf("Created virtual cluster %s/%s", cluster.Namespace, cluster.Name))

	return &VirtualCluster{
		Cluster:    cluster,
		RestConfig: restConfig,
		Client:     client,
	}
}
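
For orientation, a minimal sketch of how a spec can lean on this helper; the spec wording and assertion are illustrative, not part of the diff:

var _ = When("a virtual cluster is created", func() {
	It("answers discovery requests", func() {
		// NewVirtualCluster provisions the namespace, the Cluster
		// resource, and a ready client in one call
		virtualCluster := NewVirtualCluster()

		_, err := virtualCluster.Client.DiscoveryClient.ServerVersion()
		Expect(err).To(Not(HaveOccurred()))
	})
})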

// NewVirtualClusters will create multiple Virtual Clusters asynchronously
func NewVirtualClusters(n int) []*VirtualCluster {
	GinkgoHelper()

	var (
		mu       sync.Mutex
		clusters []*VirtualCluster
	)

	wg := sync.WaitGroup{}
	wg.Add(n)

	for range n {
		go func() {
			defer wg.Done()
			defer GinkgoRecover()

			c := NewVirtualCluster()

			// serialize the append: concurrent appends to a shared slice are a data race
			mu.Lock()
			clusters = append(clusters, c)
			mu.Unlock()
		}()
	}

	wg.Wait()

	return clusters
}
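
A hedged sketch of fanning a spec out over several clusters created this way; the spec body is illustrative:

It("provisions several virtual clusters concurrently", func() {
	// each cluster was created in its own goroutine by NewVirtualClusters
	for _, vc := range NewVirtualClusters(3) {
		_, err := vc.Client.DiscoveryClient.ServerVersion()
		Expect(err).To(Not(HaveOccurred()))
	}
})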

func NewNamespace() *corev1.Namespace {
	GinkgoHelper()

	namespace := &corev1.Namespace{ObjectMeta: v1.ObjectMeta{GenerateName: "ns-"}}
	namespace, err := k8s.CoreV1().Namespaces().Create(context.Background(), namespace, v1.CreateOptions{})
	Expect(err).To(Not(HaveOccurred()))

	return namespace
}

func DeleteNamespaces(names ...string) {
	GinkgoHelper()

	wg := sync.WaitGroup{}
	wg.Add(len(names))

	for _, name := range names {
		go func() {
			defer wg.Done()
			defer GinkgoRecover()

			deleteNamespace(name)
		}()
	}

	wg.Wait()
}

func deleteNamespace(name string) {
	GinkgoHelper()

	By(fmt.Sprintf("Deleting namespace %s", name))

	err := k8s.CoreV1().Namespaces().Delete(context.Background(), name, v1.DeleteOptions{
		GracePeriodSeconds: ptr.To[int64](0),
	})
	Expect(err).To(Not(HaveOccurred()))
}

func NewCluster(namespace string) *v1alpha1.Cluster {
	return &v1alpha1.Cluster{
		ObjectMeta: v1.ObjectMeta{
			GenerateName: "cluster-",
			Namespace:    namespace,
		},
		Spec: v1alpha1.ClusterSpec{
			TLSSANs: []string{hostIP},
			Expose: &v1alpha1.ExposeConfig{
				NodePort: &v1alpha1.NodePortConfig{},
			},
			Persistence: v1alpha1.PersistenceConfig{
				Type: v1alpha1.EphemeralPersistenceMode,
			},
		},
	}
}
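
NewCluster defaults to ephemeral persistence; a spec that needs state to survive a server restart can tweak the returned object before creating it. A small illustrative sketch (DynamicPersistenceMode is the mode used by the bootstrap-restart test above):

// hypothetical override of the generated cluster's persistence mode
namespace := NewNamespace()
cluster := NewCluster(namespace.Name)
cluster.Spec.Persistence.Type = v1alpha1.DynamicPersistenceMode
CreateCluster(cluster)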

func CreateCluster(cluster *v1alpha1.Cluster) {
	GinkgoHelper()

	ctx := context.Background()
	err := k8sClient.Create(ctx, &cluster)
	err := k8sClient.Create(ctx, cluster)
	Expect(err).To(Not(HaveOccurred()))

	// check that the server Pod and the Kubelet are in Ready state
@@ -58,7 +171,13 @@ func NewVirtualCluster(cluster v1alpha1.Cluster) {
}

// NewVirtualK8sClient returns a Kubernetes ClientSet for the virtual cluster
func NewVirtualK8sClient(cluster v1alpha1.Cluster) *kubernetes.Clientset {
func NewVirtualK8sClient(cluster *v1alpha1.Cluster) *kubernetes.Clientset {
	virtualK8sClient, _ := NewVirtualK8sClientAndConfig(cluster)
	return virtualK8sClient
}

// NewVirtualK8sClientAndConfig returns a Kubernetes ClientSet and the rest.Config for the virtual cluster
func NewVirtualK8sClientAndConfig(cluster *v1alpha1.Cluster) (*kubernetes.Clientset, *rest.Config) {
	GinkgoHelper()

	var (
@@ -72,7 +191,7 @@ func NewVirtualK8sClient(cluster v1alpha1.Cluster) *kubernetes.Clientset {
		vKubeconfig := kubeconfig.New()
		kubeletAltName := fmt.Sprintf("k3k-%s-kubelet", cluster.Name)
		vKubeconfig.AltNames = certs.AddSANs([]string{hostIP, kubeletAltName})
		config, err = vKubeconfig.Extract(ctx, k8sClient, &cluster, hostIP)
		config, err = vKubeconfig.Extract(ctx, k8sClient, cluster, hostIP)
		return err
	}).
		WithTimeout(time.Minute * 2).
@@ -87,5 +206,97 @@ func NewVirtualK8sClient(cluster v1alpha1.Cluster) *kubernetes.Clientset {
	virtualK8sClient, err := kubernetes.NewForConfig(restcfg)
	Expect(err).To(Not(HaveOccurred()))

	return virtualK8sClient
	return virtualK8sClient, restcfg
}
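
Returning the rest.Config alongside the ClientSet matters because a Clientset does not expose the config it was built from; ExecCmd below needs that config to build an SPDY executor against the same virtual cluster.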

func (c *VirtualCluster) NewNginxPod(namespace string) (*corev1.Pod, string) {
	GinkgoHelper()

	if namespace == "" {
		namespace = "default"
	}

	nginxPod := &corev1.Pod{
		ObjectMeta: v1.ObjectMeta{
			GenerateName: "nginx-",
			Namespace:    namespace,
		},
		Spec: corev1.PodSpec{
			Containers: []corev1.Container{{
				Name:  "nginx",
				Image: "nginx",
			}},
		},
	}

	By("Creating Pod")

	ctx := context.Background()
	nginxPod, err := c.Client.CoreV1().Pods(nginxPod.Namespace).Create(ctx, nginxPod, v1.CreateOptions{})
	Expect(err).To(Not(HaveOccurred()))

	var podIP string

	// check that the nginx Pod is up and running in the host cluster
	Eventually(func() bool {
		podList, err := k8s.CoreV1().Pods(c.Cluster.Namespace).List(ctx, v1.ListOptions{})
		Expect(err).To(Not(HaveOccurred()))

		for _, pod := range podList.Items {
			resourceName := pod.Annotations[translate.ResourceNameAnnotation]
			resourceNamespace := pod.Annotations[translate.ResourceNamespaceAnnotation]

			if resourceName == nginxPod.Name && resourceNamespace == nginxPod.Namespace {
				podIP = pod.Status.PodIP

				fmt.Fprintf(GinkgoWriter,
					"pod=%s resource=%s/%s status=%s podIP=%s\n",
					pod.Name, resourceNamespace, resourceName, pod.Status.Phase, podIP,
				)

				return pod.Status.Phase == corev1.PodRunning && podIP != ""
			}
		}

		return false
	}).
		WithTimeout(time.Minute).
		WithPolling(time.Second * 5).
		Should(BeTrue())

	// get the running pod from the virtual cluster
	nginxPod, err = c.Client.CoreV1().Pods(nginxPod.Namespace).Get(ctx, nginxPod.Name, v1.GetOptions{})
	Expect(err).To(Not(HaveOccurred()))

	return nginxPod, podIP
}
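
Note that the readiness poll runs against the host cluster client (k8s) and matches the synced pod via the translate annotations, since the pod actually runs on the host; the returned IP is therefore the host-side pod IP, while the returned Pod object is re-read from the virtual cluster.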

// ExecCmd executes a command on the given pod and waits for the command's output.
func (c *VirtualCluster) ExecCmd(pod *corev1.Pod, command string) (string, string, error) {
	option := &corev1.PodExecOptions{
		Command: []string{"sh", "-c", command},
		Stdout:  true,
		Stderr:  true,
	}

	req := c.Client.CoreV1().RESTClient().Post().Resource("pods").Name(pod.Name).Namespace(pod.Namespace).SubResource("exec")
	req.VersionedParams(option, scheme.ParameterCodec)

	exec, err := remotecommand.NewSPDYExecutor(c.RestConfig, "POST", req.URL())
	if err != nil {
		return "", "", err
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
	defer cancel()

	stdout := &bytes.Buffer{}
	stderr := &bytes.Buffer{}

	err = exec.StreamWithContext(ctx, remotecommand.StreamOptions{
		Stdout: stdout,
		Stderr: stderr,
	})

	return stdout.String(), stderr.String(), err
}
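
A hedged sketch tying the two helpers together; the command is illustrative (nginx -v writes its version banner to stderr):

// hypothetical usage: run a command inside the synced nginx pod
nginxPod, _ := virtualCluster.NewNginxPod("")

stdout, stderr, err := virtualCluster.ExecCmd(nginxPod, "nginx -v")
Expect(err).To(Not(HaveOccurred()))
fmt.Fprintf(GinkgoWriter, "stdout=%q stderr=%q\n", stdout, stderr)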