Compare commits

..

10 Commits

Author SHA1 Message Date
Hussein Galal
853b0a7e05 Chart update for 0.3.1 (#309)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-03-21 01:59:53 +02:00
Rossella Sblendido
28b15d2e92 Merge pull request #299 from rancher/doc-k3d
Add documentation to install k3k on k3d
2025-03-10 11:07:02 +01:00
rossella
cad59c0494 Moved how to to development.md 2025-03-07 17:38:22 +01:00
Enrico Candino
d0810af17c Fix kubeconfig load from multiple configuration files (#301)
* fix kubeconfig load with standards kubectl approach

* update cli docs
2025-03-07 15:11:00 +01:00
Enrico Candino
2b7202e676 Added NetworkPolicy for Cluster isolation (#290)
* added cluster NetworkPolicy

* wip tests

* remove focus

* added networking test

* test refactoring

* unfocus

* revert labels

* added async creation of clusters, and namespace deletion

* add unfocus validation
2025-03-07 14:36:49 +01:00
rossella
4975b0b799 Add documentation to install k3k on k3d
k3d is a valid local option for testing k3k. Adding documentation
on how to install k3k on top of k3d.
2025-03-04 16:48:21 +01:00
jpgouin
90d17cd6dd Merge pull request #288 from jp-gouin/fix-cli 2025-03-04 13:48:34 +01:00
Justin J. Janes
3e5e9c7965 adding a new issue template for bug reports (#258)
* adding a new issue template for bug reports

* adding extra helper commands for logs from k3k
2025-03-04 12:33:34 +02:00
jpgouin
1d027909ee cli - fix bug with Kubeconfig value resolution 2025-03-04 10:23:54 +00:00
Enrico Candino
6105402bf2 charts-0.3.1-r1 (#283) 2025-03-03 17:14:13 +01:00
15 changed files with 591 additions and 225 deletions

41
.github/ISSUE_TEMPLATE/bug_report.md vendored Normal file
View File

@@ -0,0 +1,41 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''
---
<!-- Thanks for helping us to improve K3K! We welcome all bug reports. Please fill out each area of the template so we can better help you. Comments like this will be hidden when you post but you can delete them if you wish. -->
**Environmental Info:**
Host Cluster Version:
<!-- For example K3S v1.32.1+k3s1 or RKE2 v1.31.5+rke2r1 -->
Node(s) CPU architecture, OS, and Version:
<!-- Provide the output from "uname -a" on the node(s) -->
Host Cluster Configuration:
<!-- Provide some basic information on the cluster configuration. For example, "1 servers, 2 agents CNI: Flannel". -->
K3K Cluster Configuration:
<!-- Provide some basic information on the cluster configuration. For example, "3 servers, 2 agents". -->
**Describe the bug:**
<!-- A clear and concise description of what the bug is. -->
**Steps To Reproduce:**
- Created a cluster with `k3k create`:
**Expected behavior:**
<!-- A clear and concise description of what you expected to happen. -->
**Actual behavior:**
<!-- A clear and concise description of what actually happened. -->
**Additional context / logs:**
<!-- Add any other context and/or logs about the problem here. -->
<!-- kubectl logs -n k3k-system -l app.kubernetes.io/instance=k3k -->
<!-- $ kubectl logs -n <cluster-namespace> k3k-<cluster-name>-server-0 -->
<!-- $ kubectl logs -n <cluster-namespace> -l cluster=<cluster-name>,mode=shared # in shared mode -->

View File

@@ -54,7 +54,7 @@ push-%:
.PHONY: test
test: ## Run all the tests
$(GINKGO) -v -r
$(GINKGO) -v -r --label-filter=$(label-filter)
.PHONY: test-unit
test-unit: ## Run the unit tests (skips the e2e)
@@ -87,6 +87,7 @@ lint: ## Find any linting issues in the project
.PHONY: validate
validate: build-crds docs ## Validate the project checking for any dependency or doc mismatch
$(GINKGO) unfocus
go mod tidy
git --no-pager diff go.mod go.sum
test -z "$(shell git status --porcelain)"

View File

@@ -2,5 +2,5 @@ apiVersion: v2
name: k3k
description: A Helm chart for K3K
type: application
version: 0.3.0-r1
appVersion: v0.3.0
version: 0.3.1-r2
appVersion: v0.3.1

View File

@@ -67,7 +67,7 @@ func createAction(config *CreateConfig) cli.ActionFunc {
return errors.New("invalid cluster name")
}
restConfig, err := clientcmd.BuildConfigFromFlags("", Kubeconfig)
restConfig, err := loadRESTConfig()
if err != nil {
return err
}

View File

@@ -9,7 +9,6 @@ import (
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -36,7 +35,7 @@ func delete(clx *cli.Context) error {
return errors.New("invalid cluster name")
}
restConfig, err := clientcmd.BuildConfigFromFlags("", Kubeconfig)
restConfig, err := loadRESTConfig()
if err != nil {
return err
}

View File

@@ -92,11 +92,7 @@ func NewKubeconfigCommand() *cli.Command {
}
func generate(clx *cli.Context) error {
var cluster v1alpha1.Cluster
ctx := context.Background()
restConfig, err := clientcmd.BuildConfigFromFlags("", Kubeconfig)
restConfig, err := loadRESTConfig()
if err != nil {
return err
}
@@ -113,6 +109,9 @@ func generate(clx *cli.Context) error {
Namespace: Namespace(),
}
var cluster v1alpha1.Cluster
ctx := context.Background()
if err := ctrlClient.Get(ctx, clusterKey, &cluster); err != nil {
return err
}

View File

@@ -9,6 +9,8 @@ import (
"github.com/urfave/cli/v2"
"k8s.io/apimachinery/pkg/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
const (
@@ -25,10 +27,9 @@ var (
CommonFlags = []cli.Flag{
&cli.StringFlag{
Name: "kubeconfig",
EnvVars: []string{"KUBECONFIG"},
Usage: "kubeconfig path",
Destination: &Kubeconfig,
Value: "$HOME/.kube/config",
DefaultText: "$HOME/.kube/config or $KUBECONFIG if set",
},
&cli.StringFlag{
Name: "namespace",
@@ -84,3 +85,16 @@ func Namespace() string {
return namespace
}
func loadRESTConfig() (*rest.Config, error) {
loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
configOverrides := &clientcmd.ConfigOverrides{}
if Kubeconfig != "" {
loadingRules.ExplicitPath = Kubeconfig
}
kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides)
return kubeConfig.ClientConfig()
}

View File

@@ -39,7 +39,7 @@ Create new cluster
**--cluster-cidr**="": cluster CIDR
**--kubeconfig**="": kubeconfig path (default: "$HOME/.kube/config")
**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)
**--kubeconfig-server**="": override the kubeconfig server host
@@ -67,7 +67,7 @@ Delete an existing cluster
>k3kcli cluster delete [command options] NAME
**--kubeconfig**="": kubeconfig path (default: "$HOME/.kube/config")
**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)
**--namespace**="": namespace to create the k3k cluster in
@@ -87,7 +87,7 @@ Generate kubeconfig for clusters
**--expiration-days**="": Expiration date of the certificates used for the kubeconfig (default: 356)
**--kubeconfig**="": kubeconfig path (default: "$HOME/.kube/config")
**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)
**--kubeconfig-server**="": override the kubeconfig server host

View File

@@ -91,3 +91,64 @@ The required binaries for `envtest` are installed with [`setup-envtest`](https:/
We are using Kubebuilder and `controller-gen` to build the needed CRDs. To generate the specs you can run `make build-crds`.
Remember also to update the CRDs documentation running the `make docs` command.
## How to install k3k on k3d
This document provides a guide on how to install k3k on [k3d](https://k3d.io).
### Installing k3d
Since k3d uses docker under the hood, we need to expose the ports on the host that we'll then use for the NodePort in virtual cluster creation.
Create the k3d cluster in the following way:
```bash
k3d cluster create k3k -p "30000-30010:30000-30010@server:0"
```
With this syntax, ports 30000 to 30010 will be exposed on the host.
### Install k3k
Now install k3k as usual:
```bash
helm repo update
helm install --namespace k3k-system --create-namespace k3k k3k/k3k --devel
```
### Create a virtual cluster
Once the k3k controller is up and running, create a namespace in which to create our first virtual cluster.
```bash
kubectl create ns k3k-mycluster
```
Then create the virtual cluster, exposing through NodePort one of the ports that we set up in the previous step:
```bash
cat <<EOF | kubectl apply -f -
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
name: mycluster
namespace: k3k-mycluster
spec:
expose:
nodePort:
serverPort: 30001
EOF
```
Check when the cluster is ready:
```bash
kubectl get po -n k3k-mycluster
```
The last thing to do is to get the kubeconfig to connect to the virtual cluster we've just created:
```bash
k3kcli kubeconfig generate --name mycluster --namespace k3k-mycluster --kubeconfig-server localhost:30001
```

View File

@@ -11,11 +11,13 @@ import (
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
"github.com/rancher/k3k/pkg/controller/cluster/server"
"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -202,6 +204,10 @@ func (c *ClusterReconciler) reconcileCluster(ctx context.Context, cluster *v1alp
}
}
if err := c.ensureNetworkPolicy(ctx, cluster); err != nil {
return err
}
service, err := c.ensureClusterService(ctx, cluster)
if err != nil {
return err
@@ -300,6 +306,84 @@ func (c *ClusterReconciler) createClusterConfigs(ctx context.Context, cluster *v
return nil
}
func (c *ClusterReconciler) ensureNetworkPolicy(ctx context.Context, cluster *v1alpha1.Cluster) error {
log := ctrl.LoggerFrom(ctx)
log.Info("ensuring network policy")
expectedNetworkPolicy := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: k3kcontroller.SafeConcatNameWithPrefix(cluster.Name),
Namespace: cluster.Namespace,
},
TypeMeta: metav1.TypeMeta{
Kind: "NetworkPolicy",
APIVersion: "networking.k8s.io/v1",
},
Spec: networkingv1.NetworkPolicySpec{
PolicyTypes: []networkingv1.PolicyType{
networkingv1.PolicyTypeIngress,
networkingv1.PolicyTypeEgress,
},
Ingress: []networkingv1.NetworkPolicyIngressRule{
{},
},
Egress: []networkingv1.NetworkPolicyEgressRule{
{
To: []networkingv1.NetworkPolicyPeer{
{
IPBlock: &networkingv1.IPBlock{
CIDR: "0.0.0.0/0",
Except: []string{cluster.Status.ClusterCIDR},
},
},
{
NamespaceSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"kubernetes.io/metadata.name": cluster.Namespace,
},
},
},
{
NamespaceSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"kubernetes.io/metadata.name": metav1.NamespaceSystem,
},
},
PodSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"k8s-app": "kube-dns",
},
},
},
},
},
},
},
}
currentNetworkPolicy := expectedNetworkPolicy.DeepCopy()
result, err := controllerutil.CreateOrUpdate(ctx, c.Client, currentNetworkPolicy, func() error {
if err := controllerutil.SetControllerReference(cluster, currentNetworkPolicy, c.Scheme); err != nil {
return err
}
currentNetworkPolicy.Spec = expectedNetworkPolicy.Spec
return nil
})
if err != nil {
return err
}
key := client.ObjectKeyFromObject(currentNetworkPolicy)
if result != controllerutil.OperationResultNone {
log.Info("cluster network policy updated", "key", key, "result", result)
}
return nil
}
func (c *ClusterReconciler) ensureClusterService(ctx context.Context, cluster *v1alpha1.Cluster) (*v1.Service, error) {
log := ctrl.LoggerFrom(ctx)
log.Info("ensuring cluster service")

View File

@@ -6,11 +6,13 @@ import (
"time"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/cluster/server"
"sigs.k8s.io/controller-runtime/pkg/client"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"
@@ -18,7 +20,7 @@ import (
. "github.com/onsi/gomega"
)
var _ = Describe("Cluster Controller", func() {
var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), func() {
Context("creating a Cluster", func() {
@@ -55,7 +57,7 @@ var _ = Describe("Cluster Controller", func() {
Expect(cluster.Spec.Servers).To(Equal(ptr.To[int32](1)))
Expect(cluster.Spec.Version).To(BeEmpty())
// TOFIX
//Expect(cluster.Spec.Persistence.Type).To(Equal(v1alpha1.DynamicNodesType))
// Expect(cluster.Spec.Persistence.Type).To(Equal(v1alpha1.DynamicPersistenceMode))
serverVersion, err := k8s.DiscoveryClient.ServerVersion()
Expect(err).To(Not(HaveOccurred()))
@@ -70,9 +72,27 @@ var _ = Describe("Cluster Controller", func() {
WithTimeout(time.Second * 30).
WithPolling(time.Second).
Should(Equal(expectedHostVersion))
// check NetworkPolicy
expectedNetworkPolicy := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: k3kcontroller.SafeConcatNameWithPrefix(cluster.Name),
Namespace: cluster.Namespace,
},
}
err = k8sClient.Get(ctx, client.ObjectKeyFromObject(expectedNetworkPolicy), expectedNetworkPolicy)
Expect(err).To(Not(HaveOccurred()))
spec := expectedNetworkPolicy.Spec
Expect(spec.PolicyTypes).To(HaveLen(2))
Expect(spec.PolicyTypes).To(ContainElement(networkingv1.PolicyTypeEgress))
Expect(spec.PolicyTypes).To(ContainElement(networkingv1.PolicyTypeIngress))
Expect(spec.Ingress).To(Equal([]networkingv1.NetworkPolicyIngressRule{{}}))
})
When("exposing the cluster with nodePort and custom posrts", func() {
When("exposing the cluster with nodePort and custom ports", func() {
It("will have a NodePort service with the specified port exposed", func() {
cluster.Spec.Expose = &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{

View File

@@ -20,7 +20,7 @@ import (
. "github.com/onsi/gomega"
)
var _ = Describe("ClusterSet Controller", func() {
var _ = Describe("ClusterSet Controller", Label("controller"), Label("ClusterSet"), func() {
Context("creating a ClusterSet", func() {

View File

@@ -0,0 +1,96 @@
package k3k_test
import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = When("two virtual clusters are installed", Label("e2e"), func() {
var (
cluster1 *VirtualCluster
cluster2 *VirtualCluster
)
BeforeEach(func() {
clusters := NewVirtualClusters(2)
cluster1 = clusters[0]
cluster2 = clusters[1]
})
AfterEach(func() {
DeleteNamespaces(cluster1.Cluster.Namespace, cluster2.Cluster.Namespace)
})
It("can create pods in each of them that are isolated", func() {
pod1Cluster1, pod1Cluster1IP := cluster1.NewNginxPod("")
pod2Cluster1, pod2Cluster1IP := cluster1.NewNginxPod("")
pod1Cluster2, pod1Cluster2IP := cluster2.NewNginxPod("")
var (
stdout string
stderr string
curlCmd string
err error
)
By("Checking that Pods can reach themselves")
curlCmd = "curl --no-progress-meter " + pod1Cluster1IP
stdout, _, err = cluster1.ExecCmd(pod1Cluster1, curlCmd)
Expect(err).To(Not(HaveOccurred()))
Expect(stdout).To(ContainSubstring("Welcome to nginx!"))
curlCmd = "curl --no-progress-meter " + pod2Cluster1IP
stdout, _, err = cluster1.ExecCmd(pod2Cluster1, curlCmd)
Expect(err).To(Not(HaveOccurred()))
Expect(stdout).To(ContainSubstring("Welcome to nginx!"))
curlCmd = "curl --no-progress-meter " + pod1Cluster2IP
stdout, _, err = cluster2.ExecCmd(pod1Cluster2, curlCmd)
Expect(err).To(Not(HaveOccurred()))
Expect(stdout).To(ContainSubstring("Welcome to nginx!"))
// Pods in the same Virtual Cluster should be able to reach each other
// Pod1 should be able to call Pod2, and viceversa
By("Checking that Pods in the same virtual clusters can reach each other")
curlCmd = "curl --no-progress-meter " + pod2Cluster1IP
stdout, _, err = cluster1.ExecCmd(pod1Cluster1, curlCmd)
Expect(err).To(Not(HaveOccurred()))
Expect(stdout).To(ContainSubstring("Welcome to nginx!"))
curlCmd = "curl --no-progress-meter " + pod1Cluster1IP
stdout, _, err = cluster1.ExecCmd(pod2Cluster1, curlCmd)
Expect(err).To(Not(HaveOccurred()))
Expect(stdout).To(ContainSubstring("Welcome to nginx!"))
By("Checking that Pods in the different virtual clusters cannot reach each other")
// Pods in Cluster 1 should not be able to reach the Pod in Cluster 2
curlCmd = "curl --no-progress-meter " + pod1Cluster2IP
_, stderr, err = cluster1.ExecCmd(pod1Cluster1, curlCmd)
Expect(err).Should(HaveOccurred())
Expect(stderr).To(ContainSubstring("Failed to connect"))
curlCmd = "curl --no-progress-meter " + pod1Cluster2IP
_, stderr, err = cluster1.ExecCmd(pod2Cluster1, curlCmd)
Expect(err).To(HaveOccurred())
Expect(stderr).To(ContainSubstring("Failed to connect"))
// Pod in Cluster 2 should not be able to reach Pods in Cluster 1
curlCmd = "curl --no-progress-meter " + pod1Cluster1IP
_, stderr, err = cluster2.ExecCmd(pod1Cluster2, curlCmd)
Expect(err).To(HaveOccurred())
Expect(stderr).To(ContainSubstring("Failed to connect"))
curlCmd = "curl --no-progress-meter " + pod2Cluster1IP
_, stderr, err = cluster2.ExecCmd(pod1Cluster2, curlCmd)
Expect(err).To(HaveOccurred())
Expect(stderr).To(ContainSubstring("Failed to connect"))
})
})

View File

@@ -9,13 +9,12 @@ import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
var _ = When("k3k is installed", func() {
var _ = When("k3k is installed", Label("e2e"), func() {
It("is in Running status", func() {
// check that the controller is running
@@ -42,128 +41,44 @@ var _ = When("k3k is installed", func() {
})
})
var _ = When("a ephemeral cluster is installed", func() {
var _ = When("a ephemeral cluster is installed", Label("e2e"), func() {
var namespace string
var virtualCluster *VirtualCluster
BeforeEach(func() {
createdNS := &corev1.Namespace{ObjectMeta: v1.ObjectMeta{GenerateName: "ns-"}}
createdNS, err := k8s.CoreV1().Namespaces().Create(context.Background(), createdNS, v1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
namespace = createdNS.Name
virtualCluster = NewVirtualCluster()
})
AfterEach(func() {
DeleteNamespaces(virtualCluster.Cluster.Namespace)
})
It("can create a nginx pod", func() {
ctx := context.Background()
cluster := v1alpha1.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: "mycluster",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
TLSSANs: []string{hostIP},
Expose: &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{},
},
Persistence: v1alpha1.PersistenceConfig{
Type: v1alpha1.EphemeralPersistenceMode,
},
},
}
By(fmt.Sprintf("Creating virtual cluster %s/%s", cluster.Namespace, cluster.Name))
NewVirtualCluster(cluster)
By("Waiting to get a kubernetes client for the virtual cluster")
virtualK8sClient := NewVirtualK8sClient(cluster)
nginxPod := &corev1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "nginx",
Namespace: "default",
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{{
Name: "nginx",
Image: "nginx",
}},
},
}
nginxPod, err := virtualK8sClient.CoreV1().Pods(nginxPod.Namespace).Create(ctx, nginxPod, v1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
// check that the nginx Pod is up and running in the host cluster
Eventually(func() bool {
//labelSelector := fmt.Sprintf("%s=%s", translate.ClusterNameLabel, cluster.Namespace)
podList, err := k8s.CoreV1().Pods(namespace).List(ctx, v1.ListOptions{})
Expect(err).To(Not(HaveOccurred()))
for _, pod := range podList.Items {
resourceName := pod.Annotations[translate.ResourceNameAnnotation]
resourceNamespace := pod.Annotations[translate.ResourceNamespaceAnnotation]
if resourceName == nginxPod.Name && resourceNamespace == nginxPod.Namespace {
fmt.Fprintf(GinkgoWriter,
"pod=%s resource=%s/%s status=%s\n",
pod.Name, resourceNamespace, resourceName, pod.Status.Phase,
)
return pod.Status.Phase == corev1.PodRunning
}
}
return false
}).
WithTimeout(time.Minute).
WithPolling(time.Second * 5).
Should(BeTrue())
_, _ = virtualCluster.NewNginxPod("")
})
It("regenerates the bootstrap secret after a restart", func() {
ctx := context.Background()
cluster := v1alpha1.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: "mycluster",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
TLSSANs: []string{hostIP},
Expose: &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{},
},
Persistence: v1alpha1.PersistenceConfig{
Type: v1alpha1.EphemeralPersistenceMode,
},
},
}
By(fmt.Sprintf("Creating virtual cluster %s/%s", cluster.Namespace, cluster.Name))
NewVirtualCluster(cluster)
By("Waiting to get a kubernetes client for the virtual cluster")
virtualK8sClient := NewVirtualK8sClient(cluster)
_, err := virtualK8sClient.DiscoveryClient.ServerVersion()
_, err := virtualCluster.Client.DiscoveryClient.ServerVersion()
Expect(err).To(Not(HaveOccurred()))
labelSelector := "cluster=" + cluster.Name + ",role=server"
serverPods, err := k8s.CoreV1().Pods(namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server"
serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))
Expect(len(serverPods.Items)).To(Equal(1))
serverPod := serverPods.Items[0]
fmt.Fprintf(GinkgoWriter, "deleting pod %s/%s\n", serverPod.Namespace, serverPod.Name)
err = k8s.CoreV1().Pods(namespace).Delete(ctx, serverPod.Name, v1.DeleteOptions{})
err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).Delete(ctx, serverPod.Name, v1.DeleteOptions{})
Expect(err).To(Not(HaveOccurred()))
By("Deleting server pod")
// check that the server pods restarted
Eventually(func() any {
serverPods, err = k8s.CoreV1().Pods(namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
serverPods, err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))
Expect(len(serverPods.Items)).To(Equal(1))
return serverPods.Items[0].DeletionTimestamp
@@ -177,7 +92,7 @@ var _ = When("a ephemeral cluster is installed", func() {
By("Using old k8s client configuration should fail")
Eventually(func() bool {
_, err = virtualK8sClient.DiscoveryClient.ServerVersion()
_, err = virtualCluster.Client.DiscoveryClient.ServerVersion()
var unknownAuthorityErr x509.UnknownAuthorityError
return errors.As(err, &unknownAuthorityErr)
}).
@@ -188,8 +103,8 @@ var _ = When("a ephemeral cluster is installed", func() {
By("Recover new config should succeed")
Eventually(func() error {
virtualK8sClient = NewVirtualK8sClient(cluster)
_, err = virtualK8sClient.DiscoveryClient.ServerVersion()
virtualCluster.Client, virtualCluster.RestConfig = NewVirtualK8sClientAndConfig(virtualCluster.Cluster)
_, err = virtualCluster.Client.DiscoveryClient.ServerVersion()
return err
}).
WithTimeout(time.Minute * 2).
@@ -200,126 +115,52 @@ var _ = When("a ephemeral cluster is installed", func() {
var _ = When("a dynamic cluster is installed", func() {
var namespace string
var virtualCluster *VirtualCluster
BeforeEach(func() {
createdNS := &corev1.Namespace{ObjectMeta: v1.ObjectMeta{GenerateName: "ns-"}}
createdNS, err := k8s.CoreV1().Namespaces().Create(context.Background(), createdNS, v1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
namespace = createdNS.Name
namespace := NewNamespace()
cluster := NewCluster(namespace.Name)
cluster.Spec.Persistence.Type = v1alpha1.DynamicPersistenceMode
CreateCluster(cluster)
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
virtualCluster = &VirtualCluster{
Cluster: cluster,
RestConfig: restConfig,
Client: client,
}
})
AfterEach(func() {
DeleteNamespaces(virtualCluster.Cluster.Namespace)
})
It("can create a nginx pod", func() {
ctx := context.Background()
cluster := v1alpha1.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: "mycluster",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
TLSSANs: []string{hostIP},
Expose: &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{},
},
Persistence: v1alpha1.PersistenceConfig{
Type: v1alpha1.DynamicPersistenceMode,
},
},
}
By(fmt.Sprintf("Creating virtual cluster %s/%s", cluster.Namespace, cluster.Name))
NewVirtualCluster(cluster)
By("Waiting to get a kubernetes client for the virtual cluster")
virtualK8sClient := NewVirtualK8sClient(cluster)
nginxPod := &corev1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "nginx",
Namespace: "default",
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{{
Name: "nginx",
Image: "nginx",
}},
},
}
nginxPod, err := virtualK8sClient.CoreV1().Pods(nginxPod.Namespace).Create(ctx, nginxPod, v1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
// check that the nginx Pod is up and running in the host cluster
Eventually(func() bool {
//labelSelector := fmt.Sprintf("%s=%s", translate.ClusterNameLabel, cluster.Namespace)
podList, err := k8s.CoreV1().Pods(namespace).List(ctx, v1.ListOptions{})
Expect(err).To(Not(HaveOccurred()))
for _, pod := range podList.Items {
resourceName := pod.Annotations[translate.ResourceNameAnnotation]
resourceNamespace := pod.Annotations[translate.ResourceNamespaceAnnotation]
if resourceName == nginxPod.Name && resourceNamespace == nginxPod.Namespace {
fmt.Fprintf(GinkgoWriter,
"pod=%s resource=%s/%s status=%s\n",
pod.Name, resourceNamespace, resourceName, pod.Status.Phase,
)
return pod.Status.Phase == corev1.PodRunning
}
}
return false
}).
WithTimeout(time.Minute).
WithPolling(time.Second * 5).
Should(BeTrue())
_, _ = virtualCluster.NewNginxPod("")
})
It("use the same bootstrap secret after a restart", func() {
ctx := context.Background()
cluster := v1alpha1.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: "mycluster",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
TLSSANs: []string{hostIP},
Expose: &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{},
},
Persistence: v1alpha1.PersistenceConfig{
Type: v1alpha1.DynamicPersistenceMode,
},
},
}
By(fmt.Sprintf("Creating virtual cluster %s/%s", cluster.Namespace, cluster.Name))
NewVirtualCluster(cluster)
By("Waiting to get a kubernetes client for the virtual cluster")
virtualK8sClient := NewVirtualK8sClient(cluster)
_, err := virtualK8sClient.DiscoveryClient.ServerVersion()
_, err := virtualCluster.Client.DiscoveryClient.ServerVersion()
Expect(err).To(Not(HaveOccurred()))
labelSelector := "cluster=" + cluster.Name + ",role=server"
serverPods, err := k8s.CoreV1().Pods(namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server"
serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))
Expect(len(serverPods.Items)).To(Equal(1))
serverPod := serverPods.Items[0]
fmt.Fprintf(GinkgoWriter, "deleting pod %s/%s\n", serverPod.Namespace, serverPod.Name)
err = k8s.CoreV1().Pods(namespace).Delete(ctx, serverPod.Name, v1.DeleteOptions{})
err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).Delete(ctx, serverPod.Name, v1.DeleteOptions{})
Expect(err).To(Not(HaveOccurred()))
By("Deleting server pod")
// check that the server pods restarted
Eventually(func() any {
serverPods, err = k8s.CoreV1().Pods(namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
serverPods, err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))
Expect(len(serverPods.Items)).To(Equal(1))
return serverPods.Items[0].DeletionTimestamp
@@ -333,8 +174,7 @@ var _ = When("a dynamic cluster is installed", func() {
By("Using old k8s client configuration should succeed")
Eventually(func() error {
virtualK8sClient = NewVirtualK8sClient(cluster)
_, err = virtualK8sClient.DiscoveryClient.ServerVersion()
_, err = virtualCluster.Client.DiscoveryClient.ServerVersion()
return err
}).
WithTimeout(2 * time.Minute).

View File

@@ -1,29 +1,142 @@
package k3k_test
import (
"bytes"
"context"
"fmt"
"strings"
"sync"
"time"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/certs"
"github.com/rancher/k3k/pkg/controller/kubeconfig"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/client-go/tools/remotecommand"
"k8s.io/kubectl/pkg/scheme"
"k8s.io/utils/ptr"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
func NewVirtualCluster(cluster v1alpha1.Cluster) {
type VirtualCluster struct {
Cluster *v1alpha1.Cluster
RestConfig *rest.Config
Client *kubernetes.Clientset
}
func NewVirtualCluster() *VirtualCluster {
GinkgoHelper()
namespace := NewNamespace()
By(fmt.Sprintf("Creating new virtual cluster in namespace %s", namespace.Name))
cluster := NewCluster(namespace.Name)
CreateCluster(cluster)
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
By(fmt.Sprintf("Created virtual cluster %s/%s", cluster.Namespace, cluster.Name))
return &VirtualCluster{
Cluster: cluster,
RestConfig: restConfig,
Client: client,
}
}
// NewVirtualClusters will create multiple Virtual Clusters asynchronously
func NewVirtualClusters(n int) []*VirtualCluster {
GinkgoHelper()
var clusters []*VirtualCluster
wg := sync.WaitGroup{}
wg.Add(n)
for range n {
go func() {
defer wg.Done()
defer GinkgoRecover()
clusters = append(clusters, NewVirtualCluster())
}()
}
wg.Wait()
return clusters
}
func NewNamespace() *corev1.Namespace {
GinkgoHelper()
namespace := &corev1.Namespace{ObjectMeta: v1.ObjectMeta{GenerateName: "ns-"}}
namespace, err := k8s.CoreV1().Namespaces().Create(context.Background(), namespace, v1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
return namespace
}
func DeleteNamespaces(names ...string) {
GinkgoHelper()
wg := sync.WaitGroup{}
wg.Add(len(names))
for _, name := range names {
go func() {
defer wg.Done()
defer GinkgoRecover()
deleteNamespace(name)
}()
}
wg.Wait()
}
func deleteNamespace(name string) {
GinkgoHelper()
By(fmt.Sprintf("Deleting namespace %s", name))
err := k8s.CoreV1().Namespaces().Delete(context.Background(), name, v1.DeleteOptions{
GracePeriodSeconds: ptr.To[int64](0),
})
Expect(err).To(Not(HaveOccurred()))
}
func NewCluster(namespace string) *v1alpha1.Cluster {
return &v1alpha1.Cluster{
ObjectMeta: v1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
TLSSANs: []string{hostIP},
Expose: &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{},
},
Persistence: v1alpha1.PersistenceConfig{
Type: v1alpha1.EphemeralPersistenceMode,
},
},
}
}
func CreateCluster(cluster *v1alpha1.Cluster) {
GinkgoHelper()
ctx := context.Background()
err := k8sClient.Create(ctx, &cluster)
err := k8sClient.Create(ctx, cluster)
Expect(err).To(Not(HaveOccurred()))
// check that the server Pod and the Kubelet are in Ready state
@@ -58,7 +171,13 @@ func NewVirtualCluster(cluster v1alpha1.Cluster) {
}
// NewVirtualK8sClient returns a Kubernetes ClientSet for the virtual cluster
func NewVirtualK8sClient(cluster v1alpha1.Cluster) *kubernetes.Clientset {
func NewVirtualK8sClient(cluster *v1alpha1.Cluster) *kubernetes.Clientset {
virtualK8sClient, _ := NewVirtualK8sClientAndConfig(cluster)
return virtualK8sClient
}
// NewVirtualK8sClient returns a Kubernetes ClientSet for the virtual cluster
func NewVirtualK8sClientAndConfig(cluster *v1alpha1.Cluster) (*kubernetes.Clientset, *rest.Config) {
GinkgoHelper()
var (
@@ -72,7 +191,7 @@ func NewVirtualK8sClient(cluster v1alpha1.Cluster) *kubernetes.Clientset {
vKubeconfig := kubeconfig.New()
kubeletAltName := fmt.Sprintf("k3k-%s-kubelet", cluster.Name)
vKubeconfig.AltNames = certs.AddSANs([]string{hostIP, kubeletAltName})
config, err = vKubeconfig.Extract(ctx, k8sClient, &cluster, hostIP)
config, err = vKubeconfig.Extract(ctx, k8sClient, cluster, hostIP)
return err
}).
WithTimeout(time.Minute * 2).
@@ -87,5 +206,97 @@ func NewVirtualK8sClient(cluster v1alpha1.Cluster) *kubernetes.Clientset {
virtualK8sClient, err := kubernetes.NewForConfig(restcfg)
Expect(err).To(Not(HaveOccurred()))
return virtualK8sClient
return virtualK8sClient, restcfg
}
func (c *VirtualCluster) NewNginxPod(namespace string) (*corev1.Pod, string) {
GinkgoHelper()
if namespace == "" {
namespace = "default"
}
nginxPod := &corev1.Pod{
ObjectMeta: v1.ObjectMeta{
GenerateName: "nginx-",
Namespace: namespace,
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{{
Name: "nginx",
Image: "nginx",
}},
},
}
By("Creating Pod")
ctx := context.Background()
nginxPod, err := c.Client.CoreV1().Pods(nginxPod.Namespace).Create(ctx, nginxPod, v1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
var podIP string
// check that the nginx Pod is up and running in the host cluster
Eventually(func() bool {
podList, err := k8s.CoreV1().Pods(c.Cluster.Namespace).List(ctx, v1.ListOptions{})
Expect(err).To(Not(HaveOccurred()))
for _, pod := range podList.Items {
resourceName := pod.Annotations[translate.ResourceNameAnnotation]
resourceNamespace := pod.Annotations[translate.ResourceNamespaceAnnotation]
if resourceName == nginxPod.Name && resourceNamespace == nginxPod.Namespace {
podIP = pod.Status.PodIP
fmt.Fprintf(GinkgoWriter,
"pod=%s resource=%s/%s status=%s podIP=%s\n",
pod.Name, resourceNamespace, resourceName, pod.Status.Phase, podIP,
)
return pod.Status.Phase == corev1.PodRunning && podIP != ""
}
}
return false
}).
WithTimeout(time.Minute).
WithPolling(time.Second * 5).
Should(BeTrue())
// get the running pod from the virtual cluster
nginxPod, err = c.Client.CoreV1().Pods(nginxPod.Namespace).Get(ctx, nginxPod.Name, v1.GetOptions{})
Expect(err).To(Not(HaveOccurred()))
return nginxPod, podIP
}
// ExecCmd exec command on specific pod and wait the command's output.
func (c *VirtualCluster) ExecCmd(pod *corev1.Pod, command string) (string, string, error) {
option := &corev1.PodExecOptions{
Command: []string{"sh", "-c", command},
Stdout: true,
Stderr: true,
}
req := c.Client.CoreV1().RESTClient().Post().Resource("pods").Name(pod.Name).Namespace(pod.Namespace).SubResource("exec")
req.VersionedParams(option, scheme.ParameterCodec)
exec, err := remotecommand.NewSPDYExecutor(c.RestConfig, "POST", req.URL())
if err != nil {
return "", "", err
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
defer cancel()
stdout := &bytes.Buffer{}
stderr := &bytes.Buffer{}
err = exec.StreamWithContext(ctx, remotecommand.StreamOptions{
Stdout: stdout,
Stderr: stderr,
})
return stdout.String(), stderr.String(), err
}