mirror of
https://github.com/rancher/k3k.git
synced 2026-05-06 01:16:52 +00:00
Use K3S host cluster for E2E tests (#492)
* Add kubeconfig to e2e_tests Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> * add E2E_KUBECONFIG env variable Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> * fix yaml permissions for kubeconfig Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> * Fix image name and use ttl.sh Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> * add uuidgen result to a file Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> * add hostIP * Add k3s version to e2e test Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> * remove comment Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> * remove virtual mode tests Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> * fix failed test Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> * Fixes Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> * fixes Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> * fixes Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> * Fixes Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> * Add KUBECONFIG env variable to the make install Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> * add k3kcli to github_path Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> * Use docker installation for testing the cli Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> * Fixes Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> * fixes Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> * typo Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> * fix test cli Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> * lint Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> * fixes Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> * typo Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> --------- Signed-off-by: galal-hussein 
<hussein.galal.ahmed.11@gmail.com>
This commit is contained in:
29
.github/workflows/test.yaml
vendored
29
.github/workflows/test.yaml
vendored
@@ -80,17 +80,30 @@ jobs:
|
||||
- name: Install Ginkgo
|
||||
run: go install github.com/onsi/ginkgo/v2/ginkgo
|
||||
|
||||
- name: Set coverage environment
|
||||
- name: Setup environment
|
||||
run: |
|
||||
mkdir ${{ github.workspace }}/covdata
|
||||
|
||||
echo "COVERAGE=true" >> $GITHUB_ENV
|
||||
echo "GOCOVERDIR=${{ github.workspace }}/covdata" >> $GITHUB_ENV
|
||||
echo "REPO=ttl.sh/$(uuidgen)" >> $GITHUB_ENV
|
||||
echo "VERSION=1h" >> $GITHUB_ENV
|
||||
echo "K3S_HOST_VERSION=v1.32.1+k3s1" >> $GITHUB_ENV
|
||||
|
||||
- name: Build and package
|
||||
- name: Install k3s
|
||||
run: |
|
||||
curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=${{ env.K3S_HOST_VERSION }} INSTALL_K3S_EXEC="--write-kubeconfig-mode=777" sh -s -
|
||||
|
||||
- name: Build and package and push dev images
|
||||
env:
|
||||
KUBECONFIG: /etc/rancher/k3s/k3s.yaml
|
||||
REPO: ${{ env.REPO }}
|
||||
VERSION: ${{ env.VERSION }}
|
||||
run: |
|
||||
make build
|
||||
make package
|
||||
make push
|
||||
make install
|
||||
|
||||
# add k3kcli to $PATH
|
||||
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
|
||||
@@ -99,7 +112,11 @@ jobs:
|
||||
run: k3kcli -v
|
||||
|
||||
- name: Run e2e tests
|
||||
run: make test-e2e
|
||||
env:
|
||||
KUBECONFIG: /etc/rancher/k3s/k3s.yaml
|
||||
REPO: ${{ env.REPO }}
|
||||
VERSION: ${{ env.VERSION }}
|
||||
run: make test-e2e
|
||||
|
||||
- name: Convert coverage data
|
||||
run: go tool covdata textfmt -i=${GOCOVERDIR} -o ${GOCOVERDIR}/cover.out
|
||||
@@ -150,12 +167,13 @@ jobs:
|
||||
- name: Install Ginkgo
|
||||
run: go install github.com/onsi/ginkgo/v2/ginkgo
|
||||
|
||||
- name: Set coverage environment
|
||||
- name: Setup environment
|
||||
run: |
|
||||
mkdir ${{ github.workspace }}/covdata
|
||||
|
||||
echo "COVERAGE=true" >> $GITHUB_ENV
|
||||
echo "GOCOVERDIR=${{ github.workspace }}/covdata" >> $GITHUB_ENV
|
||||
echo "K3S_HOST_VERSION=v1.32.1+k3s1" >> $GITHUB_ENV
|
||||
|
||||
- name: Build and package
|
||||
run: |
|
||||
@@ -169,6 +187,9 @@ jobs:
|
||||
run: k3kcli -v
|
||||
|
||||
- name: Run cli tests
|
||||
env:
|
||||
K3K_DOCKER_INSTALL: "true"
|
||||
K3S_HOST_VERSION: "${{ env.K3S_HOST_VERSION }}"
|
||||
run: make test-cli
|
||||
|
||||
- name: Convert coverage data
|
||||
|
||||
@@ -4,6 +4,8 @@ import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"k8s.io/utils/ptr"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
@@ -201,3 +203,198 @@ var _ = When("a shared mode cluster update its server args", Label("e2e"), func(
|
||||
Should(Succeed())
|
||||
})
|
||||
})
|
||||
|
||||
var _ = When("a virtual mode cluster update its envs", Label("e2e"), func() {
|
||||
var virtualCluster *VirtualCluster
|
||||
ctx := context.Background()
|
||||
BeforeEach(func() {
|
||||
namespace := NewNamespace()
|
||||
|
||||
cluster := NewCluster(namespace.Name)
|
||||
|
||||
// Add initial environment variables for server
|
||||
cluster.Spec.ServerEnvs = []corev1.EnvVar{
|
||||
{
|
||||
Name: "TEST_SERVER_ENV_1",
|
||||
Value: "not_upgraded",
|
||||
},
|
||||
{
|
||||
Name: "TEST_SERVER_ENV_2",
|
||||
Value: "toBeRemoved",
|
||||
},
|
||||
}
|
||||
// Add initial environment variables for agent
|
||||
cluster.Spec.AgentEnvs = []corev1.EnvVar{
|
||||
{
|
||||
Name: "TEST_AGENT_ENV_1",
|
||||
Value: "not_upgraded",
|
||||
},
|
||||
{
|
||||
Name: "TEST_AGENT_ENV_2",
|
||||
Value: "toBeRemoved",
|
||||
},
|
||||
}
|
||||
|
||||
cluster.Spec.Mode = v1alpha1.VirtualClusterMode
|
||||
cluster.Spec.Agents = ptr.To(int32(1))
|
||||
|
||||
CreateCluster(cluster)
|
||||
|
||||
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
|
||||
|
||||
virtualCluster = &VirtualCluster{
|
||||
Cluster: cluster,
|
||||
RestConfig: restConfig,
|
||||
Client: client,
|
||||
}
|
||||
sPods := listServerPods(ctx, virtualCluster)
|
||||
Expect(len(sPods)).To(Equal(1))
|
||||
|
||||
serverPod := sPods[0]
|
||||
|
||||
serverEnv1, ok := getEnv(&serverPod, "TEST_SERVER_ENV_1")
|
||||
Expect(ok).To(BeTrue())
|
||||
Expect(serverEnv1).To(Equal("not_upgraded"))
|
||||
|
||||
serverEnv2, ok := getEnv(&serverPod, "TEST_SERVER_ENV_2")
|
||||
Expect(ok).To(BeTrue())
|
||||
Expect(serverEnv2).To(Equal("toBeRemoved"))
|
||||
|
||||
aPods := listAgentPods(ctx, virtualCluster)
|
||||
Expect(len(aPods)).To(Equal(1))
|
||||
|
||||
agentPod := aPods[0]
|
||||
|
||||
agentEnv1, ok := getEnv(&agentPod, "TEST_AGENT_ENV_1")
|
||||
Expect(ok).To(BeTrue())
|
||||
Expect(agentEnv1).To(Equal("not_upgraded"))
|
||||
|
||||
agentEnv2, ok := getEnv(&agentPod, "TEST_AGENT_ENV_2")
|
||||
Expect(ok).To(BeTrue())
|
||||
Expect(agentEnv2).To(Equal("toBeRemoved"))
|
||||
})
|
||||
It("will update server and agent envs when cluster is updated", func() {
|
||||
Eventually(func(g Gomega) {
|
||||
var cluster v1alpha1.Cluster
|
||||
|
||||
err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// update both agent and server envs
|
||||
cluster.Spec.ServerEnvs = []corev1.EnvVar{
|
||||
{
|
||||
Name: "TEST_SERVER_ENV_1",
|
||||
Value: "upgraded",
|
||||
},
|
||||
{
|
||||
Name: "TEST_SERVER_ENV_3",
|
||||
Value: "new",
|
||||
},
|
||||
}
|
||||
cluster.Spec.AgentEnvs = []corev1.EnvVar{
|
||||
{
|
||||
Name: "TEST_AGENT_ENV_1",
|
||||
Value: "upgraded",
|
||||
},
|
||||
{
|
||||
Name: "TEST_AGENT_ENV_3",
|
||||
Value: "new",
|
||||
},
|
||||
}
|
||||
|
||||
err = k8sClient.Update(ctx, &cluster)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// server pods
|
||||
serverPods := listServerPods(ctx, virtualCluster)
|
||||
g.Expect(len(serverPods)).To(Equal(1))
|
||||
|
||||
serverEnv1, ok := getEnv(&serverPods[0], "TEST_SERVER_ENV_1")
|
||||
g.Expect(ok).To(BeTrue())
|
||||
g.Expect(serverEnv1).To(Equal("upgraded"))
|
||||
|
||||
_, ok = getEnv(&serverPods[0], "TEST_SERVER_ENV_2")
|
||||
g.Expect(ok).To(BeFalse())
|
||||
|
||||
serverEnv3, ok := getEnv(&serverPods[0], "TEST_SERVER_ENV_3")
|
||||
g.Expect(ok).To(BeTrue())
|
||||
g.Expect(serverEnv3).To(Equal("new"))
|
||||
|
||||
// agent pods
|
||||
aPods := listAgentPods(ctx, virtualCluster)
|
||||
g.Expect(len(aPods)).To(Equal(1))
|
||||
|
||||
agentEnv1, ok := getEnv(&aPods[0], "TEST_AGENT_ENV_1")
|
||||
g.Expect(ok).To(BeTrue())
|
||||
g.Expect(agentEnv1).To(Equal("upgraded"))
|
||||
|
||||
_, ok = getEnv(&aPods[0], "TEST_AGENT_ENV_2")
|
||||
g.Expect(ok).To(BeFalse())
|
||||
|
||||
agentEnv3, ok := getEnv(&aPods[0], "TEST_AGENT_ENV_3")
|
||||
g.Expect(ok).To(BeTrue())
|
||||
g.Expect(agentEnv3).To(Equal("new"))
|
||||
}).
|
||||
WithPolling(time.Second * 2).
|
||||
WithTimeout(time.Minute * 2).
|
||||
Should(Succeed())
|
||||
})
|
||||
})
|
||||
|
||||
var _ = When("a virtual mode cluster update its server args", Label("e2e"), func() {
|
||||
var virtualCluster *VirtualCluster
|
||||
ctx := context.Background()
|
||||
BeforeEach(func() {
|
||||
namespace := NewNamespace()
|
||||
|
||||
cluster := NewCluster(namespace.Name)
|
||||
|
||||
// Add initial args for server
|
||||
cluster.Spec.ServerArgs = []string{
|
||||
"--node-label=test_server=not_upgraded",
|
||||
}
|
||||
|
||||
cluster.Spec.Mode = v1alpha1.VirtualClusterMode
|
||||
cluster.Spec.Agents = ptr.To(int32(1))
|
||||
|
||||
CreateCluster(cluster)
|
||||
|
||||
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
|
||||
|
||||
virtualCluster = &VirtualCluster{
|
||||
Cluster: cluster,
|
||||
RestConfig: restConfig,
|
||||
Client: client,
|
||||
}
|
||||
sPods := listServerPods(ctx, virtualCluster)
|
||||
Expect(len(sPods)).To(Equal(1))
|
||||
|
||||
serverPod := sPods[0]
|
||||
|
||||
Expect(isArgFound(&serverPod, "--node-label=test_server=not_upgraded")).To(BeTrue())
|
||||
})
|
||||
It("will update server args", func() {
|
||||
Eventually(func(g Gomega) {
|
||||
var cluster v1alpha1.Cluster
|
||||
|
||||
err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
cluster.Spec.ServerArgs = []string{
|
||||
"--node-label=test_server=upgraded",
|
||||
}
|
||||
|
||||
err = k8sClient.Update(ctx, &cluster)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// server pods
|
||||
sPods := listServerPods(ctx, virtualCluster)
|
||||
g.Expect(len(sPods)).To(Equal(1))
|
||||
|
||||
g.Expect(isArgFound(&sPods[0], "--node-label=test_server=upgraded")).To(BeTrue())
|
||||
}).
|
||||
WithPolling(time.Second * 2).
|
||||
WithTimeout(time.Minute * 2).
|
||||
Should(Succeed())
|
||||
})
|
||||
})
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
@@ -89,7 +90,7 @@ func NewVirtualClusters(n int) []*VirtualCluster {
|
||||
func NewNamespace() *corev1.Namespace {
|
||||
GinkgoHelper()
|
||||
|
||||
namespace := &corev1.Namespace{ObjectMeta: v1.ObjectMeta{GenerateName: "ns-"}}
|
||||
namespace := &corev1.Namespace{ObjectMeta: v1.ObjectMeta{GenerateName: "ns-", Labels: map[string]string{"e2e": "true"}}}
|
||||
namespace, err := k8s.CoreV1().Namespaces().Create(context.Background(), namespace, v1.CreateOptions{})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
@@ -391,3 +392,16 @@ func isArgFound(pod *corev1.Pod, arg string) bool {
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func getServerIP(ctx context.Context, cfg *rest.Config) (string, error) {
|
||||
if k3sContainer != nil {
|
||||
return k3sContainer.ContainerIP(ctx)
|
||||
}
|
||||
|
||||
u, err := url.Parse(cfg.Host)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
// If Host includes a port, u.Hostname() extracts just the hostname part
|
||||
return u.Hostname(), nil
|
||||
}
|
||||
|
||||
@@ -16,7 +16,7 @@ var _ = When("k3k is installed", Label("e2e"), func() {
|
||||
// check that the controller is running
|
||||
Eventually(func() bool {
|
||||
opts := v1.ListOptions{LabelSelector: "app.kubernetes.io/name=k3k"}
|
||||
podList, err := k8s.CoreV1().Pods("k3k-system").List(context.Background(), opts)
|
||||
podList, err := k8s.CoreV1().Pods(k3kNamespace).List(context.Background(), opts)
|
||||
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
Expect(podList.Items).To(Not(BeEmpty()))
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -48,56 +49,57 @@ func TestTests(t *testing.T) {
|
||||
}
|
||||
|
||||
var (
|
||||
k3sContainer *k3s.K3sContainer
|
||||
hostIP string
|
||||
restcfg *rest.Config
|
||||
k8s *kubernetes.Clientset
|
||||
k8sClient client.Client
|
||||
kubeconfigPath string
|
||||
k3sContainer *k3s.K3sContainer
|
||||
hostIP string
|
||||
restcfg *rest.Config
|
||||
k8s *kubernetes.Clientset
|
||||
k8sClient client.Client
|
||||
kubeconfigPath string
|
||||
repo string
|
||||
helmActionConfig *action.Configuration
|
||||
)
|
||||
|
||||
var _ = BeforeSuite(func() {
|
||||
var err error
|
||||
ctx := context.Background()
|
||||
|
||||
GinkgoWriter.Println("GOCOVERDIR:", os.Getenv("GOCOVERDIR"))
|
||||
|
||||
k3sContainer, err = k3s.Run(ctx, "rancher/k3s:v1.32.1-k3s1")
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
repo = os.Getenv("REPO")
|
||||
if repo == "" {
|
||||
repo = "rancher"
|
||||
}
|
||||
|
||||
hostIP, err = k3sContainer.ContainerIP(ctx)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
_, dockerInstallEnabled := os.LookupEnv("K3K_DOCKER_INSTALL")
|
||||
|
||||
GinkgoWriter.Println("K3s containerIP: " + hostIP)
|
||||
|
||||
kubeconfig, err := k3sContainer.GetKubeConfig(context.Background())
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
tmpFile, err := os.CreateTemp("", "kubeconfig-")
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
_, err = tmpFile.Write(kubeconfig)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
Expect(tmpFile.Close()).To(Succeed())
|
||||
|
||||
kubeconfigPath = tmpFile.Name()
|
||||
|
||||
Expect(os.Setenv("KUBECONFIG", kubeconfigPath)).To(Succeed())
|
||||
|
||||
DeferCleanup(os.Remove, kubeconfigPath)
|
||||
|
||||
initKubernetesClient(kubeconfig)
|
||||
installK3kChart(ctx, kubeconfig)
|
||||
if dockerInstallEnabled {
|
||||
installK3SDocker(ctx)
|
||||
initKubernetesClient(ctx)
|
||||
installK3kChart()
|
||||
} else {
|
||||
initKubernetesClient(ctx)
|
||||
}
|
||||
|
||||
patchPVC(ctx, k8s)
|
||||
})
|
||||
|
||||
func initKubernetesClient(kubeconfig []byte) {
|
||||
var err error
|
||||
func initKubernetesClient(ctx context.Context) {
|
||||
var (
|
||||
err error
|
||||
kubeconfig []byte
|
||||
)
|
||||
|
||||
kubeconfigPath := os.Getenv("KUBECONFIG")
|
||||
Expect(kubeconfigPath).To(Not(BeEmpty()))
|
||||
|
||||
kubeconfig, err = os.ReadFile(kubeconfigPath)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
restcfg, err = clientcmd.RESTConfigFromKubeConfig(kubeconfig)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
hostIP, err = getServerIP(ctx, restcfg)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
k8s, err = kubernetes.NewForConfig(restcfg)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
@@ -107,6 +109,7 @@ func initKubernetesClient(kubeconfig []byte) {
|
||||
|
||||
logger, err := zap.NewDevelopment()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
log.SetLogger(zapr.NewLogger(logger))
|
||||
}
|
||||
|
||||
@@ -121,24 +124,68 @@ func buildScheme() *runtime.Scheme {
|
||||
return scheme
|
||||
}
|
||||
|
||||
func installK3kChart(ctx context.Context, kubeconfig []byte) {
|
||||
func installK3SDocker(ctx context.Context) {
|
||||
var (
|
||||
err error
|
||||
kubeconfig []byte
|
||||
)
|
||||
|
||||
k3sHostVersion := os.Getenv("K3S_HOST_VERSION")
|
||||
if k3sHostVersion == "" {
|
||||
k3sHostVersion = "v1.32.1+k3s1"
|
||||
}
|
||||
|
||||
k3sHostVersion = strings.ReplaceAll(k3sHostVersion, "+", "-")
|
||||
|
||||
k3sContainer, err = k3s.Run(ctx, "rancher/k3s:"+k3sHostVersion)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
containerIP, err := k3sContainer.ContainerIP(ctx)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
GinkgoWriter.Println("K3s containerIP: " + containerIP)
|
||||
|
||||
kubeconfig, err = k3sContainer.GetKubeConfig(context.Background())
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
tmpFile, err := os.CreateTemp("", "kubeconfig-")
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
_, err = tmpFile.Write(kubeconfig)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
Expect(tmpFile.Close()).To(Succeed())
|
||||
kubeconfigPath = tmpFile.Name()
|
||||
|
||||
err = k3sContainer.LoadImages(ctx, repo+"/k3k:dev", repo+"/k3k-kubelet:dev")
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
DeferCleanup(os.Remove, kubeconfigPath)
|
||||
|
||||
Expect(os.Setenv("KUBECONFIG", kubeconfigPath)).To(Succeed())
|
||||
GinkgoWriter.Print(kubeconfigPath)
|
||||
GinkgoWriter.Print(string(kubeconfig))
|
||||
}
|
||||
|
||||
func installK3kChart() {
|
||||
pwd, err := os.Getwd()
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
k3kChart, err := loader.Load(path.Join(pwd, "../charts/k3k"))
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
actionConfig := new(action.Configuration)
|
||||
helmActionConfig = new(action.Configuration)
|
||||
|
||||
kubeconfig, err := os.ReadFile(kubeconfigPath)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
restClientGetter, err := NewRESTClientGetter(kubeconfig)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
err = actionConfig.Init(restClientGetter, k3kNamespace, os.Getenv("HELM_DRIVER"), func(format string, v ...any) {
|
||||
err = helmActionConfig.Init(restClientGetter, k3kNamespace, os.Getenv("HELM_DRIVER"), func(format string, v ...any) {
|
||||
GinkgoWriter.Printf("helm debug: "+format+"\n", v...)
|
||||
})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
iCli := action.NewInstall(actionConfig)
|
||||
iCli := action.NewInstall(helmActionConfig)
|
||||
iCli.ReleaseName = k3kName
|
||||
iCli.Namespace = k3kNamespace
|
||||
iCli.CreateNamespace = true
|
||||
@@ -148,7 +195,7 @@ func installK3kChart(ctx context.Context, kubeconfig []byte) {
|
||||
controllerMap, _ := k3kChart.Values["controller"].(map[string]any)
|
||||
imageMap, _ := controllerMap["image"].(map[string]any)
|
||||
maps.Copy(imageMap, map[string]any{
|
||||
"repository": "rancher/k3k",
|
||||
"repository": repo + "/k3k",
|
||||
"tag": "dev",
|
||||
"pullPolicy": "IfNotPresent",
|
||||
})
|
||||
@@ -157,13 +204,10 @@ func installK3kChart(ctx context.Context, kubeconfig []byte) {
|
||||
sharedAgentMap, _ := agentMap["shared"].(map[string]any)
|
||||
sharedAgentImageMap, _ := sharedAgentMap["image"].(map[string]any)
|
||||
maps.Copy(sharedAgentImageMap, map[string]any{
|
||||
"repository": "rancher/k3k-kubelet",
|
||||
"repository": repo + "/k3k-kubelet",
|
||||
"tag": "dev",
|
||||
})
|
||||
|
||||
err = k3sContainer.LoadImages(ctx, "rancher/k3k:dev", "rancher/k3k-kubelet:dev")
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
release, err := iCli.Run(k3kChart, k3kChart.Values)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
@@ -279,17 +323,18 @@ var _ = AfterSuite(func() {
|
||||
}
|
||||
|
||||
dumpK3kCoverageData(ctx, goCoverDir)
|
||||
if k3sContainer != nil {
|
||||
// dump k3s logs
|
||||
k3sLogs, err := k3sContainer.Logs(ctx)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
writeLogs("k3s.log", k3sLogs)
|
||||
|
||||
// dump k3s logs
|
||||
k3sLogs, err := k3sContainer.Logs(ctx)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
writeLogs("k3s.log", k3sLogs)
|
||||
// dump k3k controller logs
|
||||
k3kLogs := getK3kLogs(ctx)
|
||||
writeLogs("k3k.log", k3kLogs)
|
||||
|
||||
// dump k3k controller logs
|
||||
k3kLogs := getK3kLogs(ctx)
|
||||
writeLogs("k3k.log", k3kLogs)
|
||||
|
||||
testcontainers.CleanupContainer(GinkgoTB(), k3sContainer)
|
||||
testcontainers.CleanupContainer(GinkgoTB(), k3sContainer)
|
||||
}
|
||||
})
|
||||
|
||||
// dumpK3kCoverageData will kill the K3k controller container to force it to dump the coverage data.
|
||||
|
||||
Reference in New Issue
Block a user