package k3k_test

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"maps"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"strings"
	"testing"
	"time"

	"github.com/go-logr/zapr"
	"github.com/testcontainers/testcontainers-go"
	"github.com/testcontainers/testcontainers-go/modules/k3s"
	"go.uber.org/zap"
	"helm.sh/helm/v3/pkg/action"
	"helm.sh/helm/v3/pkg/chart/loader"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/remotecommand"
	"k8s.io/kubernetes/pkg/api/v1/pod"
	"k8s.io/utils/ptr"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/log"

	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

const (
	k3kNamespace = "k3k-system"

	k3sVersion = "v1.33.7-k3s1"

	e2eTestLabel           = "e2e"
	slowTestsLabel         = "slow"
	updateTestsLabel       = "update"
	persistenceTestsLabel  = "persistence"
	networkingTestsLabel   = "networking"
	statusTestsLabel       = "status"
	certificatesTestsLabel = "certificates"
	registryTestsLabel     = "registry"

	registryImage               = "registry:2"
	registryCACertSecretName    = "private-registry-ca-cert"
	registryCertSecretName      = "private-registry-cert"
	registryConfigSecretName    = "private-registry-config"
	k3sRegistryConfigSecretName = "k3s-registry-config"
)

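// TestTests is the standard `go test` entry point that hands control to Ginkgo.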
func TestTests(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Tests Suite")
}

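// Suite-wide state: the optional host k3s container, clients for the host
// cluster, and the Helm configuration used to install the k3k chart.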
var (
	k3sContainer     *k3s.K3sContainer
	hostIP           string
	restcfg          *rest.Config
	k8s              *kubernetes.Clientset
	k8sClient        client.Client
	kubeconfigPath   string
	repo             string
	helmActionConfig *action.Configuration
)

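// When K3K_DOCKER_INSTALL is set, the suite provisions its own k3s host
// cluster in Docker and installs the k3k chart into it; otherwise it targets
// an existing cluster via KUBECONFIG.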
var _ = BeforeSuite(func() {
	ctx := context.Background()

	GinkgoWriter.Println("GOCOVERDIR:", os.Getenv("GOCOVERDIR"))

	repo = os.Getenv("REPO")
	if repo == "" {
		repo = "rancher"
	}

	_, dockerInstallEnabled := os.LookupEnv("K3K_DOCKER_INSTALL")

	if dockerInstallEnabled {
		installK3SDocker(ctx)
		initKubernetesClient(ctx)
		installK3kChart()
	} else {
		initKubernetesClient(ctx)
	}

	patchPVC(ctx, k8s)
})

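// initKubernetesClient builds the REST config, typed clientset, and
// controller-runtime client from the kubeconfig referenced by KUBECONFIG,
// and wires a development zap logger into controller-runtime.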
func initKubernetesClient(ctx context.Context) {
	var (
		err        error
		kubeconfig []byte
	)

	// Assign the package-level kubeconfigPath (":=" would shadow it and leave
	// the global empty when running against an existing cluster).
	kubeconfigPath = os.Getenv("KUBECONFIG")
	Expect(kubeconfigPath).To(Not(BeEmpty()))

	kubeconfig, err = os.ReadFile(kubeconfigPath)
	Expect(err).To(Not(HaveOccurred()))

	restcfg, err = clientcmd.RESTConfigFromKubeConfig(kubeconfig)
	Expect(err).To(Not(HaveOccurred()))

	hostIP, err = getServerIP(ctx, restcfg)
	Expect(err).To(Not(HaveOccurred()))

	k8s, err = kubernetes.NewForConfig(restcfg)
	Expect(err).To(Not(HaveOccurred()))

	scheme := buildScheme()
	k8sClient, err = client.New(restcfg, client.Options{Scheme: scheme})
	Expect(err).NotTo(HaveOccurred())

	logger, err := zap.NewDevelopment()
	Expect(err).NotTo(HaveOccurred())

	log.SetLogger(zapr.NewLogger(logger))
}

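// buildScheme registers the core, apps, networking, and k3k v1beta1 API
// groups needed by the controller-runtime client.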
func buildScheme() *runtime.Scheme {
	scheme := runtime.NewScheme()

	err := v1.AddToScheme(scheme)
	Expect(err).NotTo(HaveOccurred())
	err = appsv1.AddToScheme(scheme)
	Expect(err).NotTo(HaveOccurred())
	err = networkingv1.AddToScheme(scheme)
	Expect(err).NotTo(HaveOccurred())
	err = v1beta1.AddToScheme(scheme)
	Expect(err).NotTo(HaveOccurred())

	return scheme
}

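// installK3SDocker starts a throwaway k3s host cluster via testcontainers,
// loads the locally built k3k images into it, and points KUBECONFIG at a
// temporary kubeconfig for the container.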
func installK3SDocker(ctx context.Context) {
	var (
		err        error
		kubeconfig []byte
	)

	k3sHostVersion := os.Getenv("K3S_HOST_VERSION")
	if k3sHostVersion == "" {
		k3sHostVersion = k3sVersion
	}

	k3sHostVersion = strings.ReplaceAll(k3sHostVersion, "+", "-")

	k3sContainer, err = k3s.Run(ctx, "rancher/k3s:"+k3sHostVersion)
	Expect(err).To(Not(HaveOccurred()))

	containerIP, err := k3sContainer.ContainerIP(ctx)
	Expect(err).To(Not(HaveOccurred()))

	GinkgoWriter.Println("K3s containerIP: " + containerIP)

	kubeconfig, err = k3sContainer.GetKubeConfig(ctx)
	Expect(err).To(Not(HaveOccurred()))

	tmpFile, err := os.CreateTemp("", "kubeconfig-")
	Expect(err).To(Not(HaveOccurred()))

	_, err = tmpFile.Write(kubeconfig)
	Expect(err).To(Not(HaveOccurred()))
	Expect(tmpFile.Close()).To(Succeed())
	kubeconfigPath = tmpFile.Name()

	err = k3sContainer.LoadImages(ctx, repo+"/k3k:dev", repo+"/k3k-kubelet:dev")
	Expect(err).To(Not(HaveOccurred()))
	DeferCleanup(os.Remove, kubeconfigPath)

	Expect(os.Setenv("KUBECONFIG", kubeconfigPath)).To(Succeed())
	GinkgoWriter.Println(kubeconfigPath)
	GinkgoWriter.Println(string(kubeconfig))
}

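// installK3kChart installs the local k3k chart with the Helm Go SDK,
// overriding the controller and shared-agent images with the ":dev" tags
// loaded into the k3s container.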
func installK3kChart() {
	pwd, err := os.Getwd()
	Expect(err).To(Not(HaveOccurred()))

	k3kChart, err := loader.Load(path.Join(pwd, "../charts/k3k"))
	Expect(err).To(Not(HaveOccurred()))

	helmActionConfig = new(action.Configuration)

	kubeconfig, err := os.ReadFile(kubeconfigPath)
	Expect(err).To(Not(HaveOccurred()))

	restClientGetter, err := NewRESTClientGetter(kubeconfig)
	Expect(err).To(Not(HaveOccurred()))

	err = helmActionConfig.Init(restClientGetter, k3kNamespace, os.Getenv("HELM_DRIVER"), func(format string, v ...any) {
		GinkgoWriter.Printf("helm debug: "+format+"\n", v...)
	})
	Expect(err).To(Not(HaveOccurred()))

	iCli := action.NewInstall(helmActionConfig)
	iCli.ReleaseName = "k3k"
	iCli.Namespace = k3kNamespace
	iCli.CreateNamespace = true
	iCli.Timeout = time.Minute
	iCli.Wait = true

	controllerMap, _ := k3kChart.Values["controller"].(map[string]any)
	imageMap, _ := controllerMap["image"].(map[string]any)
	maps.Copy(imageMap, map[string]any{
		"repository": repo + "/k3k",
		"tag":        "dev",
		"pullPolicy": "IfNotPresent",
	})

	agentMap, _ := k3kChart.Values["agent"].(map[string]any)
	sharedAgentMap, _ := agentMap["shared"].(map[string]any)
	sharedAgentImageMap, _ := sharedAgentMap["image"].(map[string]any)
	maps.Copy(sharedAgentImageMap, map[string]any{
		"repository": repo + "/k3k-kubelet",
		"tag":        "dev",
	})

	release, err := iCli.Run(k3kChart, k3kChart.Values)
	Expect(err).To(Not(HaveOccurred()))

	GinkgoWriter.Printf("Release %s installed in %s namespace\n", release.Name, release.Namespace)
}

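// patchPVC backs the k3k controller Deployment with a PVC mounted at
// /tmp/covdata and sets GOCOVERDIR accordingly, so the instrumented
// controller binary persists Go coverage data; it then waits for the
// rollout to become ready.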
func patchPVC(ctx context.Context, clientset *kubernetes.Clientset) {
	deployments, err := clientset.AppsV1().Deployments(k3kNamespace).List(ctx, metav1.ListOptions{})
	Expect(err).To(Not(HaveOccurred()))
	Expect(deployments.Items).To(HaveLen(1))

	k3kDeployment := &deployments.Items[0]

	pvc := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "coverage-data-pvc",
			Namespace: k3kNamespace,
		},
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes: []v1.PersistentVolumeAccessMode{
				v1.ReadWriteOnce,
			},
			Resources: v1.VolumeResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceStorage: resource.MustParse("100M"),
				},
			},
		},
	}

	_, err = clientset.CoreV1().PersistentVolumeClaims(k3kNamespace).Create(ctx, pvc, metav1.CreateOptions{})
	Expect(client.IgnoreAlreadyExists(err)).To(Not(HaveOccurred()))

	k3kSpec := k3kDeployment.Spec.Template.Spec

	// Check whether the Deployment already has the coverage volume.
	for _, volume := range k3kSpec.Volumes {
		if volume.Name == "tmp-covdata" {
			return
		}
	}

	k3kSpec.Volumes = append(k3kSpec.Volumes, v1.Volume{
		Name: "tmp-covdata",
		VolumeSource: v1.VolumeSource{
			PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
				ClaimName: "coverage-data-pvc",
			},
		},
	})

	k3kSpec.Containers[0].VolumeMounts = append(k3kSpec.Containers[0].VolumeMounts, v1.VolumeMount{
		Name:      "tmp-covdata",
		MountPath: "/tmp/covdata",
	})

	k3kSpec.Containers[0].Env = append(k3kSpec.Containers[0].Env, v1.EnvVar{
		Name:  "GOCOVERDIR",
		Value: "/tmp/covdata",
	})

	k3kDeployment.Spec.Template.Spec = k3kSpec

	_, err = clientset.AppsV1().Deployments(k3kNamespace).Update(ctx, k3kDeployment, metav1.UpdateOptions{})
	Expect(err).To(Not(HaveOccurred()))

	Eventually(func() bool {
		GinkgoWriter.Println("Checking K3k deployment status")

		dep, err := clientset.AppsV1().Deployments(k3kNamespace).Get(ctx, k3kDeployment.Name, metav1.GetOptions{})
		Expect(err).To(Not(HaveOccurred()))

		// 1. Check if the controller has observed the latest generation.
		if dep.Generation > dep.Status.ObservedGeneration {
			GinkgoWriter.Printf("K3k deployment generation: %d, observed generation: %d\n", dep.Generation, dep.Status.ObservedGeneration)
			return false
		}

		// 2. Check if all replicas have been updated.
		if dep.Spec.Replicas != nil && dep.Status.UpdatedReplicas < *dep.Spec.Replicas {
			GinkgoWriter.Printf("K3k deployment replicas: %d, updated replicas: %d\n", *dep.Spec.Replicas, dep.Status.UpdatedReplicas)
			return false
		}

		// 3. Check if all updated replicas are available.
		if dep.Status.AvailableReplicas < dep.Status.UpdatedReplicas {
			GinkgoWriter.Printf("K3k deployment available replicas: %d, updated replicas: %d\n", dep.Status.AvailableReplicas, dep.Status.UpdatedReplicas)
			return false
		}

		return true
	}).
		MustPassRepeatedly(5).
		WithPolling(time.Second).
		WithTimeout(time.Second * 30).
		Should(BeTrue())
}

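// AfterSuite harvests the coverage data and, if the suite owns the k3s
// container, dumps the k3s and k3k controller logs before cleaning it up.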
var _ = AfterSuite(func() {
	ctx := context.Background()

	goCoverDir := os.Getenv("GOCOVERDIR")
	if goCoverDir == "" {
		goCoverDir = path.Join(os.TempDir(), "covdata")
		Expect(os.MkdirAll(goCoverDir, 0o755)).To(Succeed())
	}

	dumpK3kCoverageData(ctx, goCoverDir)
	if k3sContainer != nil {
		// dump k3s logs
		k3sLogs, err := k3sContainer.Logs(ctx)
		Expect(err).To(Not(HaveOccurred()))
		writeLogs("k3s.log", k3sLogs)

		// dump k3k controller logs
		k3kLogs := getK3kLogs(ctx)
		writeLogs("k3k.log", k3kLogs)

		testcontainers.CleanupContainer(GinkgoTB(), k3sContainer)
	}
})

// dumpK3kCoverageData kills the k3k controller container to force it to dump
// its coverage data, then downloads the files with `kubectl cp` into the
// specified folder. If the folder doesn't exist it will be created.
func dumpK3kCoverageData(ctx context.Context, folder string) {
	By("Restarting k3k controller")

	var podList v1.PodList

	err := k8sClient.List(ctx, &podList, &client.ListOptions{Namespace: k3kNamespace})
	Expect(err).To(Not(HaveOccurred()))

	k3kPod := podList.Items[0]
	k3kContainerName := k3kPod.Spec.Containers[0].Name

	By("Restarting k3k controller " + k3kPod.Name + "/" + k3kContainerName)

	cmd := exec.Command("kubectl", "exec", "-n", k3kNamespace, k3kPod.Name, "-c", k3kContainerName, "--", "/bin/sh", "-c", "kill 1")
	output, err := cmd.CombinedOutput()
	Expect(err).NotTo(HaveOccurred(), string(output))

	By("Waiting to be ready again")

	Eventually(func(g Gomega) {
		key := types.NamespacedName{
			Namespace: k3kNamespace,
			Name:      k3kPod.Name,
		}

		var controllerPod v1.Pod

		err = k8sClient.Get(ctx, key, &controllerPod)
		g.Expect(err).To(Not(HaveOccurred()))

		_, cond := pod.GetPodCondition(&controllerPod.Status, v1.PodReady)
		g.Expect(cond).NotTo(BeNil())
		g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
	}).
		MustPassRepeatedly(5).
		WithPolling(time.Second * 2).
		WithTimeout(time.Minute * 2).
		Should(Succeed())

	By("Controller is ready again, dumping coverage data")

	GinkgoWriter.Printf("Downloading covdata from k3k controller %s/%s to %s\n", k3kNamespace, k3kPod.Name, folder)

	tarPod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "tar",
			Namespace: k3kNamespace,
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name:    "tar",
				Image:   "busybox",
				Command: []string{"/bin/sh", "-c", "sleep 3600"},
				VolumeMounts: []v1.VolumeMount{{
					Name:      "tmp-covdata",
					MountPath: "/tmp/covdata",
				}},
			}},
			Volumes: []v1.Volume{{
				Name: "tmp-covdata",
				VolumeSource: v1.VolumeSource{
					PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
						ClaimName: "coverage-data-pvc",
					},
				},
			}},
		},
	}

	_, err = k8s.CoreV1().Pods(k3kNamespace).Create(ctx, tarPod, metav1.CreateOptions{})
	Expect(client.IgnoreAlreadyExists(err)).To(Not(HaveOccurred()))

	By("Waiting for tar pod to be ready")

	Eventually(func(g Gomega) {
		err = k8sClient.Get(ctx, client.ObjectKeyFromObject(tarPod), tarPod)
		g.Expect(err).To(Not(HaveOccurred()))

		_, cond := pod.GetPodCondition(&tarPod.Status, v1.PodReady)
		g.Expect(cond).NotTo(BeNil())
		g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
	}).
		WithPolling(time.Second).
		WithTimeout(time.Minute).
		Should(Succeed())

	By("Copying covdata from tar pod")

	cmd = exec.Command("kubectl", "cp", fmt.Sprintf("%s/%s:/tmp/covdata", k3kNamespace, tarPod.Name), folder)
	output, err = cmd.CombinedOutput()
	Expect(err).NotTo(HaveOccurred(), string(output))

	Expect(k8sClient.Delete(ctx, tarPod)).To(Succeed())
}

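// getK3kLogs streams the previous (pre-restart) logs of the first pod in the
// k3k namespace; callers are responsible for closing the returned stream.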
func getK3kLogs(ctx context.Context) io.ReadCloser {
	var podList v1.PodList

	err := k8sClient.List(ctx, &podList, &client.ListOptions{Namespace: k3kNamespace})
	Expect(err).To(Not(HaveOccurred()))

	k3kPod := podList.Items[0]
	req := k8s.CoreV1().Pods(k3kPod.Namespace).GetLogs(k3kPod.Name, &v1.PodLogOptions{Previous: true})
	podLogs, err := req.Stream(ctx)
	Expect(err).To(Not(HaveOccurred()))

	return podLogs
}

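// writeLogs drains a log stream into a file under the OS temp directory and
// closes the stream.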
func writeLogs(filename string, logs io.ReadCloser) {
	logBytes, err := io.ReadAll(logs)
	Expect(err).To(Not(HaveOccurred()))

	tempfile := path.Join(os.TempDir(), filename)
	err = os.WriteFile(tempfile, logBytes, 0o644)
	Expect(err).To(Not(HaveOccurred()))

	GinkgoWriter.Println("logs written to: " + tempfile)

	_ = logs.Close()
}

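// readFileWithinPod returns the contents of a file inside the given pod by
// running `cat` through the exec subresource.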
func readFileWithinPod(ctx context.Context, client *kubernetes.Clientset, config *rest.Config, name, namespace, path string) ([]byte, error) {
	command := []string{"cat", path}

	output := new(bytes.Buffer)

	stderr, err := podExec(ctx, client, config, namespace, name, command, nil, output)
	if err != nil || len(stderr) > 0 {
		return nil, fmt.Errorf("failed to read file %s: %v (stderr: %s)", path, err, stderr)
	}

	return output.Bytes(), nil
}

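// podExec executes a command in the given pod's default container over the
// exec subresource using a SPDY stream, wiring up the optional stdin/stdout
// and returning whatever the command wrote to stderr.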
func podExec(ctx context.Context, clientset *kubernetes.Clientset, config *rest.Config, namespace, name string, command []string, stdin io.Reader, stdout io.Writer) ([]byte, error) {
	req := clientset.CoreV1().RESTClient().Post().
		Resource("pods").
		Name(name).
		Namespace(namespace).
		SubResource("exec")
	scheme := runtime.NewScheme()

	if err := v1.AddToScheme(scheme); err != nil {
		return nil, fmt.Errorf("error adding to scheme: %v", err)
	}

	parameterCodec := runtime.NewParameterCodec(scheme)

	req.VersionedParams(&v1.PodExecOptions{
		Command: command,
		Stdin:   stdin != nil,
		Stdout:  stdout != nil,
		Stderr:  true,
		TTY:     false,
	}, parameterCodec)

	exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL())
	if err != nil {
		return nil, fmt.Errorf("error while creating Executor: %v", err)
	}

	var stderr bytes.Buffer

	err = exec.StreamWithContext(ctx, remotecommand.StreamOptions{
		Stdin:  stdin,
		Stdout: stdout,
		Stderr: &stderr,
		Tty:    false,
	})
	if err != nil {
		return nil, fmt.Errorf("error in Stream: %v", err)
	}

	return stderr.Bytes(), nil
}

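// caCertSecret wraps a certificate/key pair in a Secret under the
// conventional tls.crt and tls.key keys.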
func caCertSecret(name, namespace string, crt, key []byte) *v1.Secret {
	return &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		TypeMeta: metav1.TypeMeta{
			Kind:       "Secret",
			APIVersion: "v1",
		},
		Data: map[string][]byte{
			"tls.crt": crt,
			"tls.key": key,
		},
	}
}

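// privateRegistry stands up a TLS-enabled "registry:2" instance in the given
// namespace: CA, server certificate, registry config, and k3s registries.yaml
// Secrets built from testdata, plus a Deployment and a Service on port 5000.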
func privateRegistry(ctx context.Context, namespace string) error {
	caCrtMap := map[string]string{
		"tls.crt": filepath.Join("testdata", "registry", "certs", "ca.crt"),
		"tls.key": filepath.Join("testdata", "registry", "certs", "ca.key"),
	}

	caSecret, err := buildRegistryConfigSecret(caCrtMap, namespace, registryCACertSecretName, true)
	if err != nil {
		return err
	}

	if err := k8sClient.Create(ctx, caSecret); err != nil {
		return err
	}

	registryCrtMap := map[string]string{
		"tls.crt": filepath.Join("testdata", "registry", "certs", "registry.crt"),
		"tls.key": filepath.Join("testdata", "registry", "certs", "registry.key"),
	}

	registrySecret, err := buildRegistryConfigSecret(registryCrtMap, namespace, registryCertSecretName, true)
	if err != nil {
		return err
	}

	if err := k8sClient.Create(ctx, registrySecret); err != nil {
		return err
	}

	configMap := map[string]string{
		"config.yml": filepath.Join("testdata", "registry", "config.yml"),
	}

	configSecret, err := buildRegistryConfigSecret(configMap, namespace, registryConfigSecretName, false)
	if err != nil {
		return err
	}

	if err := k8sClient.Create(ctx, configSecret); err != nil {
		return err
	}

	k3sRegistryConfig := map[string]string{
		"registries.yaml": filepath.Join("testdata", "registry", "registries.yaml"),
	}

	k3sRegistrySecret, err := buildRegistryConfigSecret(k3sRegistryConfig, namespace, k3sRegistryConfigSecretName, false)
	if err != nil {
		return err
	}

	if err := k8sClient.Create(ctx, k3sRegistrySecret); err != nil {
		return err
	}

	registryDeployment := &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "private-registry",
			Namespace: namespace,
		},
		TypeMeta: metav1.TypeMeta{
			Kind:       "Deployment",
			APIVersion: "apps/v1",
		},
		Spec: appsv1.DeploymentSpec{
			Replicas: ptr.To(int32(1)),
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{
					"app": "private-registry",
				},
			},
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						"app": "private-registry",
					},
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Name:  "private-registry",
							Image: registryImage,
							VolumeMounts: []v1.VolumeMount{
								{
									Name:      "config",
									MountPath: "/etc/docker/registry/",
								},
								{
									Name:      "ca-cert",
									MountPath: "/etc/docker/registry/ssl/ca",
								},
								{
									Name:      "registry-cert",
									MountPath: "/etc/docker/registry/ssl/registry",
								},
							},
						},
					},
					Volumes: []v1.Volume{
						{
							Name: "config",
							VolumeSource: v1.VolumeSource{
								Secret: &v1.SecretVolumeSource{
									SecretName: registryConfigSecretName,
								},
							},
						},
						{
							Name: "ca-cert",
							VolumeSource: v1.VolumeSource{
								Secret: &v1.SecretVolumeSource{
									SecretName: registryCACertSecretName,
								},
							},
						},
						{
							Name: "registry-cert",
							VolumeSource: v1.VolumeSource{
								Secret: &v1.SecretVolumeSource{
									SecretName: registryCertSecretName,
								},
							},
						},
					},
				},
			},
		},
	}

	if err := k8sClient.Create(ctx, registryDeployment); err != nil {
		return err
	}

	registryService := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "private-registry",
			Namespace: namespace,
		},
		TypeMeta: metav1.TypeMeta{
			Kind:       "Service",
			APIVersion: "v1",
		},
		Spec: v1.ServiceSpec{
			Selector: map[string]string{
				"app": "private-registry",
			},
			Ports: []v1.ServicePort{
				{
					Name:       "registry-port",
					Port:       5000,
					TargetPort: intstr.FromInt(5000),
				},
			},
		},
	}

	return k8sClient.Create(ctx, registryService)
}

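// buildRegistryNetPolicy applies an egress-only NetworkPolicy that confines
// pods labeled role=server to 10.0.0.0/8, the private block covering the
// default k3s cluster CIDRs, so registry traffic stays in-cluster.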
func buildRegistryNetPolicy(ctx context.Context, namespace string) error {
	np := networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "private-registry-test-netpol",
			Namespace: namespace,
		},
		Spec: networkingv1.NetworkPolicySpec{
			PodSelector: metav1.LabelSelector{
				MatchLabels: map[string]string{
					"role": "server",
				},
			},
			PolicyTypes: []networkingv1.PolicyType{
				networkingv1.PolicyTypeEgress,
			},
			Egress: []networkingv1.NetworkPolicyEgressRule{
				{
					To: []networkingv1.NetworkPolicyPeer{
						{
							IPBlock: &networkingv1.IPBlock{
								CIDR: "10.0.0.0/8",
							},
						},
					},
				},
			},
		},
	}

	return k8sClient.Create(ctx, &np)
}

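// buildRegistryConfigSecret reads each file referenced in tlsMap and packs
// the contents into a Secret, typed kubernetes.io/tls when tlsSecret is set
// and Opaque otherwise.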
func buildRegistryConfigSecret(tlsMap map[string]string, namespace, name string, tlsSecret bool) (*v1.Secret, error) {
	secretType := v1.SecretTypeOpaque
	if tlsSecret {
		secretType = v1.SecretTypeTLS
	}

	data := make(map[string][]byte)

	for key, path := range tlsMap {
		b, err := os.ReadFile(path)
		if err != nil {
			return nil, err
		}

		data[key] = b
	}

	secret := &v1.Secret{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Secret",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		Type: secretType,
		Data: data,
	}

	return secret, nil
}