mirror of
https://github.com/rancher/k3k.git
synced 2026-05-17 14:56:42 +00:00
633 lines
16 KiB
Go
633 lines
16 KiB
Go
package k3k_test
|
|
|
|
import (
|
|
"bytes"
|
|
"context"
|
|
"fmt"
|
|
"io"
|
|
"os"
|
|
"os/exec"
|
|
"path"
|
|
"path/filepath"
|
|
"testing"
|
|
"time"
|
|
|
|
"github.com/testcontainers/testcontainers-go"
|
|
"github.com/testcontainers/testcontainers-go/modules/k3s"
|
|
"k8s.io/apimachinery/pkg/api/resource"
|
|
"k8s.io/apimachinery/pkg/runtime"
|
|
"k8s.io/apimachinery/pkg/types"
|
|
"k8s.io/apimachinery/pkg/util/intstr"
|
|
"k8s.io/client-go/kubernetes"
|
|
"k8s.io/client-go/rest"
|
|
"k8s.io/client-go/tools/remotecommand"
|
|
"k8s.io/kubernetes/pkg/api/v1/pod"
|
|
"k8s.io/utils/ptr"
|
|
"sigs.k8s.io/controller-runtime/pkg/client"
|
|
|
|
appsv1 "k8s.io/api/apps/v1"
|
|
corev1 "k8s.io/api/core/v1"
|
|
networkingv1 "k8s.io/api/networking/v1"
|
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
|
|
fwclient "github.com/rancher/k3k/tests/framework/client"
|
|
fwcontainer "github.com/rancher/k3k/tests/framework/container"
|
|
fwlog "github.com/rancher/k3k/tests/framework/log"
|
|
|
|
. "github.com/onsi/ginkgo/v2"
|
|
. "github.com/onsi/gomega"
|
|
)
|
|
|
|
const (
	// k3kNamespace is the namespace the K3k controller chart is installed into.
	k3kNamespace = "k3k-system"

	// K3s versions used by the tests. k3sOldVersion is an older release
	// (presumably used by upgrade/update specs — confirm against the specs).
	k3sVersion    = "v1.35.2-k3s1"
	k3sOldVersion = "v1.35.0-k3s1"

	// Ginkgo labels used to select subsets of the suite.
	e2eTestLabel            = "e2e"
	slowTestsLabel          = "slow"
	updateTestsLabel        = "update"
	persistenceTestsLabel   = "persistence"
	networkingTestsLabel    = "networking"
	statusTestsLabel        = "status"
	certificatesTestsLabel  = "certificates"
	registryTestsLabel      = "registry"

	// Private-registry fixture: the registry image and the names of the
	// secrets holding its CA, server certificate, and configuration, plus
	// the k3s registries.yaml secret.
	registryImage               = "registry:2"
	registryCACertSecretName    = "private-registry-ca-cert"
	registryCertSecretName      = "private-registry-cert"
	registryConfigSecretName    = "private-registry-config"
	k3sRegistryConfigSecretName = "k3s-registry-config"
)
|
|
|
|
// TestTests is the standard `go test` entry point that hands control of the
// suite over to Ginkgo.
func TestTests(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Tests Suite")
}
|
|
|
|
// Suite-wide state, initialized in BeforeSuite and shared by all specs.
var (
	k3sContainer   *k3s.K3sContainer     // host K3s container; nil unless K3K_DOCKER_INSTALL is set
	hostIP         string                // IP of the host cluster
	restcfg        *rest.Config          // REST config for the host cluster
	k8s            *kubernetes.Clientset // typed clientset for the host cluster
	k8sClient      client.Client         // controller-runtime client for the host cluster
	kubeconfigPath string                // path to the host cluster kubeconfig file
)
|
|
|
|
// BeforeSuite wires up the cluster the specs run against. With
// K3K_DOCKER_INSTALL set it starts a K3s container in Docker and installs the
// K3k chart into it; otherwise it connects to an existing cluster. In both
// cases the controller Deployment is patched for coverage collection.
var _ = BeforeSuite(func() {
	ctx := context.Background()

	GinkgoWriter.Println("GOCOVERDIR:", os.Getenv("GOCOVERDIR"))

	_, dockerInstallEnabled := os.LookupEnv("K3K_DOCKER_INSTALL")

	if dockerInstallEnabled {
		// REPO overrides the image repository for the controller and
		// kubelet images; defaults to "rancher".
		repo := os.Getenv("REPO")
		if repo == "" {
			repo = "rancher"
		}

		installK3SDocker(ctx, repo+"/k3k", repo+"/k3k-kubelet")
		initKubernetesClient(ctx)
		installK3kChart(repo+"/k3k", repo+"/k3k-kubelet")
	} else {
		initKubernetesClient(ctx)
	}

	// Mount a PVC into the controller so its Go coverage data survives
	// restarts (see patchPVC).
	patchPVC(ctx, k8s)
})
|
|
|
|
func installK3SDocker(ctx context.Context, controllerImage, kubeletImage string) {
|
|
k3sHostVersion := os.Getenv("K3S_HOST_VERSION")
|
|
if k3sHostVersion == "" {
|
|
k3sHostVersion = k3sVersion
|
|
}
|
|
|
|
k3sContainer, kubeconfigPath = fwcontainer.SetupK3s(ctx, k3sHostVersion, controllerImage, kubeletImage)
|
|
}
|
|
|
|
// initKubernetesClient populates the suite globals (hostIP, restcfg, k8s,
// k8sClient) from the kubeconfig resolved by the test framework. When
// k3sContainer is nil the framework is expected to fall back to an external
// cluster's kubeconfig — confirm in fwclient.InitFromKubeconfig.
func initKubernetesClient(ctx context.Context) {
	scheme := fwclient.NewScheme()
	config, err := fwclient.InitFromKubeconfig(ctx, scheme, k3sContainer)
	Expect(err).NotTo(HaveOccurred())

	hostIP = config.HostIP
	restcfg = config.RestConfig
	k8s = config.Clientset
	k8sClient = config.Client
}
|
|
|
|
func installK3kChart(controllerImage, kubeletImage string) {
|
|
installer := fwcontainer.NewHelmInstaller(controllerImage, kubeletImage, kubeconfigPath)
|
|
kubeconfig, err := os.ReadFile(kubeconfigPath)
|
|
Expect(err).NotTo(HaveOccurred())
|
|
|
|
restClientGetter, err := fwclient.NewRESTClientGetter(kubeconfig)
|
|
Expect(err).NotTo(HaveOccurred())
|
|
|
|
installer.InstallK3kChart(restClientGetter)
|
|
}
|
|
|
|
// patchPVC prepares the K3k controller Deployment for Go coverage collection:
// it creates a PVC ("coverage-data-pvc"), mounts it at /tmp/covdata in the
// controller container and sets GOCOVERDIR to that path, then waits for the
// patched Deployment to become Available again.
//
// It expects exactly one Deployment in the k3k-system namespace (the
// controller) and returns early if the coverage volume is already present.
func patchPVC(ctx context.Context, clientset *kubernetes.Clientset) {
	deployments, err := clientset.AppsV1().Deployments(k3kNamespace).List(ctx, metav1.ListOptions{})
	Expect(err).To(Not(HaveOccurred()))
	Expect(deployments.Items).To(HaveLen(1))

	k3kDeployment := &deployments.Items[0]

	pvc := &corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "coverage-data-pvc",
			Namespace: k3kNamespace,
		},
		Spec: corev1.PersistentVolumeClaimSpec{
			AccessModes: []corev1.PersistentVolumeAccessMode{
				corev1.ReadWriteOnce,
			},
			Resources: corev1.VolumeResourceRequirements{
				Requests: corev1.ResourceList{
					corev1.ResourceStorage: resource.MustParse("100M"),
				},
			},
		},
	}

	// The PVC may survive from a previous run; an AlreadyExists error is fine.
	_, err = clientset.CoreV1().PersistentVolumeClaims(k3kNamespace).Create(ctx, pvc, metav1.CreateOptions{})
	Expect(client.IgnoreAlreadyExists(err)).To(Not(HaveOccurred()))

	k3kSpec := k3kDeployment.Spec.Template.Spec

	// check if the Deployment has already the volume for the coverage
	for _, volumes := range k3kSpec.Volumes {
		if volumes.Name == "tmp-covdata" {
			return
		}
	}

	k3kSpec.Volumes = append(k3kSpec.Volumes, corev1.Volume{
		Name: "tmp-covdata",
		VolumeSource: corev1.VolumeSource{
			PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
				ClaimName: "coverage-data-pvc",
			},
		},
	})

	k3kSpec.Containers[0].VolumeMounts = append(k3kSpec.Containers[0].VolumeMounts, corev1.VolumeMount{
		Name:      "tmp-covdata",
		MountPath: "/tmp/covdata",
	})

	// GOCOVERDIR tells the instrumented controller binary where to write
	// its coverage counters.
	k3kSpec.Containers[0].Env = append(k3kSpec.Containers[0].Env, corev1.EnvVar{
		Name:  "GOCOVERDIR",
		Value: "/tmp/covdata",
	})

	k3kDeployment.Spec.Template.Spec = k3kSpec

	_, err = clientset.AppsV1().Deployments(k3kNamespace).Update(ctx, k3kDeployment, metav1.UpdateOptions{})
	Expect(err).To(Not(HaveOccurred()))

	// Wait until the rollout triggered by the update is observed and the
	// Deployment reports Available=True again.
	Eventually(func(g Gomega) {
		GinkgoWriter.Println("Checking K3k deployment status")

		dep, err := clientset.AppsV1().Deployments(k3kNamespace).Get(ctx, k3kDeployment.Name, metav1.GetOptions{})
		g.Expect(err).To(Not(HaveOccurred()))
		g.Expect(dep.Generation).To(Equal(dep.Status.ObservedGeneration))

		var availableCond appsv1.DeploymentCondition

		for _, cond := range dep.Status.Conditions {
			if cond.Type == appsv1.DeploymentAvailable {
				availableCond = cond
				break
			}
		}

		// The zero-value condition has an empty Type, so this also fails
		// when no Available condition was found at all.
		g.Expect(availableCond.Type).To(Equal(appsv1.DeploymentAvailable))
		g.Expect(availableCond.Status).To(Equal(corev1.ConditionTrue))
	}).
		WithPolling(time.Second).
		WithTimeout(time.Second * 30).
		Should(Succeed())
}
|
|
|
|
// AfterSuite dumps the controller's Go coverage data and, when the suite
// started its own K3s container, collects the k3s and k3k logs before
// cleaning the container up.
var _ = AfterSuite(func() {
	ctx := context.Background()

	// Default the coverage output directory to $TMPDIR/covdata when
	// GOCOVERDIR is not set, creating it if necessary.
	goCoverDir := os.Getenv("GOCOVERDIR")
	if goCoverDir == "" {
		goCoverDir = path.Join(os.TempDir(), "covdata")
		Expect(os.MkdirAll(goCoverDir, 0o755)).To(Succeed())
	}

	dumpK3kCoverageData(ctx, goCoverDir)

	// k3sContainer is only set when K3K_DOCKER_INSTALL was used.
	if k3sContainer != nil {
		// dump k3s logs
		k3sLogs, err := k3sContainer.Logs(ctx)
		Expect(err).To(Not(HaveOccurred()))
		fwlog.WriteLogs("k3s.log", k3sLogs)

		// dump k3k controller logs
		k3kLogs := fwlog.GetK3kPodLogs(ctx, k8sClient, k8s, k3kNamespace)
		fwlog.WriteLogs("k3k.log", k3kLogs)

		testcontainers.CleanupContainer(GinkgoTB(), k3sContainer)
	}
})
|
|
|
|
// dumpK3kCoverageData will kill the K3k controller container to force it to dump the coverage data.
// It will then download the files with kubectl cp into the specified folder. If the folder doesn't exist it will be created.
func dumpK3kCoverageData(ctx context.Context, folder string) {
	By("Restarting k3k controller")

	var podList corev1.PodList

	err := k8sClient.List(ctx, &podList, &client.ListOptions{Namespace: k3kNamespace})
	Expect(err).To(Not(HaveOccurred()))
	Expect(podList.Items).NotTo(BeEmpty())

	// NOTE(review): assumes the first pod in the namespace is the
	// controller pod — confirm this holds when other pods exist here.
	k3kPod := podList.Items[0]
	k3kContainerName := k3kPod.Spec.Containers[0].Name

	By("Restarting k3k controller " + k3kPod.Name + "/" + k3kContainerName)

	// Killing PID 1 forces the instrumented binary to exit and dump its
	// coverage data before the container is restarted.
	cmd := exec.Command("kubectl", "exec", "-n", k3kNamespace, k3kPod.Name, "-c", k3kContainerName, "--", "/bin/sh", "-c", "kill 1")
	output, err := cmd.CombinedOutput()
	Expect(err).NotTo(HaveOccurred(), string(output))

	By("Waiting to be ready again")

	// MustPassRepeatedly(5) guards against observing the brief Ready state
	// of the old container before the restart takes effect.
	Eventually(func(g Gomega) {
		key := types.NamespacedName{
			Namespace: k3kNamespace,
			Name:      k3kPod.Name,
		}

		var controllerPod corev1.Pod

		err = k8sClient.Get(ctx, key, &controllerPod)
		g.Expect(err).To(Not(HaveOccurred()))

		_, cond := pod.GetPodCondition(&controllerPod.Status, corev1.PodReady)
		g.Expect(cond).NotTo(BeNil())
		g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
	}).
		MustPassRepeatedly(5).
		WithPolling(time.Second * 2).
		WithTimeout(time.Minute * 2).
		Should(Succeed())

	By("Controller is ready again, dumping coverage data")

	GinkgoWriter.Printf("Downloading covdata from k3k controller %s/%s to %s\n", k3kNamespace, k3kPod.Name, folder)

	// Helper pod that mounts the coverage PVC so the files can be copied
	// out with kubectl cp.
	tarPod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "tar",
			Namespace: k3kNamespace,
		},
		Spec: corev1.PodSpec{
			Containers: []corev1.Container{{
				Name:    "tar",
				Image:   "busybox",
				Command: []string{"/bin/sh", "-c", "sleep 3600"},
				VolumeMounts: []corev1.VolumeMount{{
					Name:      "tmp-covdata",
					MountPath: "/tmp/covdata",
				}},
			}},
			Volumes: []corev1.Volume{{
				Name: "tmp-covdata",
				VolumeSource: corev1.VolumeSource{
					PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
						ClaimName: "coverage-data-pvc",
					},
				},
			}},
		},
	}

	_, err = k8s.CoreV1().Pods(k3kNamespace).Create(ctx, tarPod, metav1.CreateOptions{})
	Expect(client.IgnoreAlreadyExists(err)).To(Not(HaveOccurred()))

	By("Waiting for tar pod to be ready")

	Eventually(func(g Gomega) {
		err = k8sClient.Get(ctx, client.ObjectKeyFromObject(tarPod), tarPod)
		g.Expect(err).To(Not(HaveOccurred()))

		_, cond := pod.GetPodCondition(&tarPod.Status, corev1.PodReady)
		g.Expect(cond).NotTo(BeNil())
		g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
	}).
		WithPolling(time.Second).
		WithTimeout(time.Minute).
		Should(Succeed())

	By("Copying covdata from tar pod")

	cmd = exec.Command("kubectl", "cp", fmt.Sprintf("%s/%s:/tmp/covdata", k3kNamespace, tarPod.Name), folder)
	output, err = cmd.CombinedOutput()
	Expect(err).NotTo(HaveOccurred(), string(output))

	Expect(k8sClient.Delete(ctx, tarPod)).To(Succeed())
}
|
|
|
|
func readFileWithinPod(ctx context.Context, client *kubernetes.Clientset, config *rest.Config, name, namespace, path string) ([]byte, error) {
|
|
command := []string{"cat", path}
|
|
|
|
output := new(bytes.Buffer)
|
|
|
|
stderr, err := podExec(ctx, client, config, namespace, name, command, nil, output)
|
|
if err != nil || len(stderr) > 0 {
|
|
return nil, fmt.Errorf("failed to read the following file %s: %v", path, err)
|
|
}
|
|
|
|
return output.Bytes(), nil
|
|
}
|
|
|
|
func podExec(ctx context.Context, clientset *kubernetes.Clientset, config *rest.Config, namespace, name string, command []string, stdin io.Reader, stdout io.Writer) ([]byte, error) {
|
|
req := clientset.CoreV1().RESTClient().Post().
|
|
Resource("pods").
|
|
Name(name).
|
|
Namespace(namespace).
|
|
SubResource("exec")
|
|
scheme := runtime.NewScheme()
|
|
|
|
if err := corev1.AddToScheme(scheme); err != nil {
|
|
return nil, fmt.Errorf("error adding to scheme: %v", err)
|
|
}
|
|
|
|
parameterCodec := runtime.NewParameterCodec(scheme)
|
|
|
|
req.VersionedParams(&corev1.PodExecOptions{
|
|
Command: command,
|
|
Stdin: stdin != nil,
|
|
Stdout: stdout != nil,
|
|
Stderr: true,
|
|
TTY: false,
|
|
}, parameterCodec)
|
|
|
|
exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL())
|
|
if err != nil {
|
|
return nil, fmt.Errorf("error while creating Executor: %v", err)
|
|
}
|
|
|
|
var stderr bytes.Buffer
|
|
|
|
err = exec.StreamWithContext(ctx, remotecommand.StreamOptions{
|
|
Stdin: stdin,
|
|
Stdout: stdout,
|
|
Stderr: &stderr,
|
|
Tty: false,
|
|
})
|
|
if err != nil {
|
|
return nil, fmt.Errorf("error in Stream: %v", err)
|
|
}
|
|
|
|
return stderr.Bytes(), nil
|
|
}
|
|
|
|
func caCertSecret(name, namespace string, crt, key []byte) *corev1.Secret {
|
|
return &corev1.Secret{
|
|
ObjectMeta: metav1.ObjectMeta{
|
|
Name: name,
|
|
Namespace: namespace,
|
|
},
|
|
TypeMeta: metav1.TypeMeta{
|
|
Kind: "Secret",
|
|
APIVersion: "v1",
|
|
},
|
|
Data: map[string][]byte{
|
|
"tls.crt": crt,
|
|
"tls.key": key,
|
|
},
|
|
}
|
|
}
|
|
|
|
func privateRegistry(ctx context.Context, namespace string) error {
|
|
caCrtMap := map[string]string{
|
|
"tls.crt": filepath.Join("testdata", "registry", "certs", "ca.crt"),
|
|
"tls.key": filepath.Join("testdata", "registry", "certs", "ca.key"),
|
|
}
|
|
|
|
caSecret, err := buildRegistryConfigSecret(caCrtMap, namespace, registryCACertSecretName, true)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
if err := k8sClient.Create(ctx, caSecret); err != nil {
|
|
return err
|
|
}
|
|
|
|
registryCrtMap := map[string]string{
|
|
"tls.crt": filepath.Join("testdata", "registry", "certs", "registry.crt"),
|
|
"tls.key": filepath.Join("testdata", "registry", "certs", "registry.key"),
|
|
}
|
|
|
|
registrySecret, err := buildRegistryConfigSecret(registryCrtMap, namespace, registryCertSecretName, true)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
if err := k8sClient.Create(ctx, registrySecret); err != nil {
|
|
return err
|
|
}
|
|
|
|
configMap := map[string]string{
|
|
"config.yml": filepath.Join("testdata", "registry", "config.yml"),
|
|
}
|
|
|
|
configSecret, err := buildRegistryConfigSecret(configMap, namespace, registryConfigSecretName, false)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
if err := k8sClient.Create(ctx, configSecret); err != nil {
|
|
return err
|
|
}
|
|
|
|
k3sRegistryConfig := map[string]string{
|
|
"registries.yaml": filepath.Join("testdata", "registry", "registries.yaml"),
|
|
}
|
|
|
|
k3sRegistrySecret, err := buildRegistryConfigSecret(k3sRegistryConfig, namespace, k3sRegistryConfigSecretName, false)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
if err := k8sClient.Create(ctx, k3sRegistrySecret); err != nil {
|
|
return err
|
|
}
|
|
|
|
registryDeployment := &appsv1.Deployment{
|
|
ObjectMeta: metav1.ObjectMeta{
|
|
Name: "private-registry",
|
|
Namespace: namespace,
|
|
},
|
|
TypeMeta: metav1.TypeMeta{
|
|
Kind: "Deployment",
|
|
APIVersion: "apps/v1",
|
|
},
|
|
Spec: appsv1.DeploymentSpec{
|
|
Replicas: ptr.To(int32(1)),
|
|
Selector: &metav1.LabelSelector{
|
|
MatchLabels: map[string]string{
|
|
"app": "private-registry",
|
|
},
|
|
},
|
|
Template: corev1.PodTemplateSpec{
|
|
ObjectMeta: metav1.ObjectMeta{
|
|
Labels: map[string]string{
|
|
"app": "private-registry",
|
|
},
|
|
},
|
|
Spec: corev1.PodSpec{
|
|
Containers: []corev1.Container{
|
|
{
|
|
Name: "private-registry",
|
|
Image: registryImage,
|
|
VolumeMounts: []corev1.VolumeMount{
|
|
{
|
|
Name: "config",
|
|
MountPath: "/etc/docker/registry/",
|
|
},
|
|
{
|
|
Name: "ca-cert",
|
|
MountPath: "/etc/docker/registry/ssl/ca",
|
|
},
|
|
{
|
|
Name: "registry-cert",
|
|
MountPath: "/etc/docker/registry/ssl/registry",
|
|
},
|
|
},
|
|
},
|
|
},
|
|
Volumes: []corev1.Volume{
|
|
{
|
|
Name: "config",
|
|
VolumeSource: corev1.VolumeSource{
|
|
Secret: &corev1.SecretVolumeSource{
|
|
SecretName: "private-registry-config",
|
|
},
|
|
},
|
|
},
|
|
{
|
|
Name: "ca-cert",
|
|
VolumeSource: corev1.VolumeSource{
|
|
Secret: &corev1.SecretVolumeSource{
|
|
SecretName: "private-registry-ca-cert",
|
|
},
|
|
},
|
|
},
|
|
{
|
|
Name: "registry-cert",
|
|
VolumeSource: corev1.VolumeSource{
|
|
Secret: &corev1.SecretVolumeSource{
|
|
SecretName: "private-registry-cert",
|
|
},
|
|
},
|
|
},
|
|
},
|
|
},
|
|
},
|
|
},
|
|
}
|
|
|
|
if err := k8sClient.Create(ctx, registryDeployment); err != nil {
|
|
return err
|
|
}
|
|
|
|
registryService := &corev1.Service{
|
|
ObjectMeta: metav1.ObjectMeta{
|
|
Name: "private-registry",
|
|
Namespace: namespace,
|
|
},
|
|
TypeMeta: metav1.TypeMeta{
|
|
Kind: "Service",
|
|
APIVersion: "v1",
|
|
},
|
|
Spec: corev1.ServiceSpec{
|
|
Selector: map[string]string{
|
|
"app": "private-registry",
|
|
},
|
|
Ports: []corev1.ServicePort{
|
|
{
|
|
Name: "registry-port",
|
|
Port: 5000,
|
|
TargetPort: intstr.FromInt(5000),
|
|
},
|
|
},
|
|
},
|
|
}
|
|
|
|
return k8sClient.Create(ctx, registryService)
|
|
}
|
|
|
|
func buildRegistryNetPolicy(ctx context.Context, namespace string) error {
|
|
np := networkingv1.NetworkPolicy{
|
|
ObjectMeta: metav1.ObjectMeta{
|
|
Name: "private-registry-test-netpol",
|
|
Namespace: namespace,
|
|
},
|
|
Spec: networkingv1.NetworkPolicySpec{
|
|
PodSelector: metav1.LabelSelector{
|
|
MatchLabels: map[string]string{
|
|
"role": "server",
|
|
},
|
|
},
|
|
PolicyTypes: []networkingv1.PolicyType{
|
|
networkingv1.PolicyTypeEgress,
|
|
},
|
|
Egress: []networkingv1.NetworkPolicyEgressRule{
|
|
{
|
|
To: []networkingv1.NetworkPolicyPeer{
|
|
{
|
|
IPBlock: &networkingv1.IPBlock{
|
|
CIDR: "10.0.0.0/8",
|
|
},
|
|
},
|
|
},
|
|
},
|
|
},
|
|
},
|
|
}
|
|
|
|
return k8sClient.Create(ctx, &np)
|
|
}
|
|
|
|
func buildRegistryConfigSecret(tlsMap map[string]string, namespace, name string, tlsSecret bool) (*corev1.Secret, error) {
|
|
secretType := corev1.SecretTypeOpaque
|
|
if tlsSecret {
|
|
secretType = corev1.SecretTypeTLS
|
|
}
|
|
|
|
data := make(map[string][]byte)
|
|
|
|
for key, path := range tlsMap {
|
|
b, err := os.ReadFile(path)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
data[key] = b
|
|
}
|
|
|
|
secret := &corev1.Secret{
|
|
TypeMeta: metav1.TypeMeta{
|
|
Kind: "Secret",
|
|
APIVersion: "v1",
|
|
},
|
|
ObjectMeta: metav1.ObjectMeta{
|
|
Name: name,
|
|
Namespace: namespace,
|
|
},
|
|
Type: secretType,
|
|
Data: data,
|
|
}
|
|
|
|
return secret, nil
|
|
}
|