Set the first daemonset namespace to registration namespace (#536)
Signed-off-by: zhujian <jiazhu@redhat.com>
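The diff below changes the registration-namespace lookup so that, when the addon template contains no Deployment, the namespace of the first DaemonSet is used instead of the hard-coded default. The following is a minimal, self-contained Go sketch of that selection order only; the manifest type and the isDeployment/isDaemonSet-style checks here are simplified stand-ins, not the repository's utils helpers.

package main

import "fmt"

// manifest is a simplified stand-in for a rendered addon template manifest.
type manifest struct {
	kind      string
	namespace string
}

// pickRegistrationNamespace mirrors the new selection order: prefer the first
// Deployment's namespace, fall back to the first DaemonSet's namespace, and
// finally to the default addon namespace.
func pickRegistrationNamespace(manifests []manifest) string {
	const defaultNS = "open-cluster-management-agent-addon"
	var firstDeploymentNS, firstDaemonSetNS string
	for _, m := range manifests {
		switch m.kind {
		case "Deployment":
			if firstDeploymentNS == "" {
				firstDeploymentNS = m.namespace
			}
		case "DaemonSet":
			if firstDaemonSetNS == "" {
				firstDaemonSetNS = m.namespace
			}
		}
		if firstDeploymentNS != "" {
			break // a Deployment always wins, so stop scanning
		}
	}
	switch {
	case firstDeploymentNS != "":
		return firstDeploymentNS
	case firstDaemonSetNS != "":
		return firstDaemonSetNS
	default:
		return defaultNS
	}
}

func main() {
	manifests := []manifest{
		{kind: "DaemonSet", namespace: "open-cluster-management-agent-addon-ds"},
		{kind: "ServiceAccount", namespace: "open-cluster-management-agent-addon-ds"},
	}
	// With no Deployment present, the first DaemonSet namespace is chosen.
	fmt.Println(pickRegistrationNamespace(manifests))
}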
@@ -147,7 +147,7 @@ func newDaemonSetDecorator(

func (d *daemonSetDecorator) decorate(obj *unstructured.Unstructured) (*unstructured.Unstructured, error) {
daemonSet, err := utils.ConvertToDaemonSet(obj)
// not a deployment, directly return
// not a daemonset, directly return
if err != nil {
return obj, nil
}
@@ -168,7 +168,7 @@ func (d *daemonSetDecorator) decorate(obj *unstructured.Unstructured) (*unstruct
}

type podTemplateSpecDecorator interface {
// decorate modifies the deployment in place
// decorate modifies the pod template in place
decorate(pod *corev1.PodTemplateSpec) error
}

@@ -230,7 +230,7 @@ func (a *CRDTemplateAgentAddon) getDesiredAddOnTemplateInner(
return template.DeepCopy(), nil
}

// TemplateAgentRegistrationNamespaceFunc reads deployment resource in the manifests and use that namespace
// TemplateAgentRegistrationNamespaceFunc reads deployment/daemonset resources in the manifests and use that namespace
// as the default registration namespace. If addonDeploymentConfig is set, uses the namespace in it.
func (a *CRDTemplateAgentAddon) TemplateAgentRegistrationNamespaceFunc(
addon *addonapiv1alpha1.ManagedClusterAddOn) (string, error) {
@@ -242,8 +242,9 @@ func (a *CRDTemplateAgentAddon) TemplateAgentRegistrationNamespaceFunc(
return "", fmt.Errorf("addon %s template not found in status", addon.Name)
}

// pick the namespace of the first deployment
// pick the namespace of the first deployment, if there is no deployment, pick the namespace of the first daemonset
var desiredNS = "open-cluster-management-agent-addon"
var firstDeploymentNamespace, firstDaemonSetNamespace string
for _, manifest := range template.Spec.AgentSpec.Workload.Manifests {
object := &unstructured.Unstructured{}
if err := object.UnmarshalJSON(manifest.Raw); err != nil {
@@ -251,12 +252,23 @@ func (a *CRDTemplateAgentAddon) TemplateAgentRegistrationNamespaceFunc(
continue
}

if _, err = utils.ConvertToDeployment(object); err != nil {
continue
if firstDeploymentNamespace == "" {
if _, err = utils.ConvertToDeployment(object); err == nil {
firstDeploymentNamespace = object.GetNamespace()
break
}
}
if firstDaemonSetNamespace == "" {
if _, err = utils.ConvertToDaemonSet(object); err == nil {
firstDaemonSetNamespace = object.GetNamespace()
}
}
}

desiredNS = object.GetNamespace()
break
if firstDeploymentNamespace != "" {
desiredNS = firstDeploymentNamespace
} else if firstDaemonSetNamespace != "" {
desiredNS = firstDaemonSetNamespace
}

overrideNs, err := utils.AgentInstallNamespaceFromDeploymentConfigFunc(
@@ -21,6 +21,7 @@ import (

"open-cluster-management.io/addon-framework/pkg/addonfactory"
"open-cluster-management.io/addon-framework/pkg/addonmanager/addontesting"
"open-cluster-management.io/addon-framework/pkg/agent"
"open-cluster-management.io/addon-framework/pkg/utils"
addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
fakeaddon "open-cluster-management.io/api/client/addon/clientset/versioned/fake"
@@ -34,43 +35,115 @@ import (

func TestAddonTemplateAgentManifests(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
addonName := "hello"
addonName := "hello-template"
clusterName := "cluster1"

data, err := os.ReadFile("./testmanifests/addontemplate.yaml")
if err != nil {
t.Errorf("error reading file: %v", err)
}

s := runtime.NewScheme()
_ = scheme.AddToScheme(s)
_ = clusterv1apha1.Install(s)
_ = addonapiv1alpha1.Install(s)
decoder := serializer.NewCodecFactory(s).UniversalDeserializer()

addonTemplate := &addonapiv1alpha1.AddOnTemplate{
ObjectMeta: metav1.ObjectMeta{
Name: "hello-template",
},
validatePodTemplate := func(t *testing.T, podTemplate corev1.PodTemplateSpec,
expectedEnvs []corev1.EnvVar) {
image := podTemplate.Spec.Containers[0].Image
if image != "quay.io/ocm/addon-examples:v1" {
t.Errorf("unexpected image %v", image)
}

nodeSelector := podTemplate.Spec.NodeSelector
expectedNodeSelector := map[string]string{"host": "ssd"}
if !equality.Semantic.DeepEqual(nodeSelector, expectedNodeSelector) {
t.Errorf("unexpected nodeSelector %v", nodeSelector)
}

tolerations := podTemplate.Spec.Tolerations
expectedTolerations := []corev1.Toleration{
{
Key: "foo", Operator: corev1.TolerationOpExists,
Effect: corev1.TaintEffectNoExecute,
},
}
if !equality.Semantic.DeepEqual(tolerations, expectedTolerations) {
t.Errorf("unexpected tolerations %v", tolerations)
}

envs := podTemplate.Spec.Containers[0].Env
if !equality.Semantic.DeepEqual(envs, expectedEnvs) {
t.Errorf("unexpected envs %v", envs)
}

volumes := podTemplate.Spec.Volumes
expectedVolumes := []corev1.Volume{
{
Name: "hub-kubeconfig",
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: "hello-template-hub-kubeconfig",
},
},
},
{
Name: "cert-example-com-signer-name",
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: "hello-template-example.com-signer-name-client-cert",
},
},
},
}

if !equality.Semantic.DeepEqual(volumes, expectedVolumes) {
t.Errorf("expected volumes %+v, but got: %+v", expectedVolumes, volumes)
}

volumeMounts := podTemplate.Spec.Containers[0].VolumeMounts
expectedVolumeMounts := []corev1.VolumeMount{
{
Name: "hub-kubeconfig",
MountPath: "/managed/hub-kubeconfig",
},
{
Name: "cert-example-com-signer-name",
MountPath: "/managed/example.com-signer-name",
},
}
if !equality.Semantic.DeepEqual(volumeMounts, expectedVolumeMounts) {
t.Errorf("expected volumeMounts %v, but got: %v", expectedVolumeMounts, volumeMounts)
}
}

decoder := serializer.NewCodecFactory(s).UniversalDeserializer()
_, _, err = decoder.Decode(data, nil, addonTemplate)
if err != nil {
t.Errorf("error decoding file: %v", err)
validateAgentOptions := func(t *testing.T, addonOptions agent.AgentAddonOptions,
managedClusterAddon *addonapiv1alpha1.ManagedClusterAddOn, expectedNamespace string) {
if addonOptions.Registration == nil {
t.Fatal("registration is nil")
}
if addonOptions.Registration.AgentInstallNamespace == nil {
t.Fatal("agentInstallNamespace func is nil")
}

registrationNamespace, err := addonOptions.Registration.AgentInstallNamespace(managedClusterAddon)
if err != nil {
t.Fatalf("execute agent install namespace func error: %v", err)
}
if registrationNamespace != expectedNamespace {
t.Fatalf("agentInstallNamespace func does not return expected value, expect %s but got %s",
expectedNamespace, registrationNamespace)
}
}

cases := []struct {
name string
addonTemplate *addonapiv1alpha1.AddOnTemplate
addonDeploymentConfig *addonapiv1alpha1.AddOnDeploymentConfig
managedClusterAddonBuilder *testManagedClusterAddOnBuilder
managedCluster *clusterv1.ManagedCluster
expectedErr string
validateObjects func(t *testing.T, objects []runtime.Object)
name string
addonTemplatePath string
addonDeploymentConfig *addonapiv1alpha1.AddOnDeploymentConfig
managedCluster *clusterv1.ManagedCluster
expectedErr string
validateObjects func(t *testing.T, objects []runtime.Object)
validateAgentOptions func(t *testing.T, o agent.AgentAddonOptions, mca *addonapiv1alpha1.ManagedClusterAddOn)
}{
{
name: "no addon template",
addonTemplate: nil,
name: "no addon template",
addonTemplatePath: "",
addonDeploymentConfig: &addonapiv1alpha1.AddOnDeploymentConfig{
ObjectMeta: metav1.ObjectMeta{
Name: "hello-config",
@@ -85,20 +158,12 @@ func TestAddonTemplateAgentManifests(t *testing.T) {
},
},
},
managedClusterAddonBuilder: newManagedClusterAddonBuilder(
&addonapiv1alpha1.ManagedClusterAddOn{
ObjectMeta: metav1.ObjectMeta{
Name: addonName,
Namespace: clusterName,
},
},
),
managedCluster: addonfactory.NewFakeManagedCluster(clusterName, "1.10.1"),
expectedErr: fmt.Sprintf("addon %s/%s template not found in status", clusterName, addonName),
},
{
name: "manifests rendered successfully",
addonTemplate: addonTemplate,
name: "manifests rendered successfully",
addonTemplatePath: "./testmanifests/addontemplate.yaml",
addonDeploymentConfig: &addonapiv1alpha1.AddOnDeploymentConfig{
ObjectMeta: metav1.ObjectMeta{
Name: "hello-config",
@@ -132,89 +197,12 @@ func TestAddonTemplateAgentManifests(t *testing.T) {
},
},
},
managedClusterAddonBuilder: newManagedClusterAddonBuilder(
&addonapiv1alpha1.ManagedClusterAddOn{
ObjectMeta: metav1.ObjectMeta{
Name: addonName,
Namespace: clusterName,
},
},
),
managedCluster: addonfactory.NewFakeManagedCluster(clusterName, "1.10.1"),
validateObjects: func(t *testing.T, objects []runtime.Object) {
if len(objects) != 5 {
t.Fatalf("expected 5 objects, but got %v", len(objects))
}
validatePodTemplate := func(t *testing.T, podTemplate corev1.PodTemplateSpec) {
image := podTemplate.Spec.Containers[0].Image
if image != "quay.io/ocm/addon-examples:v1" {
t.Errorf("unexpected image %v", image)
}

nodeSelector := podTemplate.Spec.NodeSelector
expectedNodeSelector := map[string]string{"host": "ssd"}
if !equality.Semantic.DeepEqual(nodeSelector, expectedNodeSelector) {
t.Errorf("unexpected nodeSelector %v", nodeSelector)
}

tolerations := podTemplate.Spec.Tolerations
expectedTolerations := []corev1.Toleration{{Key: "foo", Operator: corev1.TolerationOpExists, Effect: corev1.TaintEffectNoExecute}}
if !equality.Semantic.DeepEqual(tolerations, expectedTolerations) {
t.Errorf("unexpected tolerations %v", tolerations)
}

envs := podTemplate.Spec.Containers[0].Env
expectedEnvs := []corev1.EnvVar{
{Name: "LOG_LEVEL", Value: "4"},
{Name: "HUB_KUBECONFIG", Value: "/managed/hub-kubeconfig/kubeconfig"},
{Name: "CLUSTER_NAME", Value: clusterName},
{Name: "INSTALL_NAMESPACE", Value: "test-install-namespace"},
}
if !equality.Semantic.DeepEqual(envs, expectedEnvs) {
t.Errorf("unexpected envs %v", envs)
}

volumes := podTemplate.Spec.Volumes
expectedVolumes := []corev1.Volume{
{
Name: "hub-kubeconfig",
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: "hello-hub-kubeconfig",
},
},
},
{
Name: "cert-example-com-signer-name",
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: "hello-example.com-signer-name-client-cert",
},
},
},
}

if !equality.Semantic.DeepEqual(volumes, expectedVolumes) {
t.Errorf("expected volumes %v, but got: %v", expectedVolumes, volumes)
}

volumeMounts := podTemplate.Spec.Containers[0].VolumeMounts
expectedVolumeMounts := []corev1.VolumeMount{
{
Name: "hub-kubeconfig",
MountPath: "/managed/hub-kubeconfig",
},
{
Name: "cert-example-com-signer-name",
MountPath: "/managed/example.com-signer-name",
},
}
if !equality.Semantic.DeepEqual(volumeMounts, expectedVolumeMounts) {
t.Errorf("expected volumeMounts %v, but got: %v", expectedVolumeMounts, volumeMounts)
}
}

unstructDeployment, ok := objects[0].(*unstructured.Unstructured)
if !ok {
t.Errorf("expected object to be *appsv1.Deployment, but got %T", objects[0])
@@ -226,11 +214,17 @@ func TestAddonTemplateAgentManifests(t *testing.T) {
if deployment.Namespace != "test-install-namespace" {
t.Errorf("unexpected namespace %s", deployment.Namespace)
}
validatePodTemplate(t, deployment.Spec.Template)
validatePodTemplate(t, deployment.Spec.Template,
[]corev1.EnvVar{
{Name: "LOG_LEVEL", Value: "4"},
{Name: "HUB_KUBECONFIG", Value: "/managed/hub-kubeconfig/kubeconfig"},
{Name: "CLUSTER_NAME", Value: clusterName},
{Name: "INSTALL_NAMESPACE", Value: "test-install-namespace"},
})
unstructDaemonSet, ok := objects[1].(*unstructured.Unstructured)
if !ok {
t.Errorf("expected object to be *appsv1.DaemonSet, but got %T", objects[0])
t.Errorf("expected object to be *appsv1.DaemonSet, but got %T", objects[1])
}
daemonSet, err := utils.ConvertToDaemonSet(unstructDaemonSet)
if err != nil {
@@ -239,12 +233,179 @@ func TestAddonTemplateAgentManifests(t *testing.T) {
if daemonSet.Namespace != "test-install-namespace" {
t.Errorf("unexpected namespace %s", daemonSet.Namespace)
}
validatePodTemplate(t, daemonSet.Spec.Template)
validatePodTemplate(t, daemonSet.Spec.Template,
[]corev1.EnvVar{
{Name: "LOG_LEVEL", Value: "4"},
{Name: "HUB_KUBECONFIG", Value: "/managed/hub-kubeconfig/kubeconfig"},
{Name: "CLUSTER_NAME", Value: clusterName},
{Name: "INSTALL_NAMESPACE", Value: "test-install-namespace"},
})

// check clusterrole
unstructCRB, ok := objects[3].(*unstructured.Unstructured)
if !ok {
t.Errorf("expected object to be unstructured, but got %T", objects[0])
t.Errorf("expected object to be unstructured, but got %T", objects[3])
}
clusterRoleBinding := &rbacv1.ClusterRoleBinding{}
err = runtime.DefaultUnstructuredConverter.FromUnstructured(unstructCRB.Object, clusterRoleBinding)
if err != nil {
t.Fatal(err)
}
if clusterRoleBinding.Subjects[0].Namespace != "test-install-namespace" {
t.Errorf("clusterRolebinding namespace does not match, got %s",
clusterRoleBinding.Subjects[0].Namespace)
}
},
validateAgentOptions: func(t *testing.T, o agent.AgentAddonOptions,
mca *addonapiv1alpha1.ManagedClusterAddOn) {
validateAgentOptions(t, o, mca, "test-install-namespace")
},
},
{
name: "manifests with only daemonset and no namespace cnfigured by adc rendered successfully",
|
||||
addonTemplatePath: "./testmanifests/addontemplate_daemonset.yaml",
|
||||
addonDeploymentConfig: &addonapiv1alpha1.AddOnDeploymentConfig{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "hello-config",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: addonapiv1alpha1.AddOnDeploymentConfigSpec{
|
||||
// AgentInstallNamespace: "test-install-namespace",
|
||||
CustomizedVariables: []addonapiv1alpha1.CustomizedVariable{
|
||||
{
|
||||
Name: "LOG_LEVEL",
|
||||
Value: "4",
|
||||
},
|
||||
},
|
||||
NodePlacement: &addonapiv1alpha1.NodePlacement{
|
||||
NodeSelector: map[string]string{
|
||||
"host": "ssd",
|
||||
},
|
||||
Tolerations: []corev1.Toleration{
|
||||
{
|
||||
Key: "foo",
|
||||
Operator: corev1.TolerationOpExists,
|
||||
Effect: corev1.TaintEffectNoExecute,
|
||||
},
|
||||
},
|
||||
},
|
||||
Registries: []addonapiv1alpha1.ImageMirror{
|
||||
{
|
||||
Source: "quay.io/open-cluster-management",
|
||||
Mirror: "quay.io/ocm",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
managedCluster: addonfactory.NewFakeManagedCluster(clusterName, "1.10.1"),
|
||||
validateObjects: func(t *testing.T, objects []runtime.Object) {
if len(objects) != 3 {
t.Fatalf("expected 3 objects, but got %v", len(objects))
}

unstructDaemonSet, ok := objects[0].(*unstructured.Unstructured)
if !ok {
t.Errorf("expected object to be *appsv1.DaemonSet, but got %T", objects[0])
}
daemonSet, err := utils.ConvertToDaemonSet(unstructDaemonSet)
if err != nil {
t.Fatal(err)
}
if daemonSet.Namespace != "open-cluster-management-agent-addon-ds" { // first daemonset ns
t.Errorf("unexpected namespace %s", daemonSet.Namespace)
}
validatePodTemplate(t, daemonSet.Spec.Template,
[]corev1.EnvVar{
{Name: "LOG_LEVEL", Value: "4"},
{Name: "HUB_KUBECONFIG", Value: "/managed/hub-kubeconfig/kubeconfig"},
{Name: "CLUSTER_NAME", Value: clusterName},
// {Name: "INSTALL_NAMESPACE", Value: "test-install-namespace"},
})

// check clusterrole
unstructCRB, ok := objects[2].(*unstructured.Unstructured)
if !ok {
t.Errorf("expected object to be unstructured, but got %T", objects[2])
}
clusterRoleBinding := &rbacv1.ClusterRoleBinding{}
err = runtime.DefaultUnstructuredConverter.FromUnstructured(unstructCRB.Object, clusterRoleBinding)
if err != nil {
t.Fatal(err)
}
if clusterRoleBinding.Subjects[0].Namespace != "open-cluster-management-agent-addon-ds" {
t.Errorf("clusterRolebinding namespace does not match, got %s", clusterRoleBinding.Subjects[0].Namespace)
}
},
validateAgentOptions: func(t *testing.T, o agent.AgentAddonOptions,
mca *addonapiv1alpha1.ManagedClusterAddOn) {
validateAgentOptions(t, o, mca, "open-cluster-management-agent-addon-ds")
},
},
{
name: "manifests with only deployment rendered successfully",
addonTemplatePath: "./testmanifests/addontemplate_deployment.yaml",
addonDeploymentConfig: &addonapiv1alpha1.AddOnDeploymentConfig{
ObjectMeta: metav1.ObjectMeta{
Name: "hello-config",
Namespace: "default",
},
Spec: addonapiv1alpha1.AddOnDeploymentConfigSpec{
AgentInstallNamespace: "test-install-namespace",
CustomizedVariables: []addonapiv1alpha1.CustomizedVariable{
{
Name: "LOG_LEVEL",
Value: "4",
},
},
NodePlacement: &addonapiv1alpha1.NodePlacement{
NodeSelector: map[string]string{
"host": "ssd",
},
Tolerations: []corev1.Toleration{
{
Key: "foo",
Operator: corev1.TolerationOpExists,
Effect: corev1.TaintEffectNoExecute,
},
},
},
Registries: []addonapiv1alpha1.ImageMirror{
{
Source: "quay.io/open-cluster-management",
Mirror: "quay.io/ocm",
},
},
},
},
managedCluster: addonfactory.NewFakeManagedCluster(clusterName, "1.10.1"),
validateObjects: func(t *testing.T, objects []runtime.Object) {
if len(objects) != 4 {
t.Fatalf("expected 4 objects, but got %v", len(objects))
}
unstructDeployment, ok := objects[0].(*unstructured.Unstructured)
if !ok {
t.Errorf("expected object to be *appsv1.Deployment, but got %T", objects[0])
}
deployment, err := utils.ConvertToDeployment(unstructDeployment)
if err != nil {
t.Fatal(err)
}
if deployment.Namespace != "test-install-namespace" {
t.Errorf("unexpected namespace %s", deployment.Namespace)
}
validatePodTemplate(t, deployment.Spec.Template,
[]corev1.EnvVar{
{Name: "LOG_LEVEL", Value: "4"},
{Name: "HUB_KUBECONFIG", Value: "/managed/hub-kubeconfig/kubeconfig"},
{Name: "CLUSTER_NAME", Value: clusterName},
{Name: "INSTALL_NAMESPACE", Value: "test-install-namespace"},
})

// check clusterrole
unstructCRB, ok := objects[2].(*unstructured.Unstructured)
if !ok {
t.Errorf("expected object to be unstructured, but got %T", objects[2])
}
clusterRoleBinding := &rbacv1.ClusterRoleBinding{}
err = runtime.DefaultUnstructuredConverter.FromUnstructured(unstructCRB.Object, clusterRoleBinding)
@@ -255,38 +416,96 @@ func TestAddonTemplateAgentManifests(t *testing.T) {
t.Errorf("clusterRolebinding namespace does not match, got %s", clusterRoleBinding.Subjects[0].Namespace)
}
},
validateAgentOptions: func(t *testing.T, o agent.AgentAddonOptions,
mca *addonapiv1alpha1.ManagedClusterAddOn) {
validateAgentOptions(t, o, mca, "test-install-namespace")
},
},
}

for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
var managedClusterAddon *addonapiv1alpha1.ManagedClusterAddOn
var objs []runtime.Object
if tc.managedClusterAddonBuilder != nil {
if tc.addonTemplate != nil {
tc.managedClusterAddonBuilder.withAddonTemplate(tc.addonTemplate)
objs = append(objs, tc.addonTemplate)
}
if tc.addonDeploymentConfig != nil {
tc.managedClusterAddonBuilder.withAddonDeploymentConfig(tc.addonDeploymentConfig)
objs = append(objs, tc.addonDeploymentConfig)
}
managedClusterAddon = tc.managedClusterAddonBuilder.build()
objs = append(objs, managedClusterAddon)
cma := &addonapiv1alpha1.ClusterManagementAddOn{
ObjectMeta: metav1.ObjectMeta{
Name: addonName,
},
Status: addonapiv1alpha1.ClusterManagementAddOnStatus{
DefaultConfigReferences: []addonapiv1alpha1.DefaultConfigReference{
{
ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{
Group: utils.AddOnTemplateGVR.Group,
Resource: utils.AddOnTemplateGVR.Resource,
},
DesiredConfig: &addonapiv1alpha1.ConfigSpecHash{
ConfigReferent: addonapiv1alpha1.ConfigReferent{
Name: addonName,
},
SpecHash: "fake-hash",
},
},
},
},
}
var addonTemplate *addonapiv1alpha1.AddOnTemplate
if len(tc.addonTemplatePath) > 0 {

data, err := os.ReadFile(tc.addonTemplatePath)
if err != nil {
t.Errorf("error reading file: %v", err)
}

addonTemplate = &addonapiv1alpha1.AddOnTemplate{
ObjectMeta: metav1.ObjectMeta{
Name: addonName,
},
}

_, _, err = decoder.Decode(data, nil, addonTemplate)
if err != nil {
t.Errorf("error decoding file: %v", err)
}
}

var managedClusterAddon *addonapiv1alpha1.ManagedClusterAddOn
var objs = []runtime.Object{cma}

managedClusterAddonBuilder := newManagedClusterAddonBuilder(
&addonapiv1alpha1.ManagedClusterAddOn{
ObjectMeta: metav1.ObjectMeta{
Name: addonName,
Namespace: clusterName,
},
},
)

if addonTemplate != nil {
managedClusterAddonBuilder.withAddonTemplate(addonTemplate)
objs = append(objs, addonTemplate)
}
if tc.addonDeploymentConfig != nil {
managedClusterAddonBuilder.withAddonDeploymentConfig(tc.addonDeploymentConfig)
objs = append(objs, tc.addonDeploymentConfig)
}
managedClusterAddon = managedClusterAddonBuilder.build()
objs = append(objs, managedClusterAddon)

hubKubeClient := fakekube.NewSimpleClientset()
addonClient := fakeaddon.NewSimpleClientset(objs...)

addonInformerFactory := addoninformers.NewSharedInformerFactory(addonClient, 30*time.Minute)
cmaStore := addonInformerFactory.Addon().V1alpha1().ClusterManagementAddOns().Informer().GetStore()
if err := cmaStore.Add(cma); err != nil {
t.Fatal(err)
}
if managedClusterAddon != nil {
mcaStore := addonInformerFactory.Addon().V1alpha1().ManagedClusterAddOns().Informer().GetStore()
if err := mcaStore.Add(managedClusterAddon); err != nil {
t.Fatal(err)
}
}
if tc.addonTemplate != nil {
if addonTemplate != nil {
atStore := addonInformerFactory.Addon().V1alpha1().AddOnTemplates().Informer().GetStore()
if err := atStore.Add(tc.addonTemplate); err != nil {
if err := atStore.Add(addonTemplate); err != nil {
t.Fatal(err)
}
}
@@ -313,7 +532,10 @@ func TestAddonTemplateAgentManifests(t *testing.T) {
assert.Equal(t, tc.expectedErr, err.Error(), tc.name)
} else {
tc.validateObjects(t, objects)
addonOptions := agentAddon.GetAgentAddonOptions()
tc.validateAgentOptions(t, addonOptions, managedClusterAddon)
}

})
}
}

@@ -0,0 +1,88 @@
apiVersion: addon.open-cluster-management.io/v1alpha1
kind: AddOnTemplate
metadata:
  name: hello-template
spec:
  addonName: hello-template
  agentSpec:
    workload:
      manifests:
        - kind: DaemonSet
          apiVersion: apps/v1
          metadata:
            name: hello-template-agent-ds
            namespace: open-cluster-management-agent-addon-ds
            annotations:
              "addon.open-cluster-management.io/deletion-orphan": ""
            labels:
              app: hello-template-agent
          spec:
            selector:
              matchLabels:
                app: hello-template-agent
            template:
              metadata:
                labels:
                  app: hello-template-agent
              spec:
                serviceAccountName: hello-template-agent-sa
                containers:
                  - name: helloworld-agent
                    image: quay.io/open-cluster-management/addon-examples:v1
                    imagePullPolicy: IfNotPresent
                    args:
                      - "/helloworld_helm"
                      - "agent"
                      - "--cluster-name={{CLUSTER_NAME}}"
                      - "--addon-namespace=open-cluster-management-agent-addon-ds"
                      - "--addon-name=hello-template"
                      - "--hub-kubeconfig={{HUB_KUBECONFIG}}"
                    env:
                      - name: LOG_LEVEL
                        value: "{{LOG_LEVEL}}" # addonDeploymentConfig variables
        - kind: ServiceAccount
          apiVersion: v1
          metadata:
            name: hello-template-agent-sa
            namespace: open-cluster-management-agent-addon-ds
            annotations:
              "addon.open-cluster-management.io/deletion-orphan": ""
        - kind: ClusterRoleBinding
          apiVersion: rbac.authorization.k8s.io/v1
          metadata:
            name: hello-template-agent
          roleRef:
            apiGroup: rbac.authorization.k8s.io
            kind: ClusterRole
            name: cluster-admin
          subjects:
            - kind: ServiceAccount
              name: hello-template-agent-sa
              namespace: open-cluster-management-agent-addon-ds
  registration:
    # kubeClient or custom signer, if kubeClient, user and group is in a certain format.
    # user is "system:open-cluster-management:cluster:{clusterName}:addon:{addonName}:agent:{agentName}"
    # group is ["system:open-cluster-management:cluster:{clusterName}:addon:{addonName}",
    # "system:open-cluster-management:addon:{addonName}", "system:authenticated"]
    - type: KubeClient
      kubeClient:
        hubPermissions:
          - type: CurrentCluster
            roleRef:
              apiGroup: rbac.authorization.k8s.io
              kind: ClusterRole
              name: cm-admin
    - customSigner:
        signerName: example.com/signer-name
        signingCA:
          name: ca-secret
          namespace: default
        subject:
          groups:
            - g1
            - g2
          organizationUnit:
            - o1
            - o2
          user: user1
      type: CustomSigner

@@ -0,0 +1,116 @@
apiVersion: addon.open-cluster-management.io/v1alpha1
kind: AddOnTemplate
metadata:
  name: hello-template
spec:
  addonName: hello-template
  agentSpec:
    workload:
      manifests:
        - kind: Deployment
          apiVersion: apps/v1
          metadata:
            name: hello-template-agent
            namespace: open-cluster-management-agent-addon
            annotations:
              "addon.open-cluster-management.io/deletion-orphan": ""
            labels:
              app: hello-template-agent
          spec:
            replicas: 1
            selector:
              matchLabels:
                app: hello-template-agent
            template:
              metadata:
                labels:
                  app: hello-template-agent
              spec:
                serviceAccountName: hello-template-agent-sa
                containers:
                  - name: helloworld-agent
                    image: quay.io/open-cluster-management/addon-examples:v1
                    imagePullPolicy: IfNotPresent
                    args:
                      - "/helloworld_helm"
                      - "agent"
                      - "--cluster-name={{CLUSTER_NAME}}"
                      - "--addon-namespace=open-cluster-management-agent-addon"
                      - "--addon-name=hello-template"
                      - "--hub-kubeconfig={{HUB_KUBECONFIG}}"
                    env:
                      - name: LOG_LEVEL
                        value: "{{LOG_LEVEL}}" # addonDeploymentConfig variables
        - kind: ServiceAccount
          apiVersion: v1
          metadata:
            name: hello-template-agent-sa
            namespace: open-cluster-management-agent-addon
            annotations:
              "addon.open-cluster-management.io/deletion-orphan": ""
        - kind: ClusterRoleBinding
          apiVersion: rbac.authorization.k8s.io/v1
          metadata:
            name: hello-template-agent
          roleRef:
            apiGroup: rbac.authorization.k8s.io
            kind: ClusterRole
            name: cluster-admin
          subjects:
            - kind: ServiceAccount
              name: hello-template-agent-sa
              namespace: open-cluster-management-agent-addon
        - kind: Job
          apiVersion: batch/v1
          metadata:
            name: hello-template-cleanup-configmap
            namespace: open-cluster-management-agent-addon
            annotations:
              "addon.open-cluster-management.io/addon-pre-delete": ""
          spec:
            manualSelector: true
            selector:
              matchLabels:
                job: hello-template-cleanup-configmap
            template:
              metadata:
                labels:
                  job: hello-template-cleanup-configmap
              spec:
                serviceAccountName: hello-template-agent-sa
                restartPolicy: Never
                containers:
                  - name: hello-template-agent
                    image: quay.io/open-cluster-management/addon-examples
                    imagePullPolicy: IfNotPresent
                    args:
                      - "/helloworld_helm"
                      - "cleanup"
                      - "--addon-namespace=open-cluster-management-agent-addon"
  registration:
    # kubeClient or custom signer, if kubeClient, user and group is in a certain format.
    # user is "system:open-cluster-management:cluster:{clusterName}:addon:{addonName}:agent:{agentName}"
    # group is ["system:open-cluster-management:cluster:{clusterName}:addon:{addonName}",
    # "system:open-cluster-management:addon:{addonName}", "system:authenticated"]
    - type: KubeClient
      kubeClient:
        hubPermissions:
          - type: CurrentCluster
            roleRef:
              apiGroup: rbac.authorization.k8s.io
              kind: ClusterRole
              name: cm-admin
    - customSigner:
        signerName: example.com/signer-name
        signingCA:
          name: ca-secret
          namespace: default
        subject:
          groups:
            - g1
            - g2
          organizationUnit:
            - o1
            - o2
          user: user1
      type: CustomSigner