upgrade framework and check configured condition in addon template (#641)

Signed-off-by: haoqing0110 <qhao@redhat.com>
This commit is contained in:
Qing Hao
2024-10-09 20:53:45 +08:00
committed by GitHub
parent 851d0152a3
commit 43c3500738
15 changed files with 455 additions and 16 deletions

2
go.mod
View File

@@ -32,7 +32,7 @@ require (
k8s.io/klog/v2 v2.130.1
k8s.io/kube-aggregator v0.30.3
k8s.io/utils v0.0.0-20240310230437-4693a0247e57
open-cluster-management.io/addon-framework v0.10.1-0.20240703130731-ba7fd000a03a
open-cluster-management.io/addon-framework v0.10.1-0.20241009100235-11aa520f541f
open-cluster-management.io/api v0.14.1-0.20241008081048-f6c658202790
open-cluster-management.io/sdk-go v0.14.1-0.20240918072645-225dcf1b6866
sigs.k8s.io/controller-runtime v0.18.5

4
go.sum
View File

@@ -439,8 +439,8 @@ k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7F
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
k8s.io/utils v0.0.0-20240310230437-4693a0247e57 h1:gbqbevonBh57eILzModw6mrkbwM0gQBEuevE/AaBsHY=
k8s.io/utils v0.0.0-20240310230437-4693a0247e57/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
open-cluster-management.io/addon-framework v0.10.1-0.20240703130731-ba7fd000a03a h1:La1cYE3xkPFS2OJnsPQbkkahKE7yabuPcIISRfb4qsg=
open-cluster-management.io/addon-framework v0.10.1-0.20240703130731-ba7fd000a03a/go.mod h1:C1VETu/CIQKYfMiVAgNzPEUHjCpL9P1Z/KsGhHa4kl4=
open-cluster-management.io/addon-framework v0.10.1-0.20241009100235-11aa520f541f h1:KRnWPOfj69VAF7t/fz2tG6iQ5qhp/rptrwiYU4g41ns=
open-cluster-management.io/addon-framework v0.10.1-0.20241009100235-11aa520f541f/go.mod h1:HHR3Mtntb5OL8A1szTc73yZZ99mU+C1NC9oxe7NTf3M=
open-cluster-management.io/api v0.14.1-0.20241008081048-f6c658202790 h1:XszHWAR6PhYXBFPN4qgk8D5HVl8W/61j+bNMsXVuW7U=
open-cluster-management.io/api v0.14.1-0.20241008081048-f6c658202790/go.mod h1:9erZEWEn4bEqh0nIX2wA7f/s3KCuFycQdBrPrRzi0QM=
open-cluster-management.io/sdk-go v0.14.1-0.20240918072645-225dcf1b6866 h1:nxYrSsYwl9Mq8DuaJ0K98PCpuGsai+AvXbggMfZDCGI=

View File

@@ -131,6 +131,8 @@ func (a *CRDTemplateAgentAddon) GetAgentAddonOptions() agent.AgentAddonOptions {
// if the cluster changes from unknown to true, recheck the health of the addon immediately
utils.ClusterAvailableConditionChanged(old, new)
},
// enable the ConfigCheckEnabled flag to check the configured condition before rendering manifests
ConfigCheckEnabled: true,
}
template, err := a.GetDesiredAddOnTemplate(nil, "", a.addonName)

View File

@@ -262,7 +262,7 @@ func TestAddonTemplateAgentManifests(t *testing.T) {
},
},
{
name: "manifests with only daemonset and no namespace cnfigured by adc rendered successfully",
name: "manifests with only daemonset and no namespace configured by adc rendered successfully",
addonTemplatePath: "./testmanifests/addontemplate_daemonset.yaml",
addonDeploymentConfig: &addonapiv1alpha1.AddOnDeploymentConfig{
ObjectMeta: metav1.ObjectMeta{

View File

@@ -17,7 +17,7 @@ import (
clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1"
)
var _ = ginkgo.Describe("Agent deploy", func() {
var _ = ginkgo.Describe("Addon install", func() {
suffix := rand.String(5)
var cma *addonapiv1alpha1.ClusterManagementAddOn
var placementNamespace string

View File

@@ -0,0 +1,275 @@
package integration
import (
"context"
"fmt"
"os"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/client-go/kubernetes/scheme"
"open-cluster-management.io/addon-framework/pkg/addonmanager/constants"
"open-cluster-management.io/addon-framework/pkg/utils"
addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1"
clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1"
)
// Integration suite for addon-template deployment with the configured
// condition check enabled: manifestworks must be rendered for a cluster
// only after its ManagedClusterAddOn reports Configured=True, and the
// Progressive rollout (MaxConcurrency=1) must roll the agent out to one
// cluster at a time.
var _ = ginkgo.Describe("Agent deploy", func() {
	var clusterNames []string
	var err error
	var addonName string
	var addonTemplateName string
	var addonDeployConfigName string
	var addonDeployConfigNamespace string
	var placementName string
	var placementNamespace string
	var manifestWorkName string

	ginkgo.BeforeEach(func() {
		suffix := rand.String(5)
		addonName = fmt.Sprintf("addon-%s", suffix)
		// NOTE(review): the template and deployment config use fixed names and
		// are not deleted in AfterEach, so adding a second spec to this
		// Describe would hit AlreadyExists on create — confirm before extending.
		addonTemplateName = "hello-template"
		addonDeployConfigName = "hello-config"
		addonDeployConfigNamespace = "default"
		placementName = fmt.Sprintf("ns-%s", suffix)
		placementNamespace = fmt.Sprintf("ns-%s", suffix)
		// name of the deploy work produced for placement index 0
		manifestWorkName = fmt.Sprintf("%s-0", constants.DeployWorkNamePrefix(addonName))

		s := runtime.NewScheme()
		_ = scheme.AddToScheme(s)
		_ = addonapiv1alpha1.Install(s)
		decoder := serializer.NewCodecFactory(s).UniversalDeserializer()

		// prepare cluster
		// reset the list first so clusters from a previous spec are not
		// carried over; without this, AfterEach would attempt to delete
		// clusters from an earlier spec a second time
		clusterNames = nil
		for i := 0; i < 2; i++ {
			managedClusterName := fmt.Sprintf("managedcluster-%s-%d", suffix, i)
			clusterNames = append(clusterNames, managedClusterName)
			err = createManagedCluster(hubClusterClient, managedClusterName)
			gomega.Expect(err).ToNot(gomega.HaveOccurred())
		}

		// prepare cma
		cma := &addonapiv1alpha1.ClusterManagementAddOn{
			ObjectMeta: metav1.ObjectMeta{
				Name: addonName,
			},
			Spec: addonapiv1alpha1.ClusterManagementAddOnSpec{
				SupportedConfigs: []addonapiv1alpha1.ConfigMeta{
					{
						ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{
							Group:    utils.AddOnTemplateGVR.Group,
							Resource: utils.AddOnTemplateGVR.Resource,
						},
					},
					{
						ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{
							Group:    utils.AddOnDeploymentConfigGVR.Group,
							Resource: utils.AddOnDeploymentConfigGVR.Resource,
						},
					},
				},
				InstallStrategy: addonapiv1alpha1.InstallStrategy{
					Type: addonapiv1alpha1.AddonInstallStrategyPlacements,
					Placements: []addonapiv1alpha1.PlacementStrategy{
						{
							PlacementRef: addonapiv1alpha1.PlacementRef{Name: placementName, Namespace: placementNamespace},
							// MaxConcurrency=1 so only one cluster is rolled out
							// at a time; the test below relies on this ordering.
							RolloutStrategy: clusterv1alpha1.RolloutStrategy{
								Type: clusterv1alpha1.Progressive,
								Progressive: &clusterv1alpha1.RolloutProgressive{
									MaxConcurrency: intstr.FromInt(1),
								},
							},
							Configs: []addonapiv1alpha1.AddOnConfig{
								{
									ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{
										Group:    utils.AddOnTemplateGVR.Group,
										Resource: utils.AddOnTemplateGVR.Resource,
									},
									ConfigReferent: addonapiv1alpha1.ConfigReferent{
										Name: addonTemplateName,
									},
								},
								{
									ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{
										Group:    utils.AddOnDeploymentConfigGVR.Group,
										Resource: utils.AddOnDeploymentConfigGVR.Resource,
									},
									ConfigReferent: addonapiv1alpha1.ConfigReferent{
										Name:      addonDeployConfigName,
										Namespace: addonDeployConfigNamespace,
									},
								},
							},
						},
					},
				},
			},
		}
		_, err := hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Create(context.Background(), cma, metav1.CreateOptions{})
		gomega.Expect(err).ToNot(gomega.HaveOccurred())
		assertClusterManagementAddOnAnnotations(addonName)

		// prepare addon template
		var addonTemplate *addonapiv1alpha1.AddOnTemplate
		data, err := os.ReadFile("./test/integration/addon/testmanifests/addontemplate.yaml")
		gomega.Expect(err).ToNot(gomega.HaveOccurred())
		addonTemplate = &addonapiv1alpha1.AddOnTemplate{
			ObjectMeta: metav1.ObjectMeta{
				Name: addonTemplateName,
			},
		}
		_, _, err = decoder.Decode(data, nil, addonTemplate)
		gomega.Expect(err).ToNot(gomega.HaveOccurred())
		_, err = hubAddonClient.AddonV1alpha1().AddOnTemplates().Create(context.Background(), addonTemplate, metav1.CreateOptions{})
		gomega.Expect(err).ToNot(gomega.HaveOccurred())

		// prepare addon deployment config
		addonDeploymentConfig := &addonapiv1alpha1.AddOnDeploymentConfig{
			ObjectMeta: metav1.ObjectMeta{
				Name:      addonDeployConfigName,
				Namespace: addonDeployConfigNamespace,
			},
			Spec: addonapiv1alpha1.AddOnDeploymentConfigSpec{
				AgentInstallNamespace: "test-install-namespace",
				CustomizedVariables: []addonapiv1alpha1.CustomizedVariable{
					{
						Name:  "LOG_LEVEL",
						Value: "4",
					},
				},
				NodePlacement: &addonapiv1alpha1.NodePlacement{
					NodeSelector: map[string]string{
						"host": "ssd",
					},
					Tolerations: []corev1.Toleration{
						{
							Key:      "foo",
							Operator: corev1.TolerationOpExists,
							Effect:   corev1.TaintEffectNoExecute,
						},
					},
				},
				Registries: []addonapiv1alpha1.ImageMirror{
					{
						Source: "quay.io/open-cluster-management",
						Mirror: "quay.io/ocm",
					},
				},
			},
		}
		_, err = hubAddonClient.AddonV1alpha1().AddOnDeploymentConfigs(addonDeployConfigNamespace).Create(
			context.Background(), addonDeploymentConfig, metav1.CreateOptions{})
		gomega.Expect(err).ToNot(gomega.HaveOccurred())

		// prepare placement
		pns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: placementNamespace}}
		_, err = hubKubeClient.CoreV1().Namespaces().Create(context.Background(), pns, metav1.CreateOptions{})
		gomega.Expect(err).ToNot(gomega.HaveOccurred())
		placement := &clusterv1beta1.Placement{ObjectMeta: metav1.ObjectMeta{Name: placementName, Namespace: placementNamespace}}
		_, err = hubClusterClient.ClusterV1beta1().Placements(placementNamespace).Create(
			context.Background(), placement, metav1.CreateOptions{})
		gomega.Expect(err).ToNot(gomega.HaveOccurred())

		// prepare placement decisions
		err = createPlacementDecision(hubClusterClient, placementNamespace, placementName, "0", clusterNames[0], clusterNames[1])
		gomega.Expect(err).ToNot(gomega.HaveOccurred())
	})

	ginkgo.AfterEach(func() {
		for _, managedClusterName := range clusterNames {
			err = hubKubeClient.CoreV1().Namespaces().Delete(context.Background(), managedClusterName, metav1.DeleteOptions{})
			gomega.Expect(err).ToNot(gomega.HaveOccurred())
			err = hubClusterClient.ClusterV1().ManagedClusters().Delete(context.Background(), managedClusterName, metav1.DeleteOptions{})
			gomega.Expect(err).ToNot(gomega.HaveOccurred())
			delete(testAddOnConfigsImpl.registrations, managedClusterName)
		}
		err = hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Delete(context.Background(),
			addonName, metav1.DeleteOptions{})
		gomega.Expect(err).ToNot(gomega.HaveOccurred())
	})

	ginkgo.It("Should deploy agent for addon template", func() {
		ginkgo.By("check mca condition")
		// with MaxConcurrency=1 only cluster 0 is configured first; cluster 1
		// must stay unconfigured until cluster 0's work reports success
		assertManagedClusterAddOnConditions(addonName, clusterNames[0], metav1.Condition{
			Type:    addonapiv1alpha1.ManagedClusterAddOnConditionConfigured,
			Status:  metav1.ConditionTrue,
			Reason:  "ConfigurationsConfigured",
			Message: "Configurations configured",
		})
		assertManagedClusterAddOnConditions(addonName, clusterNames[1], metav1.Condition{
			Type:    addonapiv1alpha1.ManagedClusterAddOnConditionConfigured,
			Status:  metav1.ConditionFalse,
			Reason:  "ConfigurationsNotConfigured",
			Message: "Configurations updated and not configured yet",
		})

		ginkgo.By("check only 1 work rendered")
		gomega.Eventually(func() error {
			work, err := hubWorkClient.WorkV1().ManifestWorks(clusterNames[0]).List(
				context.Background(), metav1.ListOptions{})
			if err != nil {
				return err
			}
			if len(work.Items) != 1 {
				return fmt.Errorf("expect 1 work but get %v", work.Items)
			}
			return nil
		}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
		// cluster 1 is not configured yet, so no work may be rendered for it
		gomega.Eventually(func() error {
			work, err := hubWorkClient.WorkV1().ManifestWorks(clusterNames[1]).List(
				context.Background(), metav1.ListOptions{})
			if err != nil {
				return err
			}
			if len(work.Items) != 0 {
				return fmt.Errorf("expect 0 work but get %v", work.Items)
			}
			return nil
		}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())

		ginkgo.By("update work status to trigger addon status")
		// marking cluster 0's work applied lets the progressive rollout
		// advance to cluster 1
		updateManifestWorkStatus(hubWorkClient, clusterNames[0], manifestWorkName, metav1.ConditionTrue)

		ginkgo.By("check mca condition")
		assertManagedClusterAddOnConditions(addonName, clusterNames[1], metav1.Condition{
			Type:    addonapiv1alpha1.ManagedClusterAddOnConditionConfigured,
			Status:  metav1.ConditionTrue,
			Reason:  "ConfigurationsConfigured",
			Message: "Configurations configured",
		})

		ginkgo.By("check rendered work")
		gomega.Eventually(func() error {
			work, err := hubWorkClient.WorkV1().ManifestWorks(clusterNames[1]).List(
				context.Background(), metav1.ListOptions{})
			if err != nil {
				return err
			}
			if len(work.Items) != 1 {
				return fmt.Errorf("expect 1 work but get %v", work.Items)
			}
			return nil
		}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
	})
})

View File

@@ -0,0 +1,149 @@
apiVersion: addon.open-cluster-management.io/v1alpha1
kind: AddOnTemplate
metadata:
name: hello-template
spec:
addonName: hello-template
agentSpec:
workload:
manifests:
- kind: Deployment
apiVersion: apps/v1
metadata:
name: hello-template-agent
namespace: open-cluster-management-agent-addon
annotations:
"addon.open-cluster-management.io/deletion-orphan": ""
labels:
app: hello-template-agent
spec:
replicas: 1
selector:
matchLabels:
app: hello-template-agent
template:
metadata:
labels:
app: hello-template-agent
spec:
serviceAccountName: hello-template-agent-sa
containers:
- name: helloworld-agent
image: quay.io/open-cluster-management/addon-examples:v1
imagePullPolicy: IfNotPresent
args:
- "/helloworld_helm"
- "agent"
- "--cluster-name={{CLUSTER_NAME}}"
- "--addon-namespace=open-cluster-management-agent-addon"
- "--addon-name=hello-template"
- "--hub-kubeconfig={{HUB_KUBECONFIG}}"
env:
- name: LOG_LEVEL
value: "{{LOG_LEVEL}}" # addonDeploymentConfig variables
- kind: DaemonSet
apiVersion: apps/v1
metadata:
name: hello-template-agent-ds
namespace: open-cluster-management-agent-addon
annotations:
"addon.open-cluster-management.io/deletion-orphan": ""
labels:
app: hello-template-agent
spec:
selector:
matchLabels:
app: hello-template-agent
template:
metadata:
labels:
app: hello-template-agent
spec:
serviceAccountName: hello-template-agent-sa
containers:
- name: helloworld-agent
image: quay.io/open-cluster-management/addon-examples:v1
imagePullPolicy: IfNotPresent
args:
- "/helloworld_helm"
- "agent"
- "--cluster-name={{CLUSTER_NAME}}"
- "--addon-namespace=open-cluster-management-agent-addon"
- "--addon-name=hello-template"
- "--hub-kubeconfig={{HUB_KUBECONFIG}}"
env:
- name: LOG_LEVEL
value: "{{LOG_LEVEL}}" # addonDeploymentConfig variables
- kind: ServiceAccount
apiVersion: v1
metadata:
name: hello-template-agent-sa
namespace: open-cluster-management-agent-addon
annotations:
"addon.open-cluster-management.io/deletion-orphan": ""
- kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: hello-template-agent
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: hello-template-agent-sa
namespace: open-cluster-management-agent-addon
- kind: Job
apiVersion: batch/v1
metadata:
name: hello-template-cleanup-configmap
namespace: open-cluster-management-agent-addon
annotations:
"addon.open-cluster-management.io/addon-pre-delete": ""
spec:
manualSelector: true
selector:
matchLabels:
job: hello-template-cleanup-configmap
template:
metadata:
labels:
job: hello-template-cleanup-configmap
spec:
serviceAccountName: hello-template-agent-sa
restartPolicy: Never
containers:
- name: hello-template-agent
image: quay.io/open-cluster-management/addon-examples
imagePullPolicy: IfNotPresent
args:
- "/helloworld_helm"
- "cleanup"
- "--addon-namespace=open-cluster-management-agent-addon"
registration:
# kubeClient or custom signer, if kubeClient, user and group is in a certain format.
# user is "system:open-cluster-management:cluster:{clusterName}:addon:{addonName}:agent:{agentName}"
# group is ["system:open-cluster-management:cluster:{clusterName}:addon:{addonName}",
# "system:open-cluster-management:addon:{addonName}", "system:authenticated"]
- type: KubeClient
kubeClient:
hubPermissions:
- type: CurrentCluster
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cm-admin
- customSigner:
signerName: example.com/signer-name
signingCA:
name: ca-secret
namespace: default
subject:
groups:
- g1
- g2
organizationUnit:
- o1
- o2
user: user1
type: CustomSigner

2
vendor/modules.txt vendored
View File

@@ -1512,7 +1512,7 @@ k8s.io/utils/pointer
k8s.io/utils/ptr
k8s.io/utils/strings/slices
k8s.io/utils/trace
# open-cluster-management.io/addon-framework v0.10.1-0.20240703130731-ba7fd000a03a
# open-cluster-management.io/addon-framework v0.10.1-0.20241009100235-11aa520f541f
## explicit; go 1.22.0
open-cluster-management.io/addon-framework/pkg/addonfactory
open-cluster-management.io/addon-framework/pkg/addonmanager

View File

@@ -110,6 +110,12 @@ func (f *AgentAddonFactory) WithAgentHostedInfoFn(
return f
}
// WithConfigCheckEnabledOption enables checking the addon's Configured
// condition before rendering manifests: when set, the deploy controller
// builds manifestworks for a cluster only after its ManagedClusterAddOn
// reports the Configured condition as True.
func (f *AgentAddonFactory) WithConfigCheckEnabledOption() *AgentAddonFactory {
f.agentAddonOptions.ConfigCheckEnabled = true
return f
}
// WithTrimCRDDescription is to enable trim the description of CRDs in manifestWork.
func (f *AgentAddonFactory) WithTrimCRDDescription() *AgentAddonFactory {
f.trimCRDDescription = true

View File

@@ -231,9 +231,6 @@ func (a *HelmAgentAddon) getValueAgentInstallNamespace(addon *addonapiv1alpha1.M
}
if len(ns) > 0 {
installNamespace = ns
} else {
klog.InfoS("Namespace for addon returned by agent install namespace func is empty",
"addonNamespace", addon.Namespace, "addonName", addon)
}
}
return installNamespace, nil

View File

@@ -135,9 +135,6 @@ func (a *TemplateAgentAddon) getBuiltinValues(
}
if len(ns) > 0 {
installNamespace = ns
} else {
klog.InfoS("Namespace for addon returned by agent install namespace func is empty",
"addonNamespace", addon.Namespace, "addonName", addon)
}
}
builtinValues.AddonInstallNamespace = installNamespace

View File

@@ -381,6 +381,12 @@ func (c *addonDeployController) buildDeployManifestWorksFunc(addonWorkBuilder *a
return nil, nil, fmt.Errorf("failed to get agentAddon")
}
if agentAddon.GetAgentAddonOptions().ConfigCheckEnabled &&
!meta.IsStatusConditionTrue(addon.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionConfigured) {
klog.InfoS("Addon configured condition is not set in status", "addonName", addon.Name)
return nil, nil, nil
}
objects, err := agentAddon.Manifests(cluster, addon)
if err != nil {
meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{
@@ -443,6 +449,12 @@ func (c *addonDeployController) buildHookManifestWorkFunc(addonWorkBuilder *addo
return nil, fmt.Errorf("failed to get agentAddon")
}
if agentAddon.GetAgentAddonOptions().ConfigCheckEnabled &&
!meta.IsStatusConditionTrue(addon.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionConfigured) {
klog.InfoS("Addon configured condition is not set in status", "addonName", addon.Name)
return nil, nil
}
objects, err := agentAddon.Manifests(cluster, addon)
if err != nil {
meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{

View File

@@ -169,9 +169,6 @@ func (c *addonRegistrationController) sync(ctx context.Context, syncCtx factory.
}
if len(ns) > 0 {
managedClusterAddonCopy.Status.Namespace = ns
} else {
klog.InfoS("Namespace for addon returned by agent install namespace func is empty",
"addonNamespace", managedClusterAddonCopy.Namespace, "addonName", managedClusterAddonCopy.Name)
}
}

View File

@@ -91,6 +91,11 @@ type AgentAddonOptions struct {
// json path which is already in the existing rules, compare by the path name.
// +optional
ManifestConfigs []workapiv1.ManifestConfigOption
// ConfigCheckEnabled defines whether to check the configured condition before rendering manifests.
// If not set, will be defaulted to false.
// +optional
ConfigCheckEnabled bool
}
type CSRSignerFunc func(csr *certificatesv1.CertificateSigningRequest) []byte

View File

@@ -71,7 +71,6 @@ func GetDesiredAddOnDeploymentConfig(
ok, configRef := GetAddOnConfigRef(addon.Status.ConfigReferences,
AddOnDeploymentConfigGVR.Group, AddOnDeploymentConfigGVR.Resource)
if !ok {
klog.InfoS("Addon deployment config in status is empty", "addonName", addon.Name)
return nil, nil
}