Cherry pick enable-sync-labels to 0.13 (#515)

* sync labels from klusterlet to all agent resources (#475)

Signed-off-by: Zhiwei Yin <zyin@redhat.com>

* add enable-sync-labels flag to klusterlet operator (#505)

Signed-off-by: Zhiwei Yin <zyin@redhat.com>

* fix issue that pull secret and ns are synced labels when enable-sync-labels is disabled (#511)

Signed-off-by: Zhiwei Yin <zyin@redhat.com>

---------

Signed-off-by: Zhiwei Yin <zyin@redhat.com>
Author: Zhiwei Yin
Date: 2024-06-13 16:34:57 +08:00
Committed by: GitHub
Parent: c41fe8c97d
Commit: 353a850f8d
43 changed files with 577 additions and 196 deletions


@@ -7,6 +7,12 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: open-cluster-management:{{ .KlusterletName }}-registration:addon-management
labels:
{{ if gt (len .Labels) 0 }}
{{ range $key, $value := .Labels }}
{{ $key }}: {{ $value }}
{{ end }}
{{ end }}
rules:
# Allow agent to get/list/watch/create/delete/update/patch secrets.
- apiGroups: [""]
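
The same Go-template labels block is stamped into each of the manifests below. A minimal, illustrative sketch (not code from this commit) of how it renders with Go's text/template, assuming .Labels is the map synced from the Klusterlet CR:

package main

import (
	"os"
	"text/template"
)

const labelsSnippet = `labels:
{{ if gt (len .Labels) 0 }}
{{ range $key, $value := .Labels }}
{{ $key }}: {{ $value }}
{{ end }}
{{ end }}`

func main() {
	tmpl := template.Must(template.New("labels").Parse(labelsSnippet))
	data := struct{ Labels map[string]string }{
		Labels: map[string]string{"createdByKlusterlet": "klusterlet", "env": "test"},
	}
	// Emits one "key: value" line per label; with an empty map the range body is
	// skipped (the action-only lines still leave blank lines, as in the real manifests).
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}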


@@ -4,6 +4,12 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: open-cluster-management:{{ .KlusterletName }}-registration:agent
labels:
{{ if gt (len .Labels) 0 }}
{{ range $key, $value := .Labels }}
{{ $key }}: {{ $value }}
{{ end }}
{{ end }}
rules:
# Allow agent to get/list/watch nodes
# list nodes to calculates the capacity and allocatable resources of the managed cluster


@@ -7,6 +7,12 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: open-cluster-management:{{ .KlusterletName }}-registration:addon-management
labels:
{{ if gt (len .Labels) 0 }}
{{ range $key, $value := .Labels }}
{{ $key }}: {{ $value }}
{{ end }}
{{ end }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole


@@ -3,6 +3,12 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: open-cluster-management:{{ .KlusterletName }}-registration:agent
labels:
{{ if gt (len .Labels) 0 }}
{{ range $key, $value := .Labels }}
{{ $key }}: {{ $value }}
{{ end }}
{{ end }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole


@@ -3,5 +3,11 @@ kind: ServiceAccount
metadata:
name: {{ .RegistrationServiceAccount }}
namespace: {{ .KlusterletNamespace }}
labels:
{{ if gt (len .Labels) 0 }}
{{ range $key, $value := .Labels }}
{{ $key }}: {{ $value }}
{{ end }}
{{ end }}
imagePullSecrets:
- name: open-cluster-management-image-pull-credentials


@@ -6,6 +6,11 @@ metadata:
name: open-cluster-management:{{ .KlusterletName }}-work:execution
labels:
open-cluster-management.io/aggregate-to-work: "true"
{{ if gt (len .Labels) 0 }}
{{ range $key, $value := .Labels }}
{{ $key }}: {{ $value }}
{{ end }}
{{ end }}
rules:
# Allow agent to get/list/watch/create/delete crds.
- apiGroups: ["apiextensions.k8s.io"]


@@ -4,6 +4,12 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: open-cluster-management:{{ .KlusterletName }}-work:agent
labels:
{{ if gt (len .Labels) 0 }}
{{ range $key, $value := .Labels }}
{{ $key }}: {{ $value }}
{{ end }}
{{ end }}
rules:
# Allow agent to managed appliedmanifestworks
- apiGroups: ["work.open-cluster-management.io"]


@@ -4,6 +4,12 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: open-cluster-management:{{ .KlusterletName }}-work:aggregate
labels:
{{ if gt (len .Labels) 0 }}
{{ range $key, $value := .Labels }}
{{ $key }}: {{ $value }}
{{ end }}
{{ end }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole


@@ -4,6 +4,12 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: open-cluster-management:{{ .KlusterletName }}-work:execution-admin
labels:
{{ if gt (len .Labels) 0 }}
{{ range $key, $value := .Labels }}
{{ $key }}: {{ $value }}
{{ end }}
{{ end }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole


@@ -3,6 +3,12 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: open-cluster-management:{{ .KlusterletName }}-work:execution
labels:
{{ if gt (len .Labels) 0 }}
{{ range $key, $value := .Labels }}
{{ $key }}: {{ $value }}
{{ end }}
{{ end }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole


@@ -3,6 +3,12 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: open-cluster-management:{{ .KlusterletName }}-work:agent
labels:
{{ if gt (len .Labels) 0 }}
{{ range $key, $value := .Labels }}
{{ $key }}: {{ $value }}
{{ end }}
{{ end }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole


@@ -3,5 +3,11 @@ kind: ServiceAccount
metadata:
name: {{ .WorkServiceAccount }}
namespace: {{ .KlusterletNamespace }}
labels:
{{ if gt (len .Labels) 0 }}
{{ range $key, $value := .Labels }}
{{ $key }}: {{ $value }}
{{ end }}
{{ end }}
imagePullSecrets:
- name: open-cluster-management-image-pull-credentials


@@ -5,7 +5,11 @@ metadata:
namespace: {{ .AgentNamespace }}
labels:
app: klusterlet-agent
createdBy: klusterlet
{{ if gt (len .Labels) 0 }}
{{ range $key, $value := .Labels }}
{{ $key }}: {{ $value }}
{{ end }}
{{ end }}
spec:
replicas: {{ .Replica }}
selector:
@@ -17,6 +21,11 @@ spec:
target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}'
labels:
app: klusterlet-agent
{{ if gt (len .Labels) 0 }}
{{ range $key, $value := .Labels }}
{{ $key }}: {{ $value }}
{{ end }}
{{ end }}
spec:
{{if .HubApiServerHostAlias }}
hostAliases:


@@ -4,6 +4,12 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: open-cluster-management:management:{{ .KlusterletName }}-registration:addon-management
labels:
{{ if gt (len .Labels) 0 }}
{{ range $key, $value := .Labels }}
{{ $key }}: {{ $value }}
{{ end }}
{{ end }}
rules:
# Allow agent to get/list/watch/create/delete/update/patch secrets, registration agent needs secret permission for an
# arbitrary namespace to create hub-kubeconfig secret for an addon


@@ -5,6 +5,12 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: open-cluster-management:management:{{ .KlusterletName }}-registration:addon-management
labels:
{{ if gt (len .Labels) 0 }}
{{ range $key, $value := .Labels }}
{{ $key }}: {{ $value }}
{{ end }}
{{ end }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole


@@ -5,7 +5,11 @@ metadata:
namespace: {{ .AgentNamespace }}
labels:
app: klusterlet-registration-agent
createdBy: klusterlet
{{ if gt (len .Labels) 0 }}
{{ range $key, $value := .Labels }}
{{ $key }}: {{ $value }}
{{ end }}
{{ end }}
spec:
replicas: {{ .Replica }}
selector:
@@ -17,6 +21,11 @@ spec:
target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}'
labels:
app: klusterlet-registration-agent
{{ if gt (len .Labels) 0 }}
{{ range $key, $value := .Labels }}
{{ $key }}: {{ $value }}
{{ end }}
{{ end }}
spec:
{{if .HubApiServerHostAlias }}
hostAliases:


@@ -5,6 +5,12 @@ kind: Role
metadata:
name: open-cluster-management:management:{{ .KlusterletName }}-registration:agent
namespace: {{ .AgentNamespace }}
labels:
{{ if gt (len .Labels) 0 }}
{{ range $key, $value := .Labels }}
{{ $key }}: {{ $value }}
{{ end }}
{{ end }}
rules:
# leader election needs to operate configmaps and leases
- apiGroups: [""]


@@ -3,6 +3,12 @@ kind: RoleBinding
metadata:
name: open-cluster-management:management:{{ .KlusterletName }}-registration:agent
namespace: kube-system
labels:
{{ if gt (len .Labels) 0 }}
{{ range $key, $value := .Labels }}
{{ $key }}: {{ $value }}
{{ end }}
{{ end }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role


@@ -4,6 +4,12 @@ kind: RoleBinding
metadata:
name: open-cluster-management:management:{{ .KlusterletName }}-registration:agent
namespace: {{ .AgentNamespace }}
labels:
{{ if gt (len .Labels) 0 }}
{{ range $key, $value := .Labels }}
{{ $key }}: {{ $value }}
{{ end }}
{{ end }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role


@@ -3,5 +3,11 @@ kind: ServiceAccount
metadata:
name: {{ .RegistrationServiceAccount }}
namespace: {{ .AgentNamespace }}
labels:
{{ if gt (len .Labels) 0 }}
{{ range $key, $value := .Labels }}
{{ $key }}: {{ $value }}
{{ end }}
{{ end }}
imagePullSecrets:
- name: open-cluster-management-image-pull-credentials


@@ -7,6 +7,12 @@ kind: Role
metadata:
name: open-cluster-management:management:{{ .KlusterletName }}:extension-apiserver
namespace: kube-system
labels:
{{ if gt (len .Labels) 0 }}
{{ range $key, $value := .Labels }}
{{ $key }}: {{ $value }}
{{ end }}
{{ end }}
rules:
- apiGroups: [""]
resources: ["configmaps"]


@@ -5,7 +5,11 @@ metadata:
namespace: {{ .AgentNamespace }}
labels:
app: klusterlet-manifestwork-agent
createdBy: klusterlet
{{ if gt (len .Labels) 0 }}
{{ range $key, $value := .Labels }}
{{ $key }}: {{ $value }}
{{ end }}
{{ end }}
spec:
replicas: {{ .Replica }}
selector:
@@ -17,6 +21,11 @@ spec:
target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}'
labels:
app: klusterlet-manifestwork-agent
{{ if gt (len .Labels) 0 }}
{{ range $key, $value := .Labels }}
{{ $key }}: {{ $value }}
{{ end }}
{{ end }}
spec:
{{if .HubApiServerHostAlias }}
hostAliases:


@@ -5,6 +5,12 @@ kind: Role
metadata:
name: open-cluster-management:management:{{ .KlusterletName }}-work:agent
namespace: {{ .AgentNamespace }}
labels:
{{ if gt (len .Labels) 0 }}
{{ range $key, $value := .Labels }}
{{ $key }}: {{ $value }}
{{ end }}
{{ end }}
rules:
# leader election needs to operate configmaps and leases
- apiGroups: [""]


@@ -3,6 +3,12 @@ kind: RoleBinding
metadata:
name: open-cluster-management:management:{{ .KlusterletName }}-work:agent
namespace: kube-system
labels:
{{ if gt (len .Labels) 0 }}
{{ range $key, $value := .Labels }}
{{ $key }}: {{ $value }}
{{ end }}
{{ end }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role


@@ -4,6 +4,12 @@ kind: RoleBinding
metadata:
name: open-cluster-management:management:{{ .KlusterletName }}-work:agent
namespace: {{ .AgentNamespace }}
labels:
{{ if gt (len .Labels) 0 }}
{{ range $key, $value := .Labels }}
{{ $key }}: {{ $value }}
{{ end }}
{{ end }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role


@@ -3,5 +3,11 @@ kind: ServiceAccount
metadata:
name: {{ .WorkServiceAccount }}
namespace: {{ .AgentNamespace }}
labels:
{{ if gt (len .Labels) 0 }}
{{ range $key, $value := .Labels }}
{{ $key }}: {{ $value }}
{{ end }}
{{ end }}
imagePullSecrets:
- name: open-cluster-management-image-pull-credentials


@@ -3,6 +3,12 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: open-cluster-management:{{ .KlusterletName }}-registration-operator:operator-kube111
labels:
{{ if gt (len .Labels) 0 }}
{{ range $key, $value := .Labels }}
{{ $key }}: {{ $value }}
{{ end }}
{{ end }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole


@@ -3,6 +3,12 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: open-cluster-management:{{ .KlusterletName }}-work:agent-kube111
labels:
{{ if gt (len .Labels) 0 }}
{{ range $key, $value := .Labels }}
{{ $key }}: {{ $value }}
{{ end }}
{{ end }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole


@@ -34,6 +34,9 @@ func NewKlusterletOperatorCmd() *cobra.Command {
cmd.Flags().BoolVar(&klOptions.SkipPlaceholderHubSecret, "skip-placeholder-hub-secret", false,
"If set, will skip ensuring a placeholder hub secret which is originally intended for pulling "+
"work image before approved")
flags.BoolVar(&klOptions.EnableSyncLabels, "enable-sync-labels", false,
"If set, will sync the labels of Klusterlet CR to all agent resources")
opts.AddFlags(flags)
return cmd


@@ -47,6 +47,9 @@ const (
FeatureGatesTypeValid = "ValidFeatureGates"
FeatureGatesReasonAllValid = "FeatureGatesAllValid"
FeatureGatesReasonInvalidExisting = "InvalidFeatureGatesExisting"
// AgentLabelKey is used to filter resources in informers
AgentLabelKey = "createdByKlusterlet"
)
var (
@@ -649,7 +652,7 @@ func AgentPriorityClassName(klusterlet *operatorapiv1.Klusterlet, kubeVersion *v
// https://github.com/openshift/library-go/blob/d9cdfbd844ea08465b938c46a16bed2ea23207e4/pkg/operator/resource/resourceapply/core.go#L357,
// add an addition targetClient parameter to support sync secret to another cluster.
func SyncSecret(ctx context.Context, client, targetClient coreclientv1.SecretsGetter, recorder events.Recorder,
sourceNamespace, sourceName, targetNamespace, targetName string, ownerRefs []metav1.OwnerReference) (*corev1.Secret, bool, error) {
sourceNamespace, sourceName, targetNamespace, targetName string, ownerRefs []metav1.OwnerReference, labels map[string]string) (*corev1.Secret, bool, error) {
source, err := client.Secrets(sourceNamespace).Get(ctx, sourceName, metav1.GetOptions{})
switch {
case errors.IsNotFound(err):
@@ -690,6 +693,7 @@ func SyncSecret(ctx context.Context, client, targetClient coreclientv1.SecretsGe
source.Name = targetName
source.ResourceVersion = ""
source.OwnerReferences = ownerRefs
source.Labels = labels
return resourceapply.ApplySecret(ctx, targetClient, recorder, source)
}
}
@@ -795,3 +799,24 @@ func IsSingleton(mode operatorapiv1.InstallMode) bool {
func IsHosted(mode operatorapiv1.InstallMode) bool {
return mode == operatorapiv1.InstallModeHosted || mode == operatorapiv1.InstallModeSingletonHosted
}
func GetKlusterletAgentLabels(klusterlet *operatorapiv1.Klusterlet) map[string]string {
labels := klusterlet.GetLabels()
if labels == nil {
labels = map[string]string{}
}
// This label key is used to filter resources in deployment informer
labels[AgentLabelKey] = klusterlet.GetName()
return labels
}
func MapCompare(required, existing map[string]string) bool {
for k, v := range required {
if existing[k] != v {
return false
}
}
return true
}
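
A minimal, self-contained sketch (not repo code) of what the two new helpers do when --enable-sync-labels is on: the agent labels are the Klusterlet CR labels plus a createdByKlusterlet=<klusterlet name> entry, and the MapCompare-style check verifies that every required label ended up on an agent resource.

package main

import "fmt"

// agentLabels mirrors GetKlusterletAgentLabels: the Klusterlet CR labels plus the
// createdByKlusterlet=<klusterlet name> entry used to filter resources in informers.
func agentLabels(klusterletName string, crLabels map[string]string) map[string]string {
	labels := map[string]string{}
	for k, v := range crLabels {
		labels[k] = v
	}
	labels["createdByKlusterlet"] = klusterletName
	return labels
}

// mapCompare mirrors MapCompare: every required key must exist with the same value.
func mapCompare(required, existing map[string]string) bool {
	for k, v := range required {
		if existing[k] != v {
			return false
		}
	}
	return true
}

func main() {
	required := agentLabels("klusterlet", map[string]string{"env": "test"})
	deploymentLabels := map[string]string{
		"app":                 "klusterlet-agent",
		"env":                 "test",
		"createdByKlusterlet": "klusterlet",
	}
	fmt.Println(mapCompare(required, deploymentLabels)) // true: all synced labels are present
}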


@@ -1477,7 +1477,8 @@ func TestSyncSecret(t *testing.T) {
clientTarget := fakekube.NewSimpleClientset()
secret, changed, err := SyncSecret(
context.TODO(), client.CoreV1(), clientTarget.CoreV1(),
events.NewInMemoryRecorder("test"), tc.sourceNamespace, tc.sourceName, tc.targetNamespace, tc.targetName, tc.ownerRefs)
events.NewInMemoryRecorder("test"), tc.sourceNamespace, tc.sourceName,
tc.targetNamespace, tc.targetName, tc.ownerRefs, nil)
if (err == nil && len(tc.expectedErr) != 0) || (err != nil && err.Error() != tc.expectedErr) {
t.Errorf("%s: expected error %v, got %v", tc.name, tc.expectedErr, err)


@@ -96,12 +96,12 @@ func SATokenCreater(ctx context.Context, saName, saNamespace string, saClient ku
func SyncKubeConfigSecret(ctx context.Context, secretName, secretNamespace, kubeconfigPath string,
templateKubeconfig *rest.Config, secretClient coreclientv1.SecretsGetter,
tokenGetter TokenGetterFunc, recorder events.Recorder) error {
tokenGetter TokenGetterFunc, recorder events.Recorder, labels map[string]string) error {
secret, err := secretClient.Secrets(secretNamespace).Get(ctx, secretName, metav1.GetOptions{})
switch {
case errors.IsNotFound(err):
return applyKubeconfigSecret(ctx, templateKubeconfig, secretName, secretNamespace,
kubeconfigPath, secretClient, tokenGetter, recorder)
kubeconfigPath, secretClient, tokenGetter, recorder, labels)
case err != nil:
return err
}
@@ -110,7 +110,8 @@ func SyncKubeConfigSecret(ctx context.Context, secretName, secretNamespace, kube
return nil
}
return applyKubeconfigSecret(ctx, templateKubeconfig, secretName, secretNamespace, kubeconfigPath, secretClient, tokenGetter, recorder)
return applyKubeconfigSecret(ctx, templateKubeconfig, secretName, secretNamespace, kubeconfigPath,
secretClient, tokenGetter, recorder, labels)
}
func tokenValid(secret *corev1.Secret) bool {
@@ -181,7 +182,7 @@ func clusterInfoNotChanged(secret *corev1.Secret, templateKubeconfig *rest.Confi
// applyKubeconfigSecret would render saToken to a secret.
func applyKubeconfigSecret(ctx context.Context, templateKubeconfig *rest.Config, secretName, secretNamespace,
kubeconfigPath string, secretClient coreclientv1.SecretsGetter, tokenGetter TokenGetterFunc,
recorder events.Recorder) error {
recorder events.Recorder, labels map[string]string) error {
token, expiration, err := tokenGetter()
if err != nil {
@@ -220,6 +221,7 @@ func applyKubeconfigSecret(ctx context.Context, templateKubeconfig *rest.Config,
ObjectMeta: metav1.ObjectMeta{
Namespace: secretNamespace,
Name: secretName,
Labels: labels,
},
Data: map[string][]byte{
"kubeconfig": kubeconfigContent,

View File

@@ -197,7 +197,7 @@ func TestApplyKubeconfigSecret(t *testing.T) {
err := SyncKubeConfigSecret(
context.TODO(), secretName, secretNamespace,
"/tmp/kubeconfig", tkc, client.CoreV1(), tokenGetter,
eventstesting.NewTestingEventRecorder(t))
eventstesting.NewTestingEventRecorder(t), nil)
if err != nil && !tt.wantErr {
t.Error(err)
}


@@ -317,7 +317,7 @@ func ensureSAKubeconfigs(ctx context.Context, clusterManagerName, clusterManager
TLSClientConfig: rest.TLSClientConfig{
CAData: hubKubeConfig.CAData,
},
}, managementClient.CoreV1(), tokenGetter, recorder)
}, managementClient.CoreV1(), tokenGetter, recorder, nil)
if err != nil {
return err
}


@@ -80,6 +80,7 @@ func (c *addonPullImageSecretController) sync(ctx context.Context, controllerCon
namespace,
imagePullSecret,
[]metav1.OwnerReference{},
nil,
)
if err != nil {
return err


@@ -32,7 +32,8 @@ func TestSyncDelete(t *testing.T) {
newAppliedManifestWorks("testhost-2", []string{workv1.AppliedManifestWorkFinalizer}, false),
}
syncContext := testingcommon.NewFakeSyncContext(t, "klusterlet")
controller := newTestController(t, klusterlet, syncContext.Recorder(), appliedManifestWorks, namespace, bootstrapKubeConfigSecret)
controller := newTestController(t, klusterlet, syncContext.Recorder(), appliedManifestWorks, false,
namespace, bootstrapKubeConfigSecret)
err := controller.cleanupController.sync(context.TODO(), syncContext)
if err != nil {


@@ -56,6 +56,7 @@ type klusterletController struct {
skipHubSecretPlaceholder bool
cache resourceapply.ResourceCache
managedClusterClientsBuilder managedClusterClientsBuilderInterface
enableSyncLabels bool
}
type klusterletReconcile interface {
@@ -82,7 +83,8 @@ func NewKlusterletController(
kubeVersion *version.Version,
operatorNamespace string,
recorder events.Recorder,
skipHubSecretPlaceholder bool) factory.Controller {
skipHubSecretPlaceholder bool,
enableSyncLabels bool) factory.Controller {
controller := &klusterletController{
kubeClient: kubeClient,
patcher: patcher.NewPatcher[
@@ -93,6 +95,7 @@ func NewKlusterletController(
skipHubSecretPlaceholder: skipHubSecretPlaceholder,
cache: resourceapply.NewResourceCache(),
managedClusterClientsBuilder: newManagedClusterClientsBuilder(kubeClient, apiExtensionClient, appliedManifestWorkClient, recorder),
enableSyncLabels: enableSyncLabels,
}
return factory.New().WithSync(controller.sync).
@@ -160,6 +163,9 @@ type klusterletConfig struct {
// ResourceRequirements is the resource requirements for the klusterlet managed containers.
// The type has to be []byte to use "indent" template function.
ResourceRequirements []byte
// Labels of the agents are synced from klusterlet CR.
Labels map[string]string
}
func (n *klusterletController) sync(ctx context.Context, controllerContext factory.SyncContext) error {
@@ -210,6 +216,10 @@ func (n *klusterletController) sync(ctx context.Context, controllerContext facto
ResourceRequirements: resourceRequirements,
}
if n.enableSyncLabels {
config.Labels = helpers.GetKlusterletAgentLabels(klusterlet)
}
managedClusterClients, err := n.managedClusterClientsBuilder.
withMode(config.InstallMode).
withKubeConfigSecret(config.AgentNamespace, config.ExternalManagedKubeConfigSecret).
@@ -305,17 +315,20 @@ func (n *klusterletController) sync(ctx context.Context, controllerContext facto
kubeVersion: n.kubeVersion,
opratorNamespace: n.operatorNamespace,
recorder: controllerContext.Recorder(),
cache: n.cache},
cache: n.cache,
enableSyncLabels: n.enableSyncLabels},
&managementReconcile{
kubeClient: n.kubeClient,
operatorNamespace: n.operatorNamespace,
recorder: controllerContext.Recorder(),
cache: n.cache},
cache: n.cache,
enableSyncLabels: n.enableSyncLabels},
&runtimeReconcile{
managedClusterClients: managedClusterClients,
kubeClient: n.kubeClient,
recorder: controllerContext.Recorder(),
cache: n.cache},
cache: n.cache,
enableSyncLabels: n.enableSyncLabels},
}
var errs []error
@@ -388,7 +401,7 @@ func ensureAgentNamespace(ctx context.Context, kubeClient kubernetes.Interface,
// syncPullSecret will sync pull secret from the sourceClient cluster to the targetClient cluster in desired namespace.
func syncPullSecret(ctx context.Context, sourceClient, targetClient kubernetes.Interface,
klusterlet *operatorapiv1.Klusterlet, operatorNamespace, namespace string, recorder events.Recorder) error {
klusterlet *operatorapiv1.Klusterlet, operatorNamespace, namespace string, labels map[string]string, recorder events.Recorder) error {
_, _, err := helpers.SyncSecret(
ctx,
sourceClient.CoreV1(),
@@ -399,6 +412,7 @@ func syncPullSecret(ctx context.Context, sourceClient, targetClient kubernetes.I
namespace,
imagePullSecret,
[]metav1.OwnerReference{},
labels,
)
if err != nil {
@@ -410,9 +424,23 @@ func syncPullSecret(ctx context.Context, sourceClient, targetClient kubernetes.I
return nil
}
func ensureNamespace(ctx context.Context, kubeClient kubernetes.Interface, klusterlet *operatorapiv1.Klusterlet,
namespace string, recorder events.Recorder) error {
if err := ensureAgentNamespace(ctx, kubeClient, namespace, recorder); err != nil {
// ensureNamespace is to apply the namespace defined in klusterlet spec to the managed cluster. The namespace
// will have a klusterlet label.
func ensureNamespace(
ctx context.Context,
kubeClient kubernetes.Interface,
klusterlet *operatorapiv1.Klusterlet,
namespace string, labels map[string]string, recorder events.Recorder) error {
_, _, err := resourceapply.ApplyNamespace(ctx, kubeClient.CoreV1(), recorder, &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: namespace,
Annotations: map[string]string{
"workload.openshift.io/allowed": "management",
},
Labels: labels,
},
})
if err != nil {
meta.SetStatusCondition(&klusterlet.Status.Conditions, metav1.Condition{
Type: klusterletApplied, Status: metav1.ConditionFalse, Reason: "KlusterletApplyFailed",
Message: fmt.Sprintf("Failed to ensure namespace %q: %v", namespace, err)})


@@ -172,7 +172,7 @@ func newServiceAccount(name, namespace string, referenceSecret string) *corev1.S
}
func newTestController(t *testing.T, klusterlet *operatorapiv1.Klusterlet, recorder events.Recorder,
appliedManifestWorks []runtime.Object, objects ...runtime.Object) *testController {
appliedManifestWorks []runtime.Object, enableSyncLabels bool, objects ...runtime.Object) *testController {
fakeKubeClient := fakekube.NewSimpleClientset(objects...)
fakeAPIExtensionClient := fakeapiextensions.NewSimpleClientset()
fakeOperatorClient := fakeoperatorclient.NewSimpleClientset(klusterlet)
@@ -190,6 +190,7 @@ func newTestController(t *testing.T, klusterlet *operatorapiv1.Klusterlet, recor
cache: resourceapply.NewResourceCache(),
managedClusterClientsBuilder: newManagedClusterClientsBuilder(fakeKubeClient, fakeAPIExtensionClient,
fakeWorkClient.WorkV1().AppliedManifestWorks(), recorder),
enableSyncLabels: enableSyncLabels,
}
cleanupController := &klusterletCleanupController{
@@ -454,13 +455,18 @@ func assertWorkDeployment(t *testing.T, actions []clienttesting.Action, verb, cl
}
}
func ensureObject(t *testing.T, object runtime.Object, klusterlet *operatorapiv1.Klusterlet) {
func ensureObject(t *testing.T, object runtime.Object, klusterlet *operatorapiv1.Klusterlet, enableSyncLabels bool) {
access, err := meta.Accessor(object)
if err != nil {
t.Errorf("Unable to access objectmeta: %v", err)
return
}
if enableSyncLabels && !helpers.MapCompare(helpers.GetKlusterletAgentLabels(klusterlet), access.GetLabels()) {
t.Errorf("the labels of klusterlet are not synced to %v", access.GetName())
return
}
namespace := helpers.AgentNamespace(klusterlet)
switch o := object.(type) {
case *appsv1.Deployment:
@@ -497,124 +503,166 @@ func ensureObject(t *testing.T, object runtime.Object, klusterlet *operatorapiv1
// TestSyncDeploy test deployment of klusterlet components
func TestSyncDeploy(t *testing.T) {
klusterlet := newKlusterlet("klusterlet", "testns", "cluster1")
bootStrapSecret := newSecret(helpers.BootstrapHubKubeConfig, "testns")
hubKubeConfigSecret := newSecret(helpers.HubKubeConfig, "testns")
hubKubeConfigSecret.Data["kubeconfig"] = []byte("dummuykubeconnfig")
namespace := newNamespace("testns")
syncContext := testingcommon.NewFakeSyncContext(t, "klusterlet")
controller := newTestController(t, klusterlet, syncContext.Recorder(), nil, bootStrapSecret, hubKubeConfigSecret, namespace)
err := controller.controller.sync(context.TODO(), syncContext)
if err != nil {
t.Errorf("Expected non error when sync, %v", err)
cases := []struct {
name string
enableSyncLabels bool
}{
{
name: "disable sync labels",
enableSyncLabels: false,
},
{
name: "enable sync labels",
enableSyncLabels: true,
},
}
var createObjects []runtime.Object
kubeActions := controller.kubeClient.Actions()
for _, action := range kubeActions {
if action.GetVerb() == createVerb {
object := action.(clienttesting.CreateActionImpl).Object
createObjects = append(createObjects, object)
for _, c := range cases {
klusterlet := newKlusterlet("klusterlet", "testns", "cluster1")
bootStrapSecret := newSecret(helpers.BootstrapHubKubeConfig, "testns")
hubKubeConfigSecret := newSecret(helpers.HubKubeConfig, "testns")
hubKubeConfigSecret.Data["kubeconfig"] = []byte("dummuykubeconnfig")
namespace := newNamespace("testns")
syncContext := testingcommon.NewFakeSyncContext(t, "klusterlet")
}
t.Run(c.name, func(t *testing.T) {
controller := newTestController(t, klusterlet, syncContext.Recorder(), nil, c.enableSyncLabels,
bootStrapSecret, hubKubeConfigSecret, namespace)
err := controller.controller.sync(context.TODO(), syncContext)
if err != nil {
t.Errorf("Expected non error when sync, %v", err)
}
var createObjects []runtime.Object
kubeActions := controller.kubeClient.Actions()
for _, action := range kubeActions {
if action.GetVerb() == createVerb {
object := action.(clienttesting.CreateActionImpl).Object
createObjects = append(createObjects, object)
}
}
// Check if resources are created as expected
// 11 managed static manifests + 12 management static manifests - 2 duplicated service account manifests + 1 addon namespace + 2 deployments
if len(createObjects) != 24 {
t.Errorf("Expect 24 objects created in the sync loop, actual %d", len(createObjects))
}
for _, object := range createObjects {
ensureObject(t, object, klusterlet, false)
}
apiExtenstionAction := controller.apiExtensionClient.Actions()
var createCRDObjects []runtime.Object
for _, action := range apiExtenstionAction {
if action.GetVerb() == createVerb && action.GetResource().Resource == crdResourceName {
object := action.(clienttesting.CreateActionImpl).Object
createCRDObjects = append(createCRDObjects, object)
}
}
if len(createCRDObjects) != 2 {
t.Errorf("Expect 2 objects created in the sync loop, actual %d", len(createCRDObjects))
}
operatorAction := controller.operatorClient.Actions()
testingcommon.AssertActions(t, operatorAction, "patch")
klusterlet = &operatorapiv1.Klusterlet{}
patchData := operatorAction[0].(clienttesting.PatchActionImpl).Patch
err = json.Unmarshal(patchData, klusterlet)
if err != nil {
t.Fatal(err)
}
testinghelper.AssertOnlyConditions(
t, klusterlet,
testinghelper.NamedCondition(klusterletApplied, "KlusterletApplied", metav1.ConditionTrue),
testinghelper.NamedCondition(helpers.FeatureGatesTypeValid, helpers.FeatureGatesReasonAllValid, metav1.ConditionTrue),
)
})
}
// Check if resources are created as expected
// 11 managed static manifests + 12 management static manifests - 2 duplicated service account manifests + 1 addon namespace + 2 deployments
if len(createObjects) != 24 {
t.Errorf("Expect 24 objects created in the sync loop, actual %d", len(createObjects))
}
for _, object := range createObjects {
ensureObject(t, object, klusterlet)
}
apiExtenstionAction := controller.apiExtensionClient.Actions()
var createCRDObjects []runtime.Object
for _, action := range apiExtenstionAction {
if action.GetVerb() == createVerb && action.GetResource().Resource == crdResourceName {
object := action.(clienttesting.CreateActionImpl).Object
createCRDObjects = append(createCRDObjects, object)
}
}
if len(createCRDObjects) != 2 {
t.Errorf("Expect 2 objects created in the sync loop, actual %d", len(createCRDObjects))
}
operatorAction := controller.operatorClient.Actions()
testingcommon.AssertActions(t, operatorAction, "patch")
klusterlet = &operatorapiv1.Klusterlet{}
patchData := operatorAction[0].(clienttesting.PatchActionImpl).Patch
err = json.Unmarshal(patchData, klusterlet)
if err != nil {
t.Fatal(err)
}
testinghelper.AssertOnlyConditions(
t, klusterlet,
testinghelper.NamedCondition(klusterletApplied, "KlusterletApplied", metav1.ConditionTrue),
testinghelper.NamedCondition(helpers.FeatureGatesTypeValid, helpers.FeatureGatesReasonAllValid, metav1.ConditionTrue),
)
}
func TestSyncDeploySingleton(t *testing.T) {
klusterlet := newKlusterlet("klusterlet", "testns", "cluster1")
klusterlet.Spec.DeployOption.Mode = operatorapiv1.InstallModeSingleton
bootStrapSecret := newSecret(helpers.BootstrapHubKubeConfig, "testns")
hubKubeConfigSecret := newSecret(helpers.HubKubeConfig, "testns")
hubKubeConfigSecret.Data["kubeconfig"] = []byte("dummuykubeconnfig")
namespace := newNamespace("testns")
syncContext := testingcommon.NewFakeSyncContext(t, "klusterlet")
controller := newTestController(t, klusterlet, syncContext.Recorder(), nil, bootStrapSecret, hubKubeConfigSecret, namespace)
err := controller.controller.sync(context.TODO(), syncContext)
if err != nil {
t.Errorf("Expected non error when sync, %v", err)
cases := []struct {
name string
enableSyncLabels bool
}{
{
name: "disable sync labels",
enableSyncLabels: false,
},
{
name: "enable sync labels",
enableSyncLabels: true,
},
}
var createObjects []runtime.Object
kubeActions := controller.kubeClient.Actions()
for _, action := range kubeActions {
if action.GetVerb() == createVerb {
object := action.(clienttesting.CreateActionImpl).Object
createObjects = append(createObjects, object)
for _, c := range cases {
klusterlet := newKlusterlet("klusterlet", "testns", "cluster1")
klusterlet.SetLabels(map[string]string{"test": "test", "abc": "abc"})
klusterlet.Spec.DeployOption.Mode = operatorapiv1.InstallModeSingleton
bootStrapSecret := newSecret(helpers.BootstrapHubKubeConfig, "testns")
hubKubeConfigSecret := newSecret(helpers.HubKubeConfig, "testns")
hubKubeConfigSecret.Data["kubeconfig"] = []byte("dummuykubeconnfig")
namespace := newNamespace("testns")
syncContext := testingcommon.NewFakeSyncContext(t, "klusterlet")
}
t.Run(c.name, func(t *testing.T) {
controller := newTestController(t, klusterlet, syncContext.Recorder(), nil,
c.enableSyncLabels, bootStrapSecret, hubKubeConfigSecret, namespace)
err := controller.controller.sync(context.TODO(), syncContext)
if err != nil {
t.Errorf("Expected non error when sync, %v", err)
}
var createObjects []runtime.Object
kubeActions := controller.kubeClient.Actions()
for _, action := range kubeActions {
if action.GetVerb() == createVerb {
object := action.(clienttesting.CreateActionImpl).Object
createObjects = append(createObjects, object)
}
}
// Check if resources are created as expected
// 10 managed static manifests + 11 management static manifests - 1 service account manifests + 1 addon namespace + 1 deployments
if len(createObjects) != 22 {
t.Errorf("Expect 21 objects created in the sync loop, actual %d", len(createObjects))
}
for _, object := range createObjects {
ensureObject(t, object, klusterlet, false)
}
apiExtenstionAction := controller.apiExtensionClient.Actions()
var createCRDObjects []runtime.Object
for _, action := range apiExtenstionAction {
if action.GetVerb() == createVerb && action.GetResource().Resource == crdResourceName {
object := action.(clienttesting.CreateActionImpl).Object
createCRDObjects = append(createCRDObjects, object)
}
}
if len(createCRDObjects) != 2 {
t.Errorf("Expect 2 objects created in the sync loop, actual %d", len(createCRDObjects))
}
operatorAction := controller.operatorClient.Actions()
testingcommon.AssertActions(t, operatorAction, "patch")
klusterlet = &operatorapiv1.Klusterlet{}
patchData := operatorAction[0].(clienttesting.PatchActionImpl).Patch
err = json.Unmarshal(patchData, klusterlet)
if err != nil {
t.Fatal(err)
}
testinghelper.AssertOnlyConditions(
t, klusterlet,
testinghelper.NamedCondition(klusterletApplied, "KlusterletApplied", metav1.ConditionTrue),
testinghelper.NamedCondition(helpers.FeatureGatesTypeValid, helpers.FeatureGatesReasonAllValid, metav1.ConditionTrue),
)
})
}
// Check if resources are created as expected
// 10 managed static manifests + 11 management static manifests - 1 service account manifests + 1 addon namespace + 1 deployments
if len(createObjects) != 22 {
t.Errorf("Expect 21 objects created in the sync loop, actual %d", len(createObjects))
}
for _, object := range createObjects {
ensureObject(t, object, klusterlet)
}
apiExtenstionAction := controller.apiExtensionClient.Actions()
var createCRDObjects []runtime.Object
for _, action := range apiExtenstionAction {
if action.GetVerb() == createVerb && action.GetResource().Resource == crdResourceName {
object := action.(clienttesting.CreateActionImpl).Object
createCRDObjects = append(createCRDObjects, object)
}
}
if len(createCRDObjects) != 2 {
t.Errorf("Expect 2 objects created in the sync loop, actual %d", len(createCRDObjects))
}
operatorAction := controller.operatorClient.Actions()
testingcommon.AssertActions(t, operatorAction, "patch")
klusterlet = &operatorapiv1.Klusterlet{}
patchData := operatorAction[0].(clienttesting.PatchActionImpl).Patch
err = json.Unmarshal(patchData, klusterlet)
if err != nil {
t.Fatal(err)
}
testinghelper.AssertOnlyConditions(
t, klusterlet,
testinghelper.NamedCondition(klusterletApplied, "KlusterletApplied", metav1.ConditionTrue),
testinghelper.NamedCondition(helpers.FeatureGatesTypeValid, helpers.FeatureGatesReasonAllValid, metav1.ConditionTrue),
)
}
// TestSyncDeployHosted test deployment of klusterlet components in hosted mode
@@ -658,7 +706,7 @@ func TestSyncDeployHosted(t *testing.T) {
t.Errorf("Expect 16 objects created in the sync loop, actual %d", len(createObjectsManagement))
}
for _, object := range createObjectsManagement {
ensureObject(t, object, klusterlet)
ensureObject(t, object, klusterlet, false)
}
var createObjectsManaged []runtime.Object
@@ -676,7 +724,7 @@ func TestSyncDeployHosted(t *testing.T) {
t.Errorf("Expect 15 objects created in the sync loop, actual %d", len(createObjectsManaged))
}
for _, object := range createObjectsManaged {
ensureObject(t, object, klusterlet)
ensureObject(t, object, klusterlet, false)
}
apiExtenstionAction := controller.apiExtensionClient.Actions()
@@ -801,7 +849,8 @@ func TestReplica(t *testing.T) {
}
syncContext := testingcommon.NewFakeSyncContext(t, "klusterlet")
controller := newTestController(t, klusterlet, syncContext.Recorder(), nil, objects...)
controller := newTestController(t, klusterlet, syncContext.Recorder(), nil, false,
objects...)
err := controller.controller.sync(context.TODO(), syncContext)
if err != nil {
@@ -866,7 +915,8 @@ func TestClusterNameChange(t *testing.T) {
hubSecret.Data["kubeconfig"] = []byte("dummuykubeconnfig")
hubSecret.Data["cluster-name"] = []byte("cluster1")
syncContext := testingcommon.NewFakeSyncContext(t, "klusterlet")
controller := newTestController(t, klusterlet, syncContext.Recorder(), nil, bootStrapSecret, hubSecret, namespace)
controller := newTestController(t, klusterlet, syncContext.Recorder(), nil, false,
bootStrapSecret, hubSecret, namespace)
err := controller.controller.sync(context.TODO(), syncContext)
if err != nil {
@@ -954,7 +1004,8 @@ func TestSyncWithPullSecret(t *testing.T) {
namespace := newNamespace("testns")
pullSecret := newSecret(imagePullSecret, "open-cluster-management")
syncContext := testingcommon.NewFakeSyncContext(t, "klusterlet")
controller := newTestController(t, klusterlet, syncContext.Recorder(), nil, bootStrapSecret, hubKubeConfigSecret, namespace, pullSecret)
controller := newTestController(t, klusterlet, syncContext.Recorder(), nil, false,
bootStrapSecret, hubKubeConfigSecret, namespace, pullSecret)
err := controller.controller.sync(context.TODO(), syncContext)
if err != nil {
@@ -983,7 +1034,8 @@ func TestDeployOnKube111(t *testing.T) {
hubKubeConfigSecret.Data["kubeconfig"] = []byte("dummuykubeconnfig")
namespace := newNamespace("testns")
syncContext := testingcommon.NewFakeSyncContext(t, "klusterlet")
controller := newTestController(t, klusterlet, syncContext.Recorder(), nil, bootStrapSecret, hubKubeConfigSecret, namespace)
controller := newTestController(t, klusterlet, syncContext.Recorder(), nil, false,
bootStrapSecret, hubKubeConfigSecret, namespace)
kubeVersion, _ := version.ParseGeneric("v1.11.0")
controller.controller.kubeVersion = kubeVersion
controller.cleanupController.kubeVersion = kubeVersion
@@ -1010,7 +1062,7 @@ func TestDeployOnKube111(t *testing.T) {
t.Errorf("Expect 26 objects created in the sync loop, actual %d", len(createObjects))
}
for _, object := range createObjects {
ensureObject(t, object, klusterlet)
ensureObject(t, object, klusterlet, false)
}
operatorAction := controller.operatorClient.Actions()


@@ -59,16 +59,22 @@ type managedReconcile struct {
kubeVersion *version.Version
recorder events.Recorder
cache resourceapply.ResourceCache
enableSyncLabels bool
}
func (r *managedReconcile) reconcile(ctx context.Context, klusterlet *operatorapiv1.Klusterlet,
config klusterletConfig) (*operatorapiv1.Klusterlet, reconcileState, error) {
labels := map[string]string{}
if r.enableSyncLabels {
labels = helpers.GetKlusterletAgentLabels(klusterlet)
}
// For now, whether in Default or Hosted mode, the addons will be deployed on the managed cluster.
// sync image pull secret from management cluster to managed cluster for addon namespace
// TODO(zhujian7): In the future, we may consider deploy addons on the management cluster in Hosted mode.
addonNamespace := fmt.Sprintf("%s-addon", config.KlusterletNamespace)
// Ensure the addon namespace on the managed cluster
err := ensureNamespace(ctx, r.managedClusterClients.kubeClient, klusterlet, addonNamespace, r.recorder)
err := ensureNamespace(ctx, r.managedClusterClients.kubeClient, klusterlet, addonNamespace, labels, r.recorder)
if err != nil {
return klusterlet, reconcileStop, err
}
@@ -77,7 +83,7 @@ func (r *managedReconcile) reconcile(ctx context.Context, klusterlet *operatorap
// addonsecretcontroller only watch namespaces in the same cluster klusterlet is running on.
// And if addons are deployed in default mode on the managed cluster, but klusterlet is deployed in hosted
// on management cluster, then we still need to sync the secret here in klusterlet-controller using `managedClusterClients.kubeClient`.
err = syncPullSecret(ctx, r.kubeClient, r.managedClusterClients.kubeClient, klusterlet, r.opratorNamespace, addonNamespace, r.recorder)
err = syncPullSecret(ctx, r.kubeClient, r.managedClusterClients.kubeClient, klusterlet, r.opratorNamespace, addonNamespace, labels, r.recorder)
if err != nil {
return klusterlet, reconcileStop, err
}
@@ -85,7 +91,7 @@ func (r *managedReconcile) reconcile(ctx context.Context, klusterlet *operatorap
if helpers.IsHosted(config.InstallMode) {
// In hosted mode, we should ensure the namespace on the managed cluster since
// some resources(eg:service account) are still deployed on managed cluster.
err := ensureNamespace(ctx, r.managedClusterClients.kubeClient, klusterlet, config.KlusterletNamespace, r.recorder)
err := ensureNamespace(ctx, r.managedClusterClients.kubeClient, klusterlet, config.KlusterletNamespace, labels, r.recorder)
if err != nil {
return klusterlet, reconcileStop, err
}
@@ -159,6 +165,9 @@ func (r *managedReconcile) createAggregationRule(ctx context.Context, klusterlet
},
Rules: []rbacv1.PolicyRule{},
}
if r.enableSyncLabels {
aggregateClusterRole.SetLabels(helpers.GetKlusterletAgentLabels(klusterlet))
}
_, createErr := r.managedClusterClients.kubeClient.RbacV1().ClusterRoles().Create(ctx, aggregateClusterRole, metav1.CreateOptions{})
return createErr
}


@@ -44,17 +44,24 @@ type managementReconcile struct {
recorder events.Recorder
operatorNamespace string
cache resourceapply.ResourceCache
enableSyncLabels bool
}
func (r *managementReconcile) reconcile(ctx context.Context, klusterlet *operatorapiv1.Klusterlet,
config klusterletConfig) (*operatorapiv1.Klusterlet, reconcileState, error) {
err := ensureNamespace(ctx, r.kubeClient, klusterlet, config.AgentNamespace, r.recorder)
labels := map[string]string{}
if r.enableSyncLabels {
labels = helpers.GetKlusterletAgentLabels(klusterlet)
}
err := ensureNamespace(ctx, r.kubeClient, klusterlet, config.AgentNamespace, labels, r.recorder)
if err != nil {
return klusterlet, reconcileStop, err
}
// Sync pull secret to the agent namespace
err = syncPullSecret(ctx, r.kubeClient, r.kubeClient, klusterlet, r.operatorNamespace, config.AgentNamespace, r.recorder)
err = syncPullSecret(ctx, r.kubeClient, r.kubeClient, klusterlet, r.operatorNamespace, config.AgentNamespace,
labels, r.recorder)
if err != nil {
return klusterlet, reconcileStop, err
}


@@ -29,6 +29,7 @@ type runtimeReconcile struct {
kubeClient kubernetes.Interface
recorder events.Recorder
cache resourceapply.ResourceCache
enableSyncLabels bool
}
func (r *runtimeReconcile) reconcile(ctx context.Context, klusterlet *operatorapiv1.Klusterlet,
@@ -198,9 +199,14 @@ func (r *runtimeReconcile) createManagedClusterKubeconfig(
klusterlet *operatorapiv1.Klusterlet,
klusterletNamespace, agentNamespace, saName, secretName string,
recorder events.Recorder) error {
labels := map[string]string{}
if r.enableSyncLabels {
labels = helpers.GetKlusterletAgentLabels(klusterlet)
}
tokenGetter := helpers.SATokenGetter(ctx, saName, klusterletNamespace, r.managedClusterClients.kubeClient)
err := helpers.SyncKubeConfigSecret(ctx, secretName, agentNamespace, "/spoke/config/kubeconfig",
r.managedClusterClients.kubeconfig, r.kubeClient.CoreV1(), tokenGetter, recorder)
r.managedClusterClients.kubeconfig, r.kubeClient.CoreV1(), tokenGetter, recorder, labels)
if err != nil {
meta.SetStatusCondition(&klusterlet.Status.Conditions, metav1.Condition{
Type: klusterletApplied, Status: metav1.ConditionFalse, Reason: "KlusterletApplyFailed",


@@ -30,6 +30,7 @@ const defaultComponentNamespace = "open-cluster-management"
type Options struct {
SkipPlaceholderHubSecret bool
EnableSyncLabels bool
}
// RunKlusterletOperator starts a new klusterlet operator
@@ -74,8 +75,16 @@ func (o *Options) RunKlusterletOperator(ctx context.Context, controllerContext *
}
deploymentInformer := informers.NewSharedInformerFactoryWithOptions(kubeClient, 5*time.Minute,
informers.WithTweakListOptions(func(options *metav1.ListOptions) {
options.LabelSelector = "createdBy=klusterlet"
informers.WithTweakListOptions(func(listOptions *metav1.ListOptions) {
selector := &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: helpers.AgentLabelKey,
Operator: metav1.LabelSelectorOpExists,
},
},
}
listOptions.LabelSelector = metav1.FormatLabelSelector(selector)
}))
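The deployment informer above now selects on the existence of the agent label key instead of the fixed createdBy=klusterlet label. An illustrative, self-contained sketch (assuming the createdByKlusterlet value of AgentLabelKey) of the selector string this produces:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Exists requirement on the agent label key: matches resources that carry the key,
	// whatever Klusterlet name is stored as its value.
	selector := &metav1.LabelSelector{
		MatchExpressions: []metav1.LabelSelectorRequirement{
			{Key: "createdByKlusterlet", Operator: metav1.LabelSelectorOpExists},
		},
	}
	fmt.Println(metav1.FormatLabelSelector(selector)) // prints: createdByKlusterlet
}
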
// Build operator client and informer
@@ -108,7 +117,8 @@ func (o *Options) RunKlusterletOperator(ctx context.Context, controllerContext *
kubeVersion,
operatorNamespace,
controllerContext.EventRecorder,
o.SkipPlaceholderHubSecret)
o.SkipPlaceholderHubSecret,
o.EnableSyncLabels)
klusterletCleanupController := klusterletcontroller.NewKlusterletCleanupController(
kubeClient,


@@ -23,7 +23,7 @@ import (
)
func startKlusterletOperator(ctx context.Context) {
o := &klusterlet.Options{}
o := &klusterlet.Options{EnableSyncLabels: true}
err := o.RunKlusterletOperator(ctx, &controllercmd.ControllerContext{
KubeConfig: restConfig,
EventRecorder: util.NewIntegrationTestEventRecorder("integration"),
@@ -38,12 +38,17 @@ var _ = ginkgo.Describe("Klusterlet", func() {
var klusterletNamespace string
var registrationManagementRoleName string
var registrationManagedRoleName string
var addonManagementRoleName, addonManagementRoleName2 string
var workExecutionRoleName string
var workAggregateRoleName string
var workExecutionRoleBindingName string
var registrationDeploymentName string
var registrationSAName string
var workManagementRoleName string
var workManagedRoleName string
var workDeploymentName string
var workSAName string
var agentLabelSelector string
ginkgo.BeforeEach(func() {
var ctx context.Context
@@ -59,7 +64,8 @@ var _ = ginkgo.Describe("Klusterlet", func() {
klusterlet = &operatorapiv1.Klusterlet{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("klusterlet-%s", rand.String(6)),
Name: fmt.Sprintf("klusterlet-%s", rand.String(6)),
Labels: map[string]string{"test": "test", "component": "klusterlet"},
},
Spec: operatorapiv1.KlusterletSpec{
RegistrationImagePullSpec: "quay.io/open-cluster-management/registration",
@@ -74,6 +80,10 @@ var _ = ginkgo.Describe("Klusterlet", func() {
},
}
agentLabelSelector = metav1.FormatLabelSelector(&metav1.LabelSelector{
MatchLabels: helpers.GetKlusterletAgentLabels(klusterlet),
})
hubKubeConfigSecret = &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: helpers.HubKubeConfig,
@@ -105,8 +115,15 @@ var _ = ginkgo.Describe("Klusterlet", func() {
workDeploymentName = fmt.Sprintf("%s-work-agent", klusterlet.Name)
registrationManagementRoleName = fmt.Sprintf("open-cluster-management:management:%s-registration:agent", klusterlet.Name)
workManagementRoleName = fmt.Sprintf("open-cluster-management:management:%s-work:agent", klusterlet.Name)
registrationManagedRoleName = fmt.Sprintf("open-cluster-management:%s-registration:agent", klusterlet.Name)
addonManagementRoleName2 = fmt.Sprintf("open-cluster-management:%s-registration:addon-management", klusterlet.Name)
addonManagementRoleName = fmt.Sprintf("open-cluster-management:management:%s-registration:addon-management", klusterlet.Name)
workExecutionRoleName = fmt.Sprintf("open-cluster-management:%s-work:execution", klusterlet.Name)
workManagedRoleName = fmt.Sprintf("open-cluster-management:%s-work:agent", klusterlet.Name)
workAggregateRoleName = fmt.Sprintf("open-cluster-management:%s-work:aggregate", klusterlet.Name)
workExecutionRoleBindingName = fmt.Sprintf("open-cluster-management:%s-work:execution-admin", klusterlet.Name)
registrationSAName = fmt.Sprintf("%s-registration-sa", klusterlet.Name)
workSAName = fmt.Sprintf("%s-work-sa", klusterlet.Name)
})
@@ -150,105 +167,148 @@ var _ = ginkgo.Describe("Klusterlet", func() {
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
// Check clusterrole/clusterrolebinding
gomega.Eventually(func() error {
clusterRoles, err := kubeClient.RbacV1().ClusterRoles().List(context.Background(),
metav1.ListOptions{LabelSelector: agentLabelSelector})
if err != nil {
return fmt.Errorf("unable to list cluster roles: %v", err)
}
if len(clusterRoles.Items) != 6 {
return fmt.Errorf("expected 6 clusterRoles.Items, got %v", len(clusterRoles.Items))
}
for _, clusterRole := range clusterRoles.Items {
if clusterRole.GetName() != registrationManagedRoleName &&
clusterRole.GetName() != workManagedRoleName &&
clusterRole.GetName() != addonManagementRoleName &&
clusterRole.GetName() != addonManagementRoleName2 &&
clusterRole.GetName() != workExecutionRoleName &&
clusterRole.GetName() != workAggregateRoleName {
return fmt.Errorf("unexpected clusterRole %s", clusterRole.GetName())
}
}
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
gomega.Eventually(func() bool {
if _, err := kubeClient.RbacV1().ClusterRoles().Get(context.Background(), registrationManagedRoleName, metav1.GetOptions{}); err != nil {
clusterRoleBindings, err := kubeClient.RbacV1().ClusterRoleBindings().List(context.Background(),
metav1.ListOptions{LabelSelector: agentLabelSelector})
if err != nil {
return false
}
return true
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
gomega.Eventually(func() bool {
if _, err := kubeClient.RbacV1().ClusterRoles().Get(context.Background(), workManagedRoleName, metav1.GetOptions{}); err != nil {
if len(clusterRoleBindings.Items) != 6 {
return false
}
return true
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
gomega.Eventually(func() bool {
if _, err := kubeClient.RbacV1().ClusterRoleBindings().Get(context.Background(), registrationManagedRoleName, metav1.GetOptions{}); err != nil {
return false
}
return true
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
gomega.Eventually(func() bool {
if _, err := kubeClient.RbacV1().ClusterRoleBindings().Get(context.Background(), workManagedRoleName, metav1.GetOptions{}); err != nil {
return false
for _, clusterRoleBinding := range clusterRoleBindings.Items {
if clusterRoleBinding.GetName() != registrationManagedRoleName &&
clusterRoleBinding.GetName() != workManagedRoleName &&
clusterRoleBinding.GetName() != workExecutionRoleBindingName &&
clusterRoleBinding.GetName() != addonManagementRoleName &&
clusterRoleBinding.GetName() != addonManagementRoleName2 &&
clusterRoleBinding.GetName() != workAggregateRoleName {
return false
}
}
return true
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
// Check role/rolebinding
// Check role/rolebinding and extension apiserver rolebinding
gomega.Eventually(func() bool {
if _, err := kubeClient.RbacV1().Roles(klusterletNamespace).Get(context.Background(), registrationManagementRoleName, metav1.GetOptions{}); err != nil {
roles, err := kubeClient.RbacV1().Roles(klusterletNamespace).List(context.Background(),
metav1.ListOptions{LabelSelector: agentLabelSelector})
if err != nil {
return false
}
return true
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
gomega.Eventually(func() bool {
if _, err := kubeClient.RbacV1().Roles(klusterletNamespace).Get(context.Background(), workManagementRoleName, metav1.GetOptions{}); err != nil {
if len(roles.Items) != 2 {
return false
}
for _, role := range roles.Items {
if role.GetName() != registrationManagementRoleName &&
role.GetName() != workManagementRoleName {
return false
}
}
return true
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
gomega.Eventually(func() bool {
if _, err := kubeClient.RbacV1().RoleBindings(klusterletNamespace).Get(
context.Background(), registrationManagementRoleName, metav1.GetOptions{}); err != nil {
roleBindings, err := kubeClient.RbacV1().RoleBindings(metav1.NamespaceAll).List(context.Background(),
metav1.ListOptions{LabelSelector: agentLabelSelector})
if err != nil {
return false
}
return true
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
gomega.Eventually(func() bool {
if _, err := kubeClient.RbacV1().RoleBindings(klusterletNamespace).Get(context.Background(), workManagementRoleName, metav1.GetOptions{}); err != nil {
if len(roleBindings.Items) != 4 {
return false
}
return true
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
// Check extension apiserver rolebinding
gomega.Eventually(func() bool {
if _, err := kubeClient.RbacV1().RoleBindings("kube-system").Get(context.Background(), registrationManagementRoleName, metav1.GetOptions{}); err != nil {
return false
}
return true
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
gomega.Eventually(func() bool {
if _, err := kubeClient.RbacV1().RoleBindings("kube-system").Get(context.Background(), workManagementRoleName, metav1.GetOptions{}); err != nil {
return false
for _, roleBinding := range roleBindings.Items {
if roleBinding.GetNamespace() != klusterletNamespace &&
roleBinding.GetNamespace() != "kube-system" {
return false
}
if roleBinding.GetName() != registrationManagementRoleName &&
roleBinding.GetName() != workManagementRoleName {
return false
}
}
return true
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
// Check service account
gomega.Eventually(func() bool {
if _, err := kubeClient.CoreV1().ServiceAccounts(klusterletNamespace).Get(context.Background(), registrationSAName, metav1.GetOptions{}); err != nil {
serviceaccouts, err := kubeClient.CoreV1().ServiceAccounts(klusterletNamespace).List(context.Background(),
metav1.ListOptions{LabelSelector: agentLabelSelector})
if err != nil {
return false
}
return true
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
gomega.Eventually(func() bool {
if _, err := kubeClient.CoreV1().ServiceAccounts(klusterletNamespace).Get(context.Background(), workSAName, metav1.GetOptions{}); err != nil {
if len(serviceaccouts.Items) != 2 {
return false
}
for _, serviceAccount := range serviceaccouts.Items {
if serviceAccount.GetName() != registrationSAName &&
serviceAccount.GetName() != workSAName {
return false
}
}
return true
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
// Check deployment
gomega.Eventually(func() bool {
if _, err := kubeClient.AppsV1().Deployments(klusterletNamespace).Get(context.Background(), registrationDeploymentName, metav1.GetOptions{}); err != nil {
deployments, err := kubeClient.AppsV1().Deployments(klusterletNamespace).List(context.Background(),
metav1.ListOptions{LabelSelector: agentLabelSelector})
if err != nil {
return false
}
return true
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
gomega.Eventually(func() bool {
if _, err := kubeClient.AppsV1().Deployments(klusterletNamespace).Get(context.Background(), workDeploymentName, metav1.GetOptions{}); err != nil {
if len(deployments.Items) != 2 {
return false
}
for _, deployment := range deployments.Items {
if deployment.GetName() != registrationDeploymentName &&
deployment.GetName() != workDeploymentName {
return false
}
}
return true
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
// Check addon namespace
addonNamespace := fmt.Sprintf("%s-addon", klusterletNamespace)
gomega.Eventually(func() bool {
if _, err := kubeClient.CoreV1().Namespaces().Get(context.Background(), addonNamespace, metav1.GetOptions{}); err != nil {
namespaces, err := kubeClient.CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{LabelSelector: agentLabelSelector})
if err != nil {
return false
}
if len(namespaces.Items) != 2 {
return false
}
for _, namespace := range namespaces.Items {
if namespace.GetName() != klusterletNamespace &&
namespace.GetName() != addonNamespace {
return false
}
}
return true
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())