Fix: change singleton agent sa to work sa (#279)
Signed-off-by: Jian Qiu <jqiu@redhat.com>
@@ -4,6 +4,8 @@ apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
   name: open-cluster-management:{{ .KlusterletName }}-work:execution
+  labels:
+    open-cluster-management.io/aggregate-to-work: "true"
 rules:
 # Allow agent to get/list/watch/create/delete crds.
 - apiGroups: ["apiextensions.k8s.io"]

@@ -7,7 +7,7 @@ metadata:
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: ClusterRole
-  name: open-cluster-management:{{ .KlusterletName }}-work:execution
+  name: open-cluster-management:{{ .KlusterletName }}-work:aggregate
 subjects:
   - kind: ServiceAccount
     name: {{ .WorkServiceAccount }}

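Editor's note, for orientation only: the binding above no longer points at the execution role directly but at an aggregate ClusterRole. Kubernetes' RBAC aggregation controller fills in the rules of that aggregate role from every ClusterRole labeled open-cluster-management.io/aggregate-to-work: "true", such as the execution role in the first hunk. A minimal Go sketch for inspecting the merged rules, assuming a klusterlet named "klusterlet" and a reachable kubeconfig (this program is not part of the commit):

package main

import (
    "context"
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/clientcmd"
)

func main() {
    // Assumes a local kubeconfig for the managed cluster.
    cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    if err != nil {
        panic(err)
    }
    client := kubernetes.NewForConfigOrDie(cfg)

    // Name follows the templates above with KlusterletName = "klusterlet".
    role, err := client.RbacV1().ClusterRoles().Get(context.TODO(),
        "open-cluster-management:klusterlet-work:aggregate", metav1.GetOptions{})
    if err != nil {
        panic(err)
    }
    // These rules are copied in by the aggregation controller, not written by the operator.
    for _, rule := range role.Rules {
        fmt.Printf("groups=%v resources=%v verbs=%v\n", rule.APIGroups, rule.Resources, rule.Verbs)
    }
}
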
@@ -35,7 +35,7 @@ spec:
                 - key: app
                   operator: In
                   values:
-                  - klusterlet-registration-agent
+                  - klusterlet-agent
           - weight: 30
             podAffinityTerm:
               topologyKey: kubernetes.io/hostname

@@ -44,8 +44,8 @@ spec:
                 - key: app
                   operator: In
                   values:
-                  - klusterlet-registration-agent
-      serviceAccountName: {{ .KlusterletName }}-agent-sa
+                  - klusterlet-agent
+      serviceAccountName: {{ .KlusterletName }}-work-sa
       containers:
       - name: klusterlet-agent
         image: {{ .SingletonImage }}

@@ -49,9 +49,9 @@ func TestSyncDelete(t *testing.T) {
         }
     }

-    // 11 managed static manifests + 11 management static manifests + 1 hub kubeconfig + 2 namespaces + 2 deployments
-    if len(deleteActions) != 27 {
-        t.Errorf("Expected 27 delete actions, but got %d", len(deleteActions))
+    // 11 managed static manifests + 12 management static manifests + 1 hub kubeconfig + 2 namespaces + 2 deployments
+    if len(deleteActions) != 28 {
+        t.Errorf("Expected 28 delete actions, but got %d", len(deleteActions))
     }

     var updateWorkActions []clienttesting.PatchActionImpl

@@ -123,9 +123,9 @@ func TestSyncDeleteHosted(t *testing.T) {
         }
     }

-    // 11 static manifests + 2 namespaces
-    if len(deleteActionsManaged) != 13 {
-        t.Errorf("Expected 13 delete actions, but got %d", len(deleteActionsManaged))
+    // 12 static manifests + 2 namespaces
+    if len(deleteActionsManaged) != 14 {
+        t.Errorf("Expected 14 delete actions, but got %d", len(deleteActionsManaged))
     }

     var updateWorkActions []clienttesting.PatchActionImpl

@@ -385,9 +385,11 @@ func ensureNamespace(ctx context.Context, kubeClient kubernetes.Interface, klust

 func serviceAccountName(suffix string, klusterlet *operatorapiv1.Klusterlet) string {
     // in singleton mode, we only need one sa, so the name of work and registration sa are
-    // the same.
+    // the same. We need to use the name of work sa for now, since the work sa permission can be
+    // escalated by creating manifestwork from other actors.
+    // TODO(qiujian16) revisit to see if we can use impersonate in work agent.
     if helpers.IsSingleton(klusterlet.Spec.DeployOption.Mode) {
-        return fmt.Sprintf("%s-agent-sa", klusterlet.Name)
+        return fmt.Sprintf("%s-work-sa", klusterlet.Name)
     }
     return fmt.Sprintf("%s-%s", klusterlet.Name, suffix)
 }

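Editor's sketch, not part of the commit: the effect of the helper above is easiest to see with concrete inputs. Below, the mode check is reduced to a plain bool (the real helper calls helpers.IsSingleton on the deploy mode) and the suffix values are illustrative:

package main

import "fmt"

// Sketch of the naming rule above; singleton agents run registration and work
// in one pod, so both reconcilers must resolve to the same (work) service account.
func serviceAccountName(klusterletName, suffix string, singleton bool) string {
    if singleton {
        return fmt.Sprintf("%s-work-sa", klusterletName)
    }
    return fmt.Sprintf("%s-%s", klusterletName, suffix)
}

func main() {
    fmt.Println(serviceAccountName("klusterlet", "registration-sa", true))  // klusterlet-work-sa
    fmt.Println(serviceAccountName("klusterlet", "registration-sa", false)) // klusterlet-registration-sa
    fmt.Println(serviceAccountName("klusterlet", "work-sa", false))         // klusterlet-work-sa
}
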
@@ -508,9 +508,9 @@ func TestSyncDeploy(t *testing.T) {
     }

     // Check if resources are created as expected
-    // 11 managed static manifests + 11 management static manifests - 2 duplicated service account manifests + 1 addon namespace + 2 deployments
-    if len(createObjects) != 23 {
-        t.Errorf("Expect 23 objects created in the sync loop, actual %d", len(createObjects))
+    // 11 managed static manifests + 12 management static manifests - 2 duplicated service account manifests + 1 addon namespace + 2 deployments
+    if len(createObjects) != 24 {
+        t.Errorf("Expect 24 objects created in the sync loop, actual %d", len(createObjects))
     }
     for _, object := range createObjects {
         ensureObject(t, object, klusterlet)

@@ -569,8 +569,8 @@ func TestSyncDeploySingleton(t *testing.T) {
     }

     // Check if resources are created as expected
-    // 10 managed static manifests + 10 management static manifests - 1 service account manifests + 1 addon namespace + 1 deployments
-    if len(createObjects) != 21 {
+    // 10 managed static manifests + 11 management static manifests - 1 service account manifests + 1 addon namespace + 1 deployments
+    if len(createObjects) != 22 {
         t.Errorf("Expect 21 objects created in the sync loop, actual %d", len(createObjects))
     }
     for _, object := range createObjects {

@@ -658,9 +658,9 @@ func TestSyncDeployHosted(t *testing.T) {
         }
     }
     // Check if resources are created as expected on the managed cluster
-    // 11 static manifests + 2 namespaces + 1 pull secret in the addon namespace
-    if len(createObjectsManaged) != 14 {
-        t.Errorf("Expect 14 objects created in the sync loop, actual %d", len(createObjectsManaged))
+    // 12 static manifests + 2 namespaces + 1 pull secret in the addon namespace
+    if len(createObjectsManaged) != 15 {
+        t.Errorf("Expect 15 objects created in the sync loop, actual %d", len(createObjectsManaged))
     }
     for _, object := range createObjectsManaged {
         ensureObject(t, object, klusterlet)

@@ -1014,10 +1014,10 @@ func TestDeployOnKube111(t *testing.T) {
     }

     // Check if resources are created as expected
-    // 11 managed static manifests + 11 management static manifests -
+    // 12 managed static manifests + 11 management static manifests -
     // 2 duplicated service account manifests + 1 addon namespace + 2 deployments + 2 kube111 clusterrolebindings
-    if len(createObjects) != 25 {
-        t.Errorf("Expect 25 objects created in the sync loop, actual %d", len(createObjects))
+    if len(createObjects) != 26 {
+        t.Errorf("Expect 26 objects created in the sync loop, actual %d", len(createObjects))
     }
     for _, object := range createObjects {
         ensureObject(t, object, klusterlet)

@@ -1059,9 +1059,9 @@ func TestDeployOnKube111(t *testing.T) {
         }
     }

-    // 11 managed static manifests + 11 management static manifests + 1 hub kubeconfig + 2 namespaces + 2 deployments + 2 kube111 clusterrolebindings
-    if len(deleteActions) != 29 {
-        t.Errorf("Expected 29 delete actions, but got %d", len(deleteActions))
+    // 12 managed static manifests + 11 management static manifests + 1 hub kubeconfig + 2 namespaces + 2 deployments + 2 kube111 clusterrolebindings
+    if len(deleteActions) != 30 {
+        t.Errorf("Expected 30 delete actions, but got %d", len(deleteActions))
     }
 }

@@ -11,6 +11,7 @@ import (
     "github.com/openshift/library-go/pkg/assets"
     "github.com/openshift/library-go/pkg/operator/events"
     "github.com/openshift/library-go/pkg/operator/resource/resourceapply"
+    rbacv1 "k8s.io/api/rbac/v1"
     "k8s.io/apimachinery/pkg/api/errors"
     "k8s.io/apimachinery/pkg/api/meta"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -119,6 +120,11 @@ func (r *managedReconcile) reconcile(ctx context.Context, klusterlet *operatorap
         }
     }

+    // add aggregation clusterrole for work, this is not allowed in library-go for now, so need an additional creating
+    if err := r.createAggregationRule(ctx, klusterlet); err != nil {
+        errs = append(errs, err)
+    }
+
     if len(errs) > 0 {
         applyErrors := utilerrors.NewAggregate(errs)
         meta.SetStatusCondition(&klusterlet.Status.Conditions, metav1.Condition{

@@ -131,6 +137,32 @@ func (r *managedReconcile) reconcile(ctx context.Context, klusterlet *operatorap
     return klusterlet, reconcileContinue, nil
 }

+func (r *managedReconcile) createAggregationRule(ctx context.Context, klusterlet *operatorapiv1.Klusterlet) error {
+    aggregateClusterRoleName := fmt.Sprintf("open-cluster-management:%s-work:aggregate", klusterlet.Name)
+    _, err := r.managedClusterClients.kubeClient.RbacV1().ClusterRoles().Get(ctx, aggregateClusterRoleName, metav1.GetOptions{})
+    if errors.IsNotFound(err) {
+        aggregateClusterRole := &rbacv1.ClusterRole{
+            ObjectMeta: metav1.ObjectMeta{
+                Name: aggregateClusterRoleName,
+            },
+            AggregationRule: &rbacv1.AggregationRule{
+                ClusterRoleSelectors: []metav1.LabelSelector{
+                    {
+                        MatchLabels: map[string]string{
+                            "open-cluster-management.io/aggregate-to-work": "true",
+                        },
+                    },
+                },
+            },
+            Rules: []rbacv1.PolicyRule{},
+        }
+        _, createErr := r.managedClusterClients.kubeClient.RbacV1().ClusterRoles().Create(ctx, aggregateClusterRole, metav1.CreateOptions{})
+        return createErr
+    }
+
+    return nil
+}
+
 func (r *managedReconcile) clean(ctx context.Context, klusterlet *operatorapiv1.Klusterlet,
     config klusterletConfig) (*operatorapiv1.Klusterlet, reconcileState, error) {
     // nothing should be done when deploy mode is hosted and hosted finalizer is not added.

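Editor's aside on the mechanism, not from this commit: because the role created above carries an AggregationRule, kube-controller-manager merges into it the rules of every ClusterRole labeled open-cluster-management.io/aggregate-to-work: "true". That is how the execution role's permissions reach the work agent without the operator copying them, and how other roles could extend the agent's permissions. A hypothetical example of granting the work agent extra permissions this way (role name and rules are made up):

package rbacexample

import (
    "context"

    rbacv1 "k8s.io/api/rbac/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
)

// GrantExtraWorkPermissions creates a ClusterRole carrying the aggregation label,
// so the RBAC aggregation controller merges its rules into
// open-cluster-management:<klusterlet>-work:aggregate.
func GrantExtraWorkPermissions(ctx context.Context, kubeClient kubernetes.Interface) error {
    extraRole := &rbacv1.ClusterRole{
        ObjectMeta: metav1.ObjectMeta{
            Name: "example-extra-work-permissions", // hypothetical name
            Labels: map[string]string{
                "open-cluster-management.io/aggregate-to-work": "true",
            },
        },
        Rules: []rbacv1.PolicyRule{{
            APIGroups: []string{"apps"},
            Resources: []string{"deployments"},
            Verbs:     []string{"get", "list", "watch"},
        }},
    }
    _, err := kubeClient.RbacV1().ClusterRoles().Create(ctx, extraRole, metav1.CreateOptions{})
    return err
}
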
@@ -155,6 +187,13 @@ func (r *managedReconcile) clean(ctx context.Context, klusterlet *operatorapiv1.
         }
     }

+    // remove aggregate work clusterrole
+    aggregateClusterRoleName := fmt.Sprintf("open-cluster-management:%s-work:aggregate", klusterlet.Name)
+    if err := r.managedClusterClients.kubeClient.RbacV1().ClusterRoles().Delete(
+        ctx, aggregateClusterRoleName, metav1.DeleteOptions{}); err != nil && !errors.IsNotFound(err) {
+        return klusterlet, reconcileStop, err
+    }
+
     // remove the klusterlet namespace and klusterlet addon namespace on the managed cluster
     // For now, whether in Default or Hosted mode, the addons could be deployed on the managed cluster.
     namespaces := []string{config.KlusterletNamespace, fmt.Sprintf("%s-addon", config.KlusterletNamespace)}

@@ -159,7 +159,7 @@ func (r *runtimeReconcile) installSingletonAgent(ctx context.Context, klusterlet
     // Create managed config secret for agent. In singletonHosted mode, service account for registration/work is actually
     // the same one, and we just pick one of them to build the external kubeconfig.
     if err := r.createManagedClusterKubeconfig(ctx, klusterlet, config.KlusterletNamespace, config.AgentNamespace,
-        config.RegistrationServiceAccount, config.ExternalManagedKubeConfigAgentSecret,
+        config.WorkServiceAccount, config.ExternalManagedKubeConfigAgentSecret,
         r.recorder); err != nil {
         return klusterlet, reconcileStop, err
     }

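For readers unfamiliar with createManagedClusterKubeconfig (its body is not shown in this diff): conceptually it builds an external kubeconfig whose user is the chosen service account. A rough editor's sketch of that idea using client-go's clientcmd API, with the server, CA data, and token assumed to come from the service account's token secret; this is not the operator's implementation:

package kubeconfigexample

import (
    "k8s.io/client-go/tools/clientcmd"
    clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

// BuildKubeconfig assembles a kubeconfig that authenticates with a service
// account token against the given API server.
func BuildKubeconfig(server string, caData []byte, token string) ([]byte, error) {
    cfg := clientcmdapi.Config{
        Clusters: map[string]*clientcmdapi.Cluster{
            "cluster": {Server: server, CertificateAuthorityData: caData},
        },
        AuthInfos: map[string]*clientcmdapi.AuthInfo{
            "agent": {Token: token},
        },
        Contexts: map[string]*clientcmdapi.Context{
            "default": {Cluster: "cluster", AuthInfo: "agent"},
        },
        CurrentContext: "default",
    }
    return clientcmd.Write(cfg)
}
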
@@ -989,7 +989,7 @@ func newAggregatedClusterRole(name, apiGroup, resource string) *rbacv1.ClusterRo
         ObjectMeta: metav1.ObjectMeta{
             Name: name,
             Labels: map[string]string{
-                "rbac.authorization.k8s.io/aggregate-to-admin": "true",
+                "open-cluster-management.io/aggregate-to-work": "true",
             },
         },
         Rules: []rbacv1.PolicyRule{

@@ -77,7 +77,7 @@ var _ = ginkgo.Describe("Klusterlet Singleton mode", func() {
         workManagementRoleName = fmt.Sprintf("open-cluster-management:management:%s-work:agent", klusterlet.Name)
         registrationManagedRoleName = fmt.Sprintf("open-cluster-management:%s-registration:agent", klusterlet.Name)
         workManagedRoleName = fmt.Sprintf("open-cluster-management:%s-work:agent", klusterlet.Name)
-        saName = fmt.Sprintf("%s-agent-sa", klusterlet.Name)
+        saName = fmt.Sprintf("%s-work-sa", klusterlet.Name)
     })

     ginkgo.AfterEach(func() {