From 803bfb374805414222e2aff669b73588dffe3537 Mon Sep 17 00:00:00 2001 From: Qing Hao Date: Tue, 28 Jun 2022 10:31:44 +0800 Subject: [PATCH] split work permissions (#252) * split work permissions Signed-off-by: haoqing0110 * add more comments Signed-off-by: haoqing0110 --- ...klusterlet-work-clusterrole-execution.yaml | 22 ++++++++++++ .../managed/klusterlet-work-clusterrole.yaml | 22 ++---------- ...rk-clusterrolebinding-execution-admin.yaml | 16 +++++++++ ...et-work-clusterrolebinding-execution.yaml} | 6 ++-- .../klusterlet-work-clusterrolebinding.yaml | 5 ++- .../management/klusterlet-work-role.yaml | 6 ++-- .../klusterlet-work-rolebinding.yaml | 1 + .../klusterlet_controller.go | 4 ++- .../klusterlet_controller_test.go | 36 +++++++++---------- test/integration/klusterlet_hosted_test.go | 6 ++-- test/integration/klusterlet_test.go | 6 ++-- 11 files changed, 76 insertions(+), 54 deletions(-) create mode 100644 manifests/klusterlet/managed/klusterlet-work-clusterrole-execution.yaml create mode 100644 manifests/klusterlet/managed/klusterlet-work-clusterrolebinding-execution-admin.yaml rename manifests/klusterlet/managed/{klusterlet-work-clusterrolebinding-addition.yaml => klusterlet-work-clusterrolebinding-execution.yaml} (50%) diff --git a/manifests/klusterlet/managed/klusterlet-work-clusterrole-execution.yaml b/manifests/klusterlet/managed/klusterlet-work-clusterrole-execution.yaml new file mode 100644 index 000000000..a159c9557 --- /dev/null +++ b/manifests/klusterlet/managed/klusterlet-work-clusterrole-execution.yaml @@ -0,0 +1,22 @@ +# Additional ClusterRole permission for work agent +# Work agent needs these permissions to apply some resources on the managed cluster. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: open-cluster-management:{{ .KlusterletName }}-work:execution +rules: +# Allow agent to get/list/watch/create/delete crds. 
+- apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["get", "list", "watch", "create", "delete", "update"] +# Allow agent to create/update/patch/delete namespaces, get/list/watch are contained in admin role already +- apiGroups: [""] + resources: ["namespaces"] + verbs: ["create", "update", "patch", "delete"] +# Allow agent to manage role/rolebinding/clusterrole/clusterrolebinding +- apiGroups: ["rbac.authorization.k8s.io"] + resources: ["clusterrolebindings", "rolebindings"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] +- apiGroups: ["rbac.authorization.k8s.io"] + resources: ["clusterroles", "roles"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete", "escalate", "bind"] diff --git a/manifests/klusterlet/managed/klusterlet-work-clusterrole.yaml b/manifests/klusterlet/managed/klusterlet-work-clusterrole.yaml index 82517c0bc..7eab35751 100644 --- a/manifests/klusterlet/managed/klusterlet-work-clusterrole.yaml +++ b/manifests/klusterlet/managed/klusterlet-work-clusterrole.yaml @@ -1,28 +1,10 @@ -# Clusterrole for work agent in addition to admin clusterrole. +# Mandatory ClusterRole permission for work agent +# Work agent can not run without these permissions apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: open-cluster-management:{{ .KlusterletName }}-work:agent rules: -# Allow agent to get/list/watch/create/delete crds. 
-- apiGroups: ["apiextensions.k8s.io"] - resources: ["customresourcedefinitions"] - verbs: ["get", "list", "watch", "create", "delete", "update"] -# Allow agent to create/update/patch/delete namespaces, get/list/watch are contained in admin role already -- apiGroups: [""] - resources: ["namespaces"] - verbs: ["create", "update", "patch", "delete"] -# Allow agent to manage role/rolebinding/clusterrole/clusterrolebinding -- apiGroups: ["rbac.authorization.k8s.io"] - resources: ["clusterrolebindings", "rolebindings"] - verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] -- apiGroups: ["rbac.authorization.k8s.io"] - resources: ["clusterroles", "roles"] - verbs: ["get", "list", "watch", "create", "update", "patch", "delete", "escalate", "bind"] -# Allow agent to create sar -- apiGroups: ["authorization.k8s.io"] - resources: ["subjectaccessreviews"] - verbs: ["create"] # Allow agent to managed appliedmanifestworks - apiGroups: ["work.open-cluster-management.io"] resources: ["appliedmanifestworks"] diff --git a/manifests/klusterlet/managed/klusterlet-work-clusterrolebinding-execution-admin.yaml b/manifests/klusterlet/managed/klusterlet-work-clusterrolebinding-execution-admin.yaml new file mode 100644 index 000000000..82f435dd2 --- /dev/null +++ b/manifests/klusterlet/managed/klusterlet-work-clusterrolebinding-execution-admin.yaml @@ -0,0 +1,16 @@ +# ClusterRoleBinding for work execution permissions. +# TODO: replace this with user defined execution permissions. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: open-cluster-management:{{ .KlusterletName }}-work:execution-admin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + # We deploy a controller that could work with permission lower than cluster-admin, the tradeoff is + # responsiveness because list/watch cannot be maintained over too many namespaces. 
+ name: admin +subjects: + - kind: ServiceAccount + name: {{ .KlusterletName }}-work-sa + namespace: {{ .KlusterletNamespace }} diff --git a/manifests/klusterlet/managed/klusterlet-work-clusterrolebinding-addition.yaml b/manifests/klusterlet/managed/klusterlet-work-clusterrolebinding-execution.yaml similarity index 50% rename from manifests/klusterlet/managed/klusterlet-work-clusterrolebinding-addition.yaml rename to manifests/klusterlet/managed/klusterlet-work-clusterrolebinding-execution.yaml index e76d3da1e..68b09fdaa 100644 --- a/manifests/klusterlet/managed/klusterlet-work-clusterrolebinding-addition.yaml +++ b/manifests/klusterlet/managed/klusterlet-work-clusterrolebinding-execution.yaml @@ -1,11 +1,13 @@ +# ClusterRoleBinding for work execution permissions. +# TODO: replace this with user defined execution permissions. apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: open-cluster-management:{{ .KlusterletName }}-work:agent-addition + name: open-cluster-management:{{ .KlusterletName }}-work:execution roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: open-cluster-management:{{ .KlusterletName }}-work:agent + name: open-cluster-management:{{ .KlusterletName }}-work:execution subjects: - kind: ServiceAccount name: {{ .KlusterletName }}-work-sa diff --git a/manifests/klusterlet/managed/klusterlet-work-clusterrolebinding.yaml b/manifests/klusterlet/managed/klusterlet-work-clusterrolebinding.yaml index 561f162f2..7fada6ac3 100644 --- a/manifests/klusterlet/managed/klusterlet-work-clusterrolebinding.yaml +++ b/manifests/klusterlet/managed/klusterlet-work-clusterrolebinding.yaml @@ -1,3 +1,4 @@ +# ClusterRoleBinding for work mandatory permissions. 
apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: @@ -5,9 +6,7 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - # We deploy a controller that could work with permission lower than cluster-admin, the tradeoff is - # responsivity because list/watch cannot be maintained over too many namespaces. - name: admin + name: open-cluster-management:{{ .KlusterletName }}-work:agent subjects: - kind: ServiceAccount name: {{ .KlusterletName }}-work-sa diff --git a/manifests/klusterlet/management/klusterlet-work-role.yaml b/manifests/klusterlet/management/klusterlet-work-role.yaml index 40bdfcf3f..7be545521 100644 --- a/manifests/klusterlet/management/klusterlet-work-role.yaml +++ b/manifests/klusterlet/management/klusterlet-work-role.yaml @@ -1,4 +1,5 @@ -# Role for work agent. +# Mandatory Role permission for work agent +# Work agent can not run without these permissions apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: @@ -12,9 +13,6 @@ rules: - apiGroups: ["coordination.k8s.io"] resources: ["leases"] verbs: ["create", "get", "list", "update", "watch", "patch"] -- apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list", "watch"] - apiGroups: ["", "events.k8s.io"] resources: ["events"] verbs: ["create", "patch", "update"] diff --git a/manifests/klusterlet/management/klusterlet-work-rolebinding.yaml b/manifests/klusterlet/management/klusterlet-work-rolebinding.yaml index e97a9137c..a5e45e3a3 100644 --- a/manifests/klusterlet/management/klusterlet-work-rolebinding.yaml +++ b/manifests/klusterlet/management/klusterlet-work-rolebinding.yaml @@ -1,3 +1,4 @@ +# RoleBinding for work mandatory permissions. 
apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: diff --git a/pkg/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller.go b/pkg/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller.go index f3df2a69a..4ab847401 100644 --- a/pkg/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller.go +++ b/pkg/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller.go @@ -70,8 +70,10 @@ var ( "klusterlet/managed/klusterlet-registration-clusterrolebinding-addon-management.yaml", "klusterlet/managed/klusterlet-work-serviceaccount.yaml", "klusterlet/managed/klusterlet-work-clusterrole.yaml", + "klusterlet/managed/klusterlet-work-clusterrole-execution.yaml", "klusterlet/managed/klusterlet-work-clusterrolebinding.yaml", - "klusterlet/managed/klusterlet-work-clusterrolebinding-addition.yaml", + "klusterlet/managed/klusterlet-work-clusterrolebinding-execution.yaml", + "klusterlet/managed/klusterlet-work-clusterrolebinding-execution-admin.yaml", } managementStaticResourceFiles = []string{ diff --git a/pkg/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller_test.go b/pkg/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller_test.go index 880a8a666..b106eadd1 100644 --- a/pkg/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller_test.go +++ b/pkg/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller_test.go @@ -431,9 +431,9 @@ func TestSyncDeploy(t *testing.T) { } // Check if resources are created as expected - // 9 managed static manifests + 9 management static manifests - 2 duplicated service account manifests + 1 addon namespace + 2 deployments - if len(createObjects) != 19 { - t.Errorf("Expect 19 objects created in the sync loop, actual %d", len(createObjects)) + // 11 managed static manifests + 9 management static manifests - 2 duplicated service account manifests + 1 addon namespace + 2 
deployments + if len(createObjects) != 21 { + t.Errorf("Expect 21 objects created in the sync loop, actual %d", len(createObjects)) } for _, object := range createObjects { ensureObject(t, object, klusterlet) @@ -513,9 +513,9 @@ func TestSyncDeployHosted(t *testing.T) { } } // Check if resources are created as expected on the managed cluster - // 9 static manifests + 2 namespaces + 1 pull secret in the addon namespace - if len(createObjectsManaged) != 12 { - t.Errorf("Expect 12 objects created in the sync loop, actual %d", len(createObjectsManaged)) + // 11 static manifests + 2 namespaces + 1 pull secret in the addon namespace + if len(createObjectsManaged) != 14 { + t.Errorf("Expect 14 objects created in the sync loop, actual %d", len(createObjectsManaged)) } for _, object := range createObjectsManaged { ensureObject(t, object, klusterlet) @@ -600,9 +600,9 @@ func TestSyncDelete(t *testing.T) { } } - // 9 managed static manifests + 9 management static manifests + 1 hub kubeconfig + 2 namespaces + 2 deployments - if len(deleteActions) != 23 { - t.Errorf("Expected 23 delete actions, but got %d", len(deleteActions)) + // 11 managed static manifests + 9 management static manifests + 1 hub kubeconfig + 2 namespaces + 2 deployments + if len(deleteActions) != 25 { + t.Errorf("Expected 25 delete actions, but got %d", len(deleteActions)) } deleteCRDActions := []clienttesting.DeleteActionImpl{} @@ -682,9 +682,9 @@ func TestSyncDeleteHosted(t *testing.T) { } } - // 9 static manifests + 2 namespaces - if len(deleteActionsManaged) != 11 { - t.Errorf("Expected 11 delete actions, but got %d", len(deleteActionsManaged)) + // 11 static manifests + 2 namespaces + if len(deleteActionsManaged) != 13 { + t.Errorf("Expected 13 delete actions, but got %d", len(deleteActionsManaged)) } deleteCRDActions := []clienttesting.DeleteActionImpl{} @@ -983,9 +983,9 @@ func TestDeployOnKube111(t *testing.T) { } // Check if resources are created as expected - // 9 managed static manifests + 9 
management static manifests - 2 duplicated service account manifests + 1 addon namespace + 2 deployments + 2 kube111 clusterrolebindings - if len(createObjects) != 21 { - t.Errorf("Expect 21 objects created in the sync loop, actual %d", len(createObjects)) + // 11 managed static manifests + 9 management static manifests - 2 duplicated service account manifests + 1 addon namespace + 2 deployments + 2 kube111 clusterrolebindings + if len(createObjects) != 23 { + t.Errorf("Expect 23 objects created in the sync loop, actual %d", len(createObjects)) } for _, object := range createObjects { ensureObject(t, object, klusterlet) @@ -1029,9 +1029,9 @@ func TestDeployOnKube111(t *testing.T) { } } - // 9 managed static manifests + 9 management static manifests + 1 hub kubeconfig + 2 namespaces + 2 deployments + 2 kube111 clusterrolebindings - if len(deleteActions) != 25 { - t.Errorf("Expected 25 delete actions, but got %d", len(deleteActions)) + // 11 managed static manifests + 9 management static manifests + 1 hub kubeconfig + 2 namespaces + 2 deployments + 2 kube111 clusterrolebindings + if len(deleteActions) != 27 { + t.Errorf("Expected 27 delete actions, but got %d", len(deleteActions)) } } diff --git a/test/integration/klusterlet_hosted_test.go b/test/integration/klusterlet_hosted_test.go index 8e7d2d13a..3c9f6710e 100644 --- a/test/integration/klusterlet_hosted_test.go +++ b/test/integration/klusterlet_hosted_test.go @@ -112,9 +112,9 @@ var _ = ginkgo.Describe("Klusterlet Hosted mode", func() { return err } - // 9 managed static manifests + 9 management static manifests + 2CRDs + 2 deployments(2 duplicated CRDs, but status also recorded in the klusterlet's status) - if len(actual.Status.RelatedResources) != 22 { - return fmt.Errorf("should get 22 relatedResources, actual got %v", len(actual.Status.RelatedResources)) + // 11 managed static manifests + 9 management static manifests + 2CRDs + 2 deployments(2 duplicated CRDs, but status also recorded in the klusterlet's 
status) + if len(actual.Status.RelatedResources) != 24 { + return fmt.Errorf("should get 24 relatedResources, actual got %v", len(actual.Status.RelatedResources)) } return nil }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) diff --git a/test/integration/klusterlet_test.go b/test/integration/klusterlet_test.go index 27fcfaeb6..3669b6cc3 100644 --- a/test/integration/klusterlet_test.go +++ b/test/integration/klusterlet_test.go @@ -132,9 +132,9 @@ var _ = ginkgo.Describe("Klusterlet", func() { return err } - // 9 managed static manifests + 9 management static manifests + 2CRDs + 2 deployments(2 duplicated CRDs, but status also recorded in the klusterlet's status) - if len(actual.Status.RelatedResources) != 22 { - return fmt.Errorf("should get 22 relatedResources, actual got %v", len(actual.Status.RelatedResources)) + // 11 managed static manifests + 9 management static manifests + 2CRDs + 2 deployments(2 duplicated CRDs, but status also recorded in the klusterlet's status) + if len(actual.Status.RelatedResources) != 24 { + return fmt.Errorf("should get 24 relatedResources, actual got %v", len(actual.Status.RelatedResources)) } return nil }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())