split work permissions (#252)

* split work permissions

Signed-off-by: haoqing0110 <qhao@redhat.com>

* add more comments

Signed-off-by: haoqing0110 <qhao@redhat.com>
Qing Hao
2022-06-28 10:31:44 +08:00
committed by GitHub
parent 54452f060a
commit 803bfb3748
11 changed files with 76 additions and 54 deletions

View File

@@ -0,0 +1,22 @@
# Additional ClusterRole permissions for the work agent
# The work agent needs these permissions to apply certain resources on the managed cluster.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: open-cluster-management:{{ .KlusterletName }}-work:execution
rules:
# Allow the agent to get/list/watch/create/delete/update CRDs.
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["get", "list", "watch", "create", "delete", "update"]
# Allow the agent to create/update/patch/delete namespaces; get/list/watch are already included in the admin role.
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["create", "update", "patch", "delete"]
# Allow agent to manage role/rolebinding/clusterrole/clusterrolebinding
- apiGroups: ["rbac.authorization.k8s.io"]
resources: ["clusterrolebindings", "rolebindings"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- apiGroups: ["rbac.authorization.k8s.io"]
resources: ["clusterroles", "roles"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete", "escalate", "bind"]

View File

@@ -1,28 +1,10 @@
# Clusterrole for work agent in addition to admin clusterrole.
# Mandatory ClusterRole permissions for the work agent
# The work agent cannot run without these permissions
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: open-cluster-management:{{ .KlusterletName }}-work:agent
rules:
# Allow the agent to get/list/watch/create/delete/update CRDs.
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["get", "list", "watch", "create", "delete", "update"]
# Allow the agent to create/update/patch/delete namespaces; get/list/watch are already included in the admin role.
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["create", "update", "patch", "delete"]
# Allow agent to manage role/rolebinding/clusterrole/clusterrolebinding
- apiGroups: ["rbac.authorization.k8s.io"]
resources: ["clusterrolebindings", "rolebindings"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- apiGroups: ["rbac.authorization.k8s.io"]
resources: ["clusterroles", "roles"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete", "escalate", "bind"]
# Allow the agent to create SubjectAccessReviews
- apiGroups: ["authorization.k8s.io"]
resources: ["subjectaccessreviews"]
verbs: ["create"]
# Allow the agent to manage appliedmanifestworks
- apiGroups: ["work.open-cluster-management.io"]
resources: ["appliedmanifestworks"]

View File

@@ -0,0 +1,16 @@
# ClusterRoleBinding for work execution permissions.
# TODO: replace this with user-defined execution permissions.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: open-cluster-management:{{ .KlusterletName }}-work:execution-admin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
# We deploy a controller that can work with permissions lower than cluster-admin; the tradeoff is
# responsiveness, because list/watch cannot be maintained over too many namespaces.
name: admin
subjects:
- kind: ServiceAccount
name: {{ .KlusterletName }}-work-sa
namespace: {{ .KlusterletNamespace }}
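To show how the {{ .KlusterletName }} and {{ .KlusterletNamespace }} placeholders resolve, here is a small, self-contained Go text/template sketch; the operator's actual rendering code is not part of this diff, and the struct name and example values are assumptions.

package main

import (
	"os"
	"text/template"
)

// klusterletConfig mirrors the placeholders used in the manifest above; the
// struct name and example values are assumptions for illustration only.
type klusterletConfig struct {
	KlusterletName      string
	KlusterletNamespace string
}

const bindingTemplate = `apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: open-cluster-management:{{ .KlusterletName }}-work:execution-admin
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: admin
subjects:
- kind: ServiceAccount
  name: {{ .KlusterletName }}-work-sa
  namespace: {{ .KlusterletNamespace }}
`

func main() {
	// Render the manifest template with example values and print the result.
	tmpl := template.Must(template.New("binding").Parse(bindingTemplate))
	cfg := klusterletConfig{KlusterletName: "klusterlet", KlusterletNamespace: "open-cluster-management-agent"}
	if err := tmpl.Execute(os.Stdout, cfg); err != nil {
		panic(err)
	}
}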

View File

@@ -1,11 +1,13 @@
# ClusterRoleBinding for work execution permissions.
# TODO: replace this with user-defined execution permissions.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: open-cluster-management:{{ .KlusterletName }}-work:agent-addition
name: open-cluster-management:{{ .KlusterletName }}-work:execution
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: open-cluster-management:{{ .KlusterletName }}-work:agent
name: open-cluster-management:{{ .KlusterletName }}-work:execution
subjects:
- kind: ServiceAccount
name: {{ .KlusterletName }}-work-sa

View File

@@ -1,3 +1,4 @@
# ClusterRoleBinding for work mandatory permissions.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
@@ -5,9 +6,7 @@ metadata:
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
# We deploy a controller that can work with permissions lower than cluster-admin; the tradeoff is
# responsiveness, because list/watch cannot be maintained over too many namespaces.
name: admin
name: open-cluster-management:{{ .KlusterletName }}-work:agent
subjects:
- kind: ServiceAccount
name: {{ .KlusterletName }}-work-sa

View File

@@ -1,4 +1,5 @@
# Role for work agent.
# Mandatory Role permissions for the work agent
# The work agent cannot run without these permissions
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
@@ -12,9 +13,6 @@ rules:
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["create", "get", "list", "update", "watch", "patch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "watch"]
- apiGroups: ["", "events.k8s.io"]
resources: ["events"]
verbs: ["create", "patch", "update"]

View File

@@ -1,3 +1,4 @@
# RoleBinding for work mandatory permissions.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:

View File

@@ -70,8 +70,10 @@ var (
"klusterlet/managed/klusterlet-registration-clusterrolebinding-addon-management.yaml",
"klusterlet/managed/klusterlet-work-serviceaccount.yaml",
"klusterlet/managed/klusterlet-work-clusterrole.yaml",
"klusterlet/managed/klusterlet-work-clusterrole-execution.yaml",
"klusterlet/managed/klusterlet-work-clusterrolebinding.yaml",
"klusterlet/managed/klusterlet-work-clusterrolebinding-addition.yaml",
"klusterlet/managed/klusterlet-work-clusterrolebinding-execution.yaml",
"klusterlet/managed/klusterlet-work-clusterrolebinding-execution-admin.yaml",
}
managementStaticResourceFiles = []string{

View File

@@ -431,9 +431,9 @@ func TestSyncDeploy(t *testing.T) {
}
// Check if resources are created as expected
// 9 managed static manifests + 9 management static manifests - 2 duplicated service account manifests + 1 addon namespace + 2 deployments
if len(createObjects) != 19 {
t.Errorf("Expect 19 objects created in the sync loop, actual %d", len(createObjects))
// 11 managed static manifests + 9 management static manifests - 2 duplicated service account manifests + 1 addon namespace + 2 deployments
if len(createObjects) != 21 {
t.Errorf("Expect 21 objects created in the sync loop, actual %d", len(createObjects))
}
for _, object := range createObjects {
ensureObject(t, object, klusterlet)
@@ -513,9 +513,9 @@ func TestSyncDeployHosted(t *testing.T) {
}
}
// Check if resources are created as expected on the managed cluster
// 9 static manifests + 2 namespaces + 1 pull secret in the addon namespace
if len(createObjectsManaged) != 12 {
t.Errorf("Expect 12 objects created in the sync loop, actual %d", len(createObjectsManaged))
// 11 static manifests + 2 namespaces + 1 pull secret in the addon namespace
if len(createObjectsManaged) != 14 {
t.Errorf("Expect 14 objects created in the sync loop, actual %d", len(createObjectsManaged))
}
for _, object := range createObjectsManaged {
ensureObject(t, object, klusterlet)
@@ -600,9 +600,9 @@ func TestSyncDelete(t *testing.T) {
}
}
// 9 managed static manifests + 9 management static manifests + 1 hub kubeconfig + 2 namespaces + 2 deployments
if len(deleteActions) != 23 {
t.Errorf("Expected 23 delete actions, but got %d", len(deleteActions))
// 11 managed static manifests + 9 management static manifests + 1 hub kubeconfig + 2 namespaces + 2 deployments
if len(deleteActions) != 25 {
t.Errorf("Expected 25 delete actions, but got %d", len(deleteActions))
}
deleteCRDActions := []clienttesting.DeleteActionImpl{}
@@ -682,9 +682,9 @@ func TestSyncDeleteHosted(t *testing.T) {
}
}
// 9 static manifests + 2 namespaces
if len(deleteActionsManaged) != 11 {
t.Errorf("Expected 11 delete actions, but got %d", len(deleteActionsManaged))
// 11 static manifests + 2 namespaces
if len(deleteActionsManaged) != 13 {
t.Errorf("Expected 13 delete actions, but got %d", len(deleteActionsManaged))
}
deleteCRDActions := []clienttesting.DeleteActionImpl{}
@@ -983,9 +983,9 @@ func TestDeployOnKube111(t *testing.T) {
}
// Check if resources are created as expected
// 9 managed static manifests + 9 management static manifests - 2 duplicated service account manifests + 1 addon namespace + 2 deployments + 2 kube111 clusterrolebindings
if len(createObjects) != 21 {
t.Errorf("Expect 21 objects created in the sync loop, actual %d", len(createObjects))
// 11 managed static manifests + 9 management static manifests - 2 duplicated service account manifests + 1 addon namespace + 2 deployments + 2 kube111 clusterrolebindings
if len(createObjects) != 23 {
t.Errorf("Expect 23 objects created in the sync loop, actual %d", len(createObjects))
}
for _, object := range createObjects {
ensureObject(t, object, klusterlet)
@@ -1029,9 +1029,9 @@ func TestDeployOnKube111(t *testing.T) {
}
}
// 9 managed static manifests + 9 management static manifests + 1 hub kubeconfig + 2 namespaces + 2 deployments + 2 kube111 clusterrolebindings
if len(deleteActions) != 25 {
t.Errorf("Expected 25 delete actions, but got %d", len(deleteActions))
// 11 managed static manifests + 9 management static manifests + 1 hub kubeconfig + 2 namespaces + 2 deployments + 2 kube111 clusterrolebindings
if len(deleteActions) != 27 {
t.Errorf("Expected 27 delete actions, but got %d", len(deleteActions))
}
}

View File

@@ -112,9 +112,9 @@ var _ = ginkgo.Describe("Klusterlet Hosted mode", func() {
return err
}
// 9 managed static manifests + 9 management static manifests + 2 CRDs + 2 deployments (the 2 CRDs are duplicated, but their status is also recorded in the klusterlet's status)
if len(actual.Status.RelatedResources) != 22 {
return fmt.Errorf("should get 22 relatedResources, actual got %v", len(actual.Status.RelatedResources))
// 11 managed static manifests + 9 management static manifests + 2 CRDs + 2 deployments (the 2 CRDs are duplicated, but their status is also recorded in the klusterlet's status)
if len(actual.Status.RelatedResources) != 24 {
return fmt.Errorf("should get 24 relatedResources, actual got %v", len(actual.Status.RelatedResources))
}
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())

View File

@@ -132,9 +132,9 @@ var _ = ginkgo.Describe("Klusterlet", func() {
return err
}
// 9 managed static manifests + 9 management static manifests + 2 CRDs + 2 deployments (the 2 CRDs are duplicated, but their status is also recorded in the klusterlet's status)
if len(actual.Status.RelatedResources) != 22 {
return fmt.Errorf("should get 22 relatedResources, actual got %v", len(actual.Status.RelatedResources))
// 11 managed static manifests + 9 management static manifests + 2 CRDs + 2 deployments (the 2 CRDs are duplicated, but their status is also recorded in the klusterlet's status)
if len(actual.Status.RelatedResources) != 24 {
return fmt.Errorf("should get 24 relatedResources, actual got %v", len(actual.Status.RelatedResources))
}
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())