From 1300a980f036b1a3e249aec49ec6753f913b9ebd Mon Sep 17 00:00:00 2001 From: Sunghoon Kang Date: Tue, 15 Mar 2022 12:55:50 +0900 Subject: [PATCH] Feat: reconcile app with scoped permissions (#3434) * Refactor: refactor multi cluster round trippers Before adding more RoundTrippers, it would be better to expose common logic in the utility package. This commit exports `tryCancelRequest` in the `utils` package, and makes `secretMultiClusterRoundTripper` implement the `RoundTripperWrapper` interface to allow chaining multiple round trippers. Refs #3432 Signed-off-by: Sunghoon Kang * Feat: reconcile app with scoped permissions Currently, all Application resources are reconciled by the Roles bound to the controller service account. This behavior gives us the power to manage resources across multiple namespaces. However, this behavior can be problematic in a soft-multitenancy environment. This commit adds `serviceAccountName` to ApplicationSpec so that an Application is reconciled with the given service account, i.e. with scoped permissions. 
Refs #3432 Signed-off-by: Sunghoon Kang * Refactor: extract context setter as method https://github.com/oam-dev/kubevela/pull/3434#discussion_r825561603 Signed-off-by: Sunghoon Kang * Feat: use annotation instead of spec https://github.com/oam-dev/kubevela/issues/3432#issuecomment-1066460269 Signed-off-by: Sunghoon Kang * Refactor: unify service account setter caller https://github.com/oam-dev/kubevela/pull/3434#discussion_r825853612 Signed-off-by: Sunghoon Kang * Refactor: rename GetServiceAccountName https://github.com/oam-dev/kubevela/pull/3434#discussion_r826514565 Signed-off-by: Sunghoon Kang --- cmd/core/main.go | 2 + pkg/auth/round_trippers.go | 69 +++++++++ pkg/auth/round_trippers_test.go | 87 +++++++++++ .../v1alpha2/application/generator.go | 2 +- pkg/multicluster/proxy.go | 38 ++--- pkg/oam/auxiliary.go | 8 + pkg/oam/labels.go | 4 + pkg/oam/util/helper.go | 27 ++++ pkg/resourcekeeper/delete.go | 5 +- pkg/resourcekeeper/dispatch.go | 2 + pkg/resourcekeeper/statekeep.go | 10 +- pkg/utils/round_trippers.go | 41 +++++ pkg/workflow/providers/kube/handle.go | 22 ++- pkg/workflow/tasks/discover.go | 2 +- test/e2e-test/application_test.go | 145 ++++++++++++++++-- test/e2e-test/testdata/app/app11.yaml | 15 ++ 16 files changed, 443 insertions(+), 36 deletions(-) create mode 100644 pkg/auth/round_trippers.go create mode 100644 pkg/auth/round_trippers_test.go create mode 100644 pkg/utils/round_trippers.go create mode 100644 test/e2e-test/testdata/app/app11.yaml diff --git a/cmd/core/main.go b/cmd/core/main.go index 9f064cd42..8f9eadd7e 100644 --- a/cmd/core/main.go +++ b/cmd/core/main.go @@ -35,6 +35,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/healthz" + "github.com/oam-dev/kubevela/pkg/auth" ctrlClient "github.com/oam-dev/kubevela/pkg/client" standardcontroller "github.com/oam-dev/kubevela/pkg/controller" commonconfig "github.com/oam-dev/kubevela/pkg/controller/common" @@ -198,6 +199,7 @@ func main() { 
restConfig.UserAgent = kubevelaName + "/" + version.GitRevision restConfig.QPS = float32(qps) restConfig.Burst = burst + restConfig.Wrap(auth.NewImpersonatingRoundTripper) // wrap the round tripper to impersonate the service account carried in the request context if enableClusterGateway { diff --git a/pkg/auth/round_trippers.go b/pkg/auth/round_trippers.go new file mode 100644 index 000000000..4cc78b43c --- /dev/null +++ b/pkg/auth/round_trippers.go @@ -0,0 +1,69 @@ +/* + + Copyright 2021 The KubeVela Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/ + +package auth + +import ( + "net/http" + + utilnet "k8s.io/apimachinery/pkg/util/net" + "k8s.io/client-go/transport" + + "github.com/oam-dev/kubevela/pkg/multicluster" + oamutil "github.com/oam-dev/kubevela/pkg/oam/util" + "github.com/oam-dev/kubevela/pkg/utils" +) + +var _ utilnet.RoundTripperWrapper = &impersonatingRoundTripper{} + +type impersonatingRoundTripper struct { + rt http.RoundTripper +} + +// NewImpersonatingRoundTripper will add an ImpersonateUser header to a request +// if the context has a specific user to act as. 
+func NewImpersonatingRoundTripper(rt http.RoundTripper) http.RoundTripper { + return &impersonatingRoundTripper{ + rt: rt, + } +} + +func (rt *impersonatingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + ctx := req.Context() + + // Skip impersonation on non-local cluster requests + if !multicluster.IsInLocalCluster(ctx) { + return rt.rt.RoundTrip(req) + } + + sa := oamutil.GetServiceAccountInContext(ctx) + if sa == "" { + return rt.rt.RoundTrip(req) + } + req = req.Clone(req.Context()) + req.Header.Set(transport.ImpersonateUserHeader, sa) + return rt.rt.RoundTrip(req) +} + +func (rt *impersonatingRoundTripper) CancelRequest(req *http.Request) { + utils.TryCancelRequest(rt.WrappedRoundTripper(), req) +} + +func (rt *impersonatingRoundTripper) WrappedRoundTripper() http.RoundTripper { + return rt.rt +} diff --git a/pkg/auth/round_trippers_test.go b/pkg/auth/round_trippers_test.go new file mode 100644 index 000000000..07d7139eb --- /dev/null +++ b/pkg/auth/round_trippers_test.go @@ -0,0 +1,87 @@ +/* + + Copyright 2021 The KubeVela Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +*/ + +package auth + +import ( + "context" + "net/http" + "testing" + + "github.com/stretchr/testify/require" + "k8s.io/client-go/transport" + + "github.com/oam-dev/kubevela/pkg/multicluster" + oamutil "github.com/oam-dev/kubevela/pkg/oam/util" +) + +type testRoundTripper struct { + Request *http.Request + Response *http.Response + Err error +} + +func (rt *testRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + rt.Request = req + return rt.Response, rt.Err +} + +func TestImpersonatingRoundTripper(t *testing.T) { + testSets := map[string]struct { + ctxFn func(context.Context) context.Context + expected string + }{ + "with service account": { + ctxFn: func(ctx context.Context) context.Context { + ctx = oamutil.SetServiceAccountInContext(ctx, "vela-system", "default") + return ctx + }, + expected: "system:serviceaccount:vela-system:default", + }, + "without service account": { + ctxFn: func(ctx context.Context) context.Context { + return ctx + }, + expected: "", + }, + "ignore if non-local cluster request": { + ctxFn: func(ctx context.Context) context.Context { + ctx = multicluster.ContextWithClusterName(ctx, "test-cluster") + ctx = oamutil.SetServiceAccountInContext(ctx, "vela-system", "default") + return ctx + }, + expected: "", + }, + } + for name, ts := range testSets { + t.Run(name, func(t *testing.T) { + ctx := ts.ctxFn(context.TODO()) + req, _ := http.NewRequest(http.MethodGet, "/", nil) + req = req.WithContext(ctx) + rt := &testRoundTripper{} + _, err := NewImpersonatingRoundTripper(rt).RoundTrip(req) + require.NoError(t, err) + if ts.expected == "" { + _, ok := rt.Request.Header[transport.ImpersonateUserHeader] + require.False(t, ok) + return + } + require.Equal(t, ts.expected, rt.Request.Header.Get(transport.ImpersonateUserHeader)) + }) + } +} diff --git a/pkg/controller/core.oam.dev/v1alpha2/application/generator.go b/pkg/controller/core.oam.dev/v1alpha2/application/generator.go index d8cd5b9ff..49d1e57ba 100644 --- 
a/pkg/controller/core.oam.dev/v1alpha2/application/generator.go +++ b/pkg/controller/core.oam.dev/v1alpha2/application/generator.go @@ -63,7 +63,7 @@ func (h *AppHandler) GenerateApplicationSteps(ctx context.Context, appRev *v1beta1.ApplicationRevision) ([]wfTypes.TaskRunner, error) { handlerProviders := providers.NewProviders() - kube.Install(handlerProviders, h.r.Client, h.Dispatch, h.Delete) + kube.Install(handlerProviders, app, h.r.Client, h.Dispatch, h.Delete) oamProvider.Install(handlerProviders, app, af, h.r.Client, h.applyComponentFunc( appParser, appRev, af), h.renderComponentFunc(appParser, appRev, af)) http.Install(handlerProviders, h.r.Client, app.Namespace) diff --git a/pkg/multicluster/proxy.go b/pkg/multicluster/proxy.go index 10ef260b2..f4de2f693 100644 --- a/pkg/multicluster/proxy.go +++ b/pkg/multicluster/proxy.go @@ -20,13 +20,15 @@ import ( "net/http" "strings" + clusterapi "github.com/oam-dev/cluster-gateway/pkg/apis/cluster/v1alpha1" utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/client-go/transport" - "k8s.io/klog/v2" - clusterapi "github.com/oam-dev/cluster-gateway/pkg/apis/cluster/v1alpha1" + "github.com/oam-dev/kubevela/pkg/utils" ) +var _ utilnet.RoundTripperWrapper = &secretMultiClusterRoundTripper{} + type secretMultiClusterRoundTripper struct { rt http.RoundTripper } @@ -56,27 +58,17 @@ func (rt *secretMultiClusterRoundTripper) RoundTrip(req *http.Request) (*http.Re return rt.rt.RoundTrip(req) } -func tryCancelRequest(rt http.RoundTripper, req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - switch rt := rt.(type) { - case canceler: - rt.CancelRequest(req) - case utilnet.RoundTripperWrapper: - tryCancelRequest(rt.WrappedRoundTripper(), req) - default: - klog.Warningf("Unable to cancel request for %T", rt) - } -} - // CancelRequest will try cancel request with the inner round tripper func (rt *secretMultiClusterRoundTripper) CancelRequest(req *http.Request) { - 
tryCancelRequest(rt.WrappedRoundTripper(), req) + utils.TryCancelRequest(rt.WrappedRoundTripper(), req) } // WrappedRoundTripper can get the wrapped RoundTripper -func (rt *secretMultiClusterRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } +func (rt *secretMultiClusterRoundTripper) WrappedRoundTripper() http.RoundTripper { + return rt.rt +} + +var _ utilnet.RoundTripperWrapper = &secretMultiClusterRoundTripperForCluster{} type secretMultiClusterRoundTripperForCluster struct { rt http.RoundTripper @@ -93,6 +85,16 @@ func (rt *secretMultiClusterRoundTripperForCluster) RoundTrip(req *http.Request) return rt.rt.RoundTrip(req) } +// CancelRequest will try cancel request with the inner round tripper +func (rt *secretMultiClusterRoundTripperForCluster) CancelRequest(req *http.Request) { + utils.TryCancelRequest(rt.WrappedRoundTripper(), req) +} + +// WrappedRoundTripper can get the wrapped RoundTripper +func (rt *secretMultiClusterRoundTripperForCluster) WrappedRoundTripper() http.RoundTripper { + return rt.rt +} + // NewSecretModeMultiClusterRoundTripperForCluster will re-write the API path to the specific cluster func NewSecretModeMultiClusterRoundTripperForCluster(rt http.RoundTripper, clusterName string) http.RoundTripper { return &secretMultiClusterRoundTripperForCluster{ diff --git a/pkg/oam/auxiliary.go b/pkg/oam/auxiliary.go index 6116692f5..4007b7da2 100644 --- a/pkg/oam/auxiliary.go +++ b/pkg/oam/auxiliary.go @@ -33,3 +33,11 @@ func GetCluster(o client.Object) string { } return "" } + +// GetServiceAccountNameFromAnnotations extracts the service account name from the given object's annotations. 
+func GetServiceAccountNameFromAnnotations(o client.Object) string { + if annotations := o.GetAnnotations(); annotations != nil { + return annotations[AnnotationServiceAccountName] + } + return "" +} diff --git a/pkg/oam/labels.go b/pkg/oam/labels.go index 195a67762..3867cff9f 100644 --- a/pkg/oam/labels.go +++ b/pkg/oam/labels.go @@ -185,4 +185,8 @@ const ( // AnnotationControllerRequirement indicates the controller version that can process the application. AnnotationControllerRequirement = "app.oam.dev/controller-version-require" + + // AnnotationServiceAccountName indicates the name of the ServiceAccount to use to apply Components and run Workflow. + // ServiceAccount will be used in the local cluster only. + AnnotationServiceAccountName = "app.oam.dev/service-account-name" ) diff --git a/pkg/oam/util/helper.go b/pkg/oam/util/helper.go index 8dea9e04d..3b4413033 100644 --- a/pkg/oam/util/helper.go +++ b/pkg/oam/util/helper.go @@ -146,6 +146,13 @@ const ( AppDefinitionNamespace namespaceContextKey = iota ) +type serviceAccountContextKey int + +const ( + // ServiceAccountContextKey is the context key to define the service account for the app + ServiceAccountContextKey serviceAccountContextKey = iota +) + // A ConditionedObject is an Object type with condition field type ConditionedObject interface { client.Object @@ -300,6 +307,26 @@ func SetNamespaceInCtx(ctx context.Context, namespace string) context.Context { return ctx } +// GetServiceAccountInContext returns the name of the service account which reconciles the app from the context. +func GetServiceAccountInContext(ctx context.Context) string { + if serviceAccount, ok := ctx.Value(ServiceAccountContextKey).(string); ok { + return serviceAccount + } + return "" +} + +// SetServiceAccountInContext sets the name of the service account which reconciles the app. 
+func SetServiceAccountInContext(ctx context.Context, namespace, name string) context.Context { + if name == "" { + // We may set `default` service account when the service account name is omitted. + // However, setting `default` service account will break existing cluster-scoped applications, + // so it would be better to give users a migration term. + // TODO(devholic): Use `default` service account if omitted. + return ctx + } + return context.WithValue(ctx, ServiceAccountContextKey, fmt.Sprintf("system:serviceaccount:%s:%s", namespace, name)) +} + // GetDefinition get definition from two level namespace func GetDefinition(ctx context.Context, cli client.Reader, definition client.Object, definitionName string) error { if dns := os.Getenv(DefinitionNamespaceEnv); dns != "" { diff --git a/pkg/resourcekeeper/delete.go b/pkg/resourcekeeper/delete.go index d3cf0e1c0..bc7ca939c 100644 --- a/pkg/resourcekeeper/delete.go +++ b/pkg/resourcekeeper/delete.go @@ -26,6 +26,7 @@ import ( "github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1" "github.com/oam-dev/kubevela/pkg/multicluster" "github.com/oam-dev/kubevela/pkg/oam" + oamutil "github.com/oam-dev/kubevela/pkg/oam/util" "github.com/oam-dev/kubevela/pkg/resourcetracker" ) @@ -85,7 +86,9 @@ func (h *resourceKeeper) delete(ctx context.Context, manifest *unstructured.Unst } } // 2. 
delete manifests - if err = h.Client.Delete(multicluster.ContextWithClusterName(ctx, oam.GetCluster(manifest)), manifest); err != nil && !kerrors.IsNotFound(err) { + deleteCtx := multicluster.ContextWithClusterName(ctx, oam.GetCluster(manifest)) + deleteCtx = oamutil.SetServiceAccountInContext(deleteCtx, h.app.Namespace, oam.GetServiceAccountNameFromAnnotations(h.app)) + if err = h.Client.Delete(deleteCtx, manifest); err != nil && !kerrors.IsNotFound(err) { return errors.Wrapf(err, "cannot delete manifest, name: %s apiVersion: %s kind: %s", manifest.GetName(), manifest.GetAPIVersion(), manifest.GetKind()) } return nil diff --git a/pkg/resourcekeeper/dispatch.go b/pkg/resourcekeeper/dispatch.go index 843dabded..c098e3796 100644 --- a/pkg/resourcekeeper/dispatch.go +++ b/pkg/resourcekeeper/dispatch.go @@ -24,6 +24,7 @@ import ( "github.com/oam-dev/kubevela/pkg/multicluster" "github.com/oam-dev/kubevela/pkg/oam" + oamutil "github.com/oam-dev/kubevela/pkg/oam/util" "github.com/oam-dev/kubevela/pkg/resourcetracker" "github.com/oam-dev/kubevela/pkg/utils/apply" velaerrors "github.com/oam-dev/kubevela/pkg/utils/errors" @@ -121,6 +122,7 @@ func (h *resourceKeeper) dispatch(ctx context.Context, manifests []*unstructured applyOpts := []apply.ApplyOption{apply.MustBeControlledByApp(h.app), apply.NotUpdateRenderHashEqual()} errs := parallel.Run(func(manifest *unstructured.Unstructured) error { applyCtx := multicluster.ContextWithClusterName(ctx, oam.GetCluster(manifest)) + applyCtx = oamutil.SetServiceAccountInContext(applyCtx, h.app.Namespace, oam.GetServiceAccountNameFromAnnotations(h.app)) return h.applicator.Apply(applyCtx, manifest, applyOpts...) 
}, manifests, MaxDispatchConcurrent) return velaerrors.AggregateErrors(errs.([]error)) diff --git a/pkg/resourcekeeper/statekeep.go b/pkg/resourcekeeper/statekeep.go index 7b44c6880..950b5283d 100644 --- a/pkg/resourcekeeper/statekeep.go +++ b/pkg/resourcekeeper/statekeep.go @@ -23,6 +23,8 @@ import ( "github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1" "github.com/oam-dev/kubevela/pkg/multicluster" + "github.com/oam-dev/kubevela/pkg/oam" + oamutil "github.com/oam-dev/kubevela/pkg/oam/util" "github.com/oam-dev/kubevela/pkg/utils/apply" ) @@ -40,7 +42,9 @@ func (h *resourceKeeper) StateKeep(ctx context.Context) error { } if mr.Deleted { if entry.exists && entry.obj != nil && entry.obj.GetDeletionTimestamp() == nil { - if err := h.Client.Delete(multicluster.ContextWithClusterName(ctx, mr.Cluster), entry.obj); err != nil { + deleteCtx := multicluster.ContextWithClusterName(ctx, mr.Cluster) + deleteCtx = oamutil.SetServiceAccountInContext(deleteCtx, h.app.Namespace, oam.GetServiceAccountNameFromAnnotations(h.app)) + if err := h.Client.Delete(deleteCtx, entry.obj); err != nil { return errors.Wrapf(err, "failed to delete outdated resource %s in resourcetracker %s", mr.ResourceKey(), rt.Name) } } @@ -53,7 +57,9 @@ func (h *resourceKeeper) StateKeep(ctx context.Context) error { if err != nil { return errors.Wrapf(err, "failed to decode resource %s from resourcetracker", mr.ResourceKey()) } - if err = h.applicator.Apply(multicluster.ContextWithClusterName(ctx, mr.Cluster), manifest, apply.MustBeControlledByApp(h.app)); err != nil { + applyCtx := multicluster.ContextWithClusterName(ctx, mr.Cluster) + applyCtx = oamutil.SetServiceAccountInContext(applyCtx, h.app.Namespace, oam.GetServiceAccountNameFromAnnotations(h.app)) + if err = h.applicator.Apply(applyCtx, manifest, apply.MustBeControlledByApp(h.app)); err != nil { return errors.Wrapf(err, "failed to re-apply resource %s from resourcetracker %s", mr.ResourceKey(), rt.Name) } } diff --git a/pkg/utils/round_trippers.go 
b/pkg/utils/round_trippers.go new file mode 100644 index 000000000..6003f7cac --- /dev/null +++ b/pkg/utils/round_trippers.go @@ -0,0 +1,41 @@ +/* + + Copyright 2021 The KubeVela Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/ + +package utils + +import ( + "net/http" + + utilnet "k8s.io/apimachinery/pkg/util/net" + "k8s.io/klog/v2" +) + +// TryCancelRequest tries to cancel the request by traversing round trippers +func TryCancelRequest(rt http.RoundTripper, req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + switch rt := rt.(type) { + case canceler: + rt.CancelRequest(req) + case utilnet.RoundTripperWrapper: + TryCancelRequest(rt.WrappedRoundTripper(), req) + default: + klog.Warningf("Unable to cancel request for %T", rt) + } +} diff --git a/pkg/workflow/providers/kube/handle.go b/pkg/workflow/providers/kube/handle.go index cd8f99390..872c5c865 100644 --- a/pkg/workflow/providers/kube/handle.go +++ b/pkg/workflow/providers/kube/handle.go @@ -24,9 +24,12 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "github.com/oam-dev/kubevela/apis/core.oam.dev/common" + "github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1" "github.com/oam-dev/kubevela/pkg/cue/model" "github.com/oam-dev/kubevela/pkg/cue/model/value" "github.com/oam-dev/kubevela/pkg/multicluster" + "github.com/oam-dev/kubevela/pkg/oam" + oamutil "github.com/oam-dev/kubevela/pkg/oam/util" wfContext "github.com/oam-dev/kubevela/pkg/workflow/context" 
"github.com/oam-dev/kubevela/pkg/workflow/providers" "github.com/oam-dev/kubevela/pkg/workflow/types" @@ -44,6 +47,7 @@ type Dispatcher func(ctx context.Context, cluster string, owner common.ResourceC type Deleter func(ctx context.Context, cluster string, owner common.ResourceCreatorRole, manifest *unstructured.Unstructured) error type provider struct { + app *v1beta1.Application apply Dispatcher delete Deleter cli client.Client @@ -85,6 +89,7 @@ func (h *provider) Apply(ctx wfContext.Context, v *value.Value, act types.Action return err } deployCtx := multicluster.ContextWithClusterName(context.Background(), cluster) + deployCtx = h.setServiceAccountInContext(deployCtx) if err := h.apply(deployCtx, cluster, common.WorkflowResourceCreator, workload); err != nil { return err } @@ -119,6 +124,7 @@ func (h *provider) ApplyInParallel(ctx wfContext.Context, v *value.Value, act ty return err } deployCtx := multicluster.ContextWithClusterName(context.Background(), cluster) + deployCtx = h.setServiceAccountInContext(deployCtx) if err = h.apply(deployCtx, cluster, common.WorkflowResourceCreator, workloads...); err != nil { return v.FillObject(err, "err") } @@ -145,6 +151,7 @@ func (h *provider) Read(ctx wfContext.Context, v *value.Value, act types.Action) return err } readCtx := multicluster.ContextWithClusterName(context.Background(), cluster) + readCtx = h.setServiceAccountInContext(readCtx) if err := h.cli.Get(readCtx, key, obj); err != nil { return v.FillObject(err.Error(), "err") } @@ -187,6 +194,7 @@ func (h *provider) List(ctx wfContext.Context, v *value.Value, act types.Action) client.MatchingLabels(filter.MatchingLabels), } readCtx := multicluster.ContextWithClusterName(context.Background(), cluster) + readCtx = h.setServiceAccountInContext(readCtx) if err := h.cli.List(readCtx, list, listOpts...); err != nil { return v.FillObject(err.Error(), "err") } @@ -208,15 +216,27 @@ func (h *provider) Delete(ctx wfContext.Context, v *value.Value, act types.Actio return err } 
deleteCtx := multicluster.ContextWithClusterName(context.Background(), cluster) + deleteCtx = h.setServiceAccountInContext(deleteCtx) if err := h.delete(deleteCtx, cluster, common.WorkflowResourceCreator, obj); err != nil { return v.FillObject(err.Error(), "err") } return nil } +func (h *provider) setServiceAccountInContext(ctx context.Context) context.Context { + if h.app == nil { + return ctx + } + return oamutil.SetServiceAccountInContext(ctx, h.app.Namespace, oam.GetServiceAccountNameFromAnnotations(h.app)) +} + // Install register handlers to provider discover. -func Install(p providers.Providers, cli client.Client, apply Dispatcher, deleter Deleter) { +func Install(p providers.Providers, app *v1beta1.Application, cli client.Client, apply Dispatcher, deleter Deleter) { + if app != nil { + app = app.DeepCopy() + } prd := &provider{ + app: app, apply: apply, delete: deleter, cli: cli, diff --git a/pkg/workflow/tasks/discover.go b/pkg/workflow/tasks/discover.go index 4f58a5ae7..308fd19e1 100644 --- a/pkg/workflow/tasks/discover.go +++ b/pkg/workflow/tasks/discover.go @@ -123,7 +123,7 @@ func NewViewTaskDiscover(pd *packages.PackageDiscover, cli client.Client, cfg *r // install builtin provider query.Install(handlerProviders, cli, cfg) time.Install(handlerProviders) - kube.Install(handlerProviders, cli, apply, delete) + kube.Install(handlerProviders, nil, cli, apply, delete) http.Install(handlerProviders, cli, viewNs) email.Install(handlerProviders) diff --git a/test/e2e-test/application_test.go b/test/e2e-test/application_test.go index d729a441f..16fbad449 100644 --- a/test/e2e-test/application_test.go +++ b/test/e2e-test/application_test.go @@ -25,14 +25,15 @@ import ( . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" - v1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" oamcomm "github.com/oam-dev/kubevela/apis/core.oam.dev/common" "github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1" + "github.com/oam-dev/kubevela/pkg/oam" "github.com/oam-dev/kubevela/pkg/oam/util" "github.com/oam-dev/kubevela/pkg/utils/common" ) @@ -72,6 +73,20 @@ var _ = Describe("Application Normal tests", func() { time.Second*3, time.Millisecond*300).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{})) } + createServiceAccount := func(ns, name string) { + sa := corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns, + Name: name, + }, + } + Eventually( + func() error { + return k8sClient.Create(ctx, &sa) + }, + time.Second*3, time.Millisecond*300).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{})) + } + applyApp := func(source string) { By("Apply an application") var newApp v1beta1.Application @@ -106,6 +121,20 @@ var _ = Describe("Application Normal tests", func() { }, time.Second*5, time.Millisecond*500).Should(Succeed()) } + verifyApplicationWorkflowSuspending := func(ns, appName string) { + var testApp v1beta1.Application + Eventually(func() error { + err := k8sClient.Get(ctx, client.ObjectKey{Namespace: ns, Name: appName}, &testApp) + if err != nil { + return err + } + if testApp.Status.Phase != oamcomm.ApplicationWorkflowSuspending { + return fmt.Errorf("application status wants %s, actually %s", oamcomm.ApplicationWorkflowSuspending, testApp.Status.Phase) + } + return nil + }, 120*time.Second, time.Second).Should(BeNil()) + } + verifyWorkloadRunningExpected := func(workloadName string, replicas int32, image string) { var workload v1.Deployment By("Verify Workload running as expected") @@ -252,16 +281,108 @@ var _ = Describe("Application Normal tests", func() { Expect(k8sClient.Create(ctx, &newApp)).Should(BeNil()) By("check 
application status") - testApp := new(v1beta1.Application) - Eventually(func() error { - err := k8sClient.Get(ctx, client.ObjectKey{Namespace: namespaceName, Name: newApp.Name}, testApp) - if err != nil { - return err - } - if testApp.Status.Phase != oamcomm.ApplicationWorkflowSuspending { - return fmt.Errorf("error application status wants %s, actually %s", oamcomm.ApplicationWorkflowSuspending, testApp.Status.Phase) - } - return nil - }, 60*time.Second).Should(BeNil()) + verifyApplicationWorkflowSuspending(newApp.Namespace, newApp.Name) + }) + + It("Test app with ServiceAccount", func() { + By("Creating a ServiceAccount") + const saName = "app-service-account" + createServiceAccount(namespaceName, saName) + + By("Creating Role and RoleBinding") + const roleName = "worker" + role := rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespaceName, + Name: roleName, + }, + Rules: []rbacv1.PolicyRule{ + { + Verbs: []string{rbacv1.VerbAll}, + APIGroups: []string{"apps"}, + Resources: []string{"deployments"}, + }, + }, + } + Expect(k8sClient.Create(ctx, &role)).Should(BeNil()) + + roleBinding := rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespaceName, + Name: roleName + "-binding", + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: saName, + Namespace: namespaceName, + }, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, + Kind: "Role", + Name: roleName, + }, + } + Expect(k8sClient.Create(ctx, &roleBinding)).Should(BeNil()) + + By("Creating an application") + var newApp v1beta1.Application + Expect(common.ReadYamlToObject("testdata/app/app11.yaml", &newApp)).Should(BeNil()) + newApp.Namespace = namespaceName + annotations := newApp.GetAnnotations() + annotations[oam.AnnotationServiceAccountName] = saName + newApp.SetAnnotations(annotations) + Expect(k8sClient.Create(ctx, &newApp)).Should(BeNil()) + + By("Checking an application status") + verifyWorkloadRunningExpected("myweb", 1, 
"stefanprodan/podinfo:4.0.3") + verifyComponentRevision("myweb", 1) + }) + + It("Test app with ServiceAccount which has no permission for the component", func() { + By("Creating a ServiceAccount") + const saName = "dummy-service-account" + createServiceAccount(namespaceName, saName) + + By("Creating an application") + var newApp v1beta1.Application + Expect(common.ReadYamlToObject("testdata/app/app11.yaml", &newApp)).Should(BeNil()) + newApp.Namespace = namespaceName + annotations := newApp.GetAnnotations() + annotations[oam.AnnotationServiceAccountName] = saName + newApp.SetAnnotations(annotations) + Expect(k8sClient.Create(ctx, &newApp)).Should(BeNil()) + + By("Checking an application status") + verifyApplicationWorkflowSuspending(newApp.Namespace, newApp.Name) + }) + + It("Test app with non-existence ServiceAccount", func() { + By("Ensuring that given service account doesn't exists") + const saName = "not-existing-service-account" + sa := corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespaceName, + Name: saName, + }, + } + Eventually( + func() error { + return k8sClient.Delete(ctx, &sa) + }, + time.Second*3, time.Millisecond*300).Should(SatisfyAny(BeNil(), &util.NotFoundMatcher{})) + + By("Creating an application") + var newApp v1beta1.Application + Expect(common.ReadYamlToObject("testdata/app/app11.yaml", &newApp)).Should(BeNil()) + newApp.Namespace = namespaceName + annotations := newApp.GetAnnotations() + annotations[oam.AnnotationServiceAccountName] = saName + newApp.SetAnnotations(annotations) + Expect(k8sClient.Create(ctx, &newApp)).Should(BeNil()) + + By("Checking an application status") + verifyApplicationWorkflowSuspending(newApp.Namespace, newApp.Name) }) }) diff --git a/test/e2e-test/testdata/app/app11.yaml b/test/e2e-test/testdata/app/app11.yaml new file mode 100644 index 000000000..f4e348a49 --- /dev/null +++ b/test/e2e-test/testdata/app/app11.yaml @@ -0,0 +1,15 @@ +apiVersion: core.oam.dev/v1beta1 +kind: Application 
+metadata: + name: app-service-account-e2e + annotations: + app.oam.dev/service-account-name: default +spec: + components: + - name: myweb + type: worker + properties: + image: "stefanprodan/podinfo:4.0.3" + cmd: + - ./podinfo + - stress-cpu=1