mirror of https://github.com/open-cluster-management-io/ocm.git
rewrite webhook using controller runtime (#278)
Signed-off-by: ldpliu <daliu@redhat.com>
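The gist of the change: the registration webhooks move from the aggregated-API admission server to a controller-runtime webhook server. A minimal sketch of that wiring, assuming a scheme with the cluster APIs installed (names and values here are illustrative, not the project's exact code):

package main

import (
	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	ctrl "sigs.k8s.io/controller-runtime"
)

func main() {
	// Build a scheme and a manager; the manager owns the webhook server.
	scheme := runtime.NewScheme()
	_ = clientgoscheme.AddToScheme(scheme)

	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		Scheme:                 scheme,
		Port:                   9443,    // webhook serving port (matches the Deployment below)
		HealthProbeBindAddress: ":8000", // assumed to back the :8000 healthz/readyz probes
	})
	if err != nil {
		panic(err)
	}

	// Each webhook type registers itself with the manager; controller-runtime
	// serves it on a generated /validate-... or /mutate-... path,
	// e.g. (&ManagedClusterWebhook{}).Init(mgr)

	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		panic(err)
	}
}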
2 Makefile
@@ -58,6 +58,7 @@ deploy-hub: ensure-kustomize

deploy-webhook: ensure-kustomize
	cp deploy/webhook/managedclustersets_conversion_webhook.crd.yaml deploy/webhook/managedclustersets_conversion_webhook.crd.yaml.tmp
	cp deploy/webhook/webhook.yaml deploy/webhook/webhook.yaml.tmp
	bash -x hack/inject-ca.sh
	cp deploy/webhook/kustomization.yaml deploy/webhook/kustomization.yaml.tmp
	cd deploy/webhook && ../../$(KUSTOMIZE) edit set image quay.io/open-cluster-management/registration:latest=$(IMAGE_NAME)
@@ -65,6 +66,7 @@ deploy-webhook: ensure-kustomize
	$(KUSTOMIZE) build deploy/webhook | $(KUBECTL) --kubeconfig $(HUB_KUBECONFIG) apply -f -
	mv deploy/webhook/kustomization.yaml.tmp deploy/webhook/kustomization.yaml
	mv -f deploy/webhook/managedclustersets_conversion_webhook.crd.yaml.tmp deploy/webhook/managedclustersets_conversion_webhook.crd.yaml
	mv deploy/webhook/webhook.yaml.tmp deploy/webhook/webhook.yaml

cluster-ip:
	$(KUBECTL) config use-context $(HUB_KUBECONFIG_CONTEXT) --kubeconfig $(HUB_KUBECONFIG)
@@ -16,7 +16,7 @@ import (
	"open-cluster-management.io/registration/pkg/cmd/hub"
	"open-cluster-management.io/registration/pkg/cmd/spoke"
	"open-cluster-management.io/registration/pkg/cmd/webhook"
	"open-cluster-management.io/registration/pkg/cmd/webhook/conversion"
	"open-cluster-management.io/registration/pkg/cmd/webhook/admission"
	"open-cluster-management.io/registration/pkg/version"
)

@@ -60,8 +60,7 @@ func newRegistrationCommand() *cobra.Command {

	cmd.AddCommand(hub.NewController())
	cmd.AddCommand(spoke.NewAgent())
	cmd.AddCommand(webhook.NewAdmissionHook())
	//Conversion webhook
	cmd.AddCommand(conversion.NewWebhook())
	cmd.AddCommand(admission.NewAdmissionHook())
	cmd.AddCommand(webhook.NewWebhook())
	return cmd
}
@@ -1,13 +0,0 @@
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  name: v1.admission.cluster.open-cluster-management.io
spec:
  group: admission.cluster.open-cluster-management.io
  version: v1
  service:
    name: managedcluster-admission
    namespace: open-cluster-management-hub
  insecureSkipTLSVerify: true
  groupPriorityMinimum: 10000
  versionPriority: 20
@@ -16,30 +16,27 @@ spec:
    spec:
      serviceAccountName: managedcluster-admission-sa
      containers:
      - name: managedcluster-admission
        image: quay.io/open-cluster-management/registration:latest
        imagePullPolicy: IfNotPresent
        args:
        - "/registration"
        - "webhook"
        - "--cert-dir=/tmp"
        - "--secure-port=6443"
        # webhook is not hosting any k8s api resource, so it is not subjected to APF feature
        - "--feature-gates=DefaultClusterSet=true,APIPriorityAndFairness=false"
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            drop:
            - ALL
          privileged: false
          runAsNonRoot: true
      - name: conversion-webhook
      - name: webhook
        image: quay.io/open-cluster-management/registration:latest
        args:
        - /registration
        - "webhook-server"
        - "port=9443"
        - "--feature-gates=DefaultClusterSet=true"
        imagePullPolicy: IfNotPresent
        livenessProbe:
          httpGet:
            path: /healthz
            scheme: HTTP
            port: 8000
          initialDelaySeconds: 2
          periodSeconds: 10
        readinessProbe:
          httpGet:
            path: /readyz
            scheme: HTTP
            port: 8000
          initialDelaySeconds: 2
        ports:
        - containerPort: 9443
          protocol: TCP
@@ -25,7 +25,6 @@ namespace: open-cluster-management-hub
# here. These are deployed via a "make install" dependency.

resources:
- ./apiservice.yaml
- ./clusterrole_binding.yaml
- ./clusterrole.yaml
- ./deployment.yaml
@@ -6,9 +6,6 @@ spec:
  selector:
    app: managedcluster-admission
  ports:
  - name: webhook-server
    port: 443
    targetPort: 6443
  - name: conversion-webhook
    port: 9443
    targetPort: 9443
@@ -7,10 +7,11 @@ webhooks:
  failurePolicy: Fail
  clientConfig:
    service:
      # reach the webhook via the registered aggregated API
      namespace: default
      name: kubernetes
      path: /apis/admission.cluster.open-cluster-management.io/v1/managedclustervalidators
      namespace: open-cluster-management-hub
      name: managedcluster-admission
      path: /validate-cluster-open-cluster-management-io-v1-managedcluster
      port: 9443
    caBundle: CA_PLACE_HOLDER
  rules:
  - operations:
    - CREATE
@@ -21,7 +22,7 @@ webhooks:
    - "*"
    resources:
    - managedclusters
  admissionReviewVersions: ["v1beta1"]
  admissionReviewVersions: ["v1beta1","v1"]
  sideEffects: None
  timeoutSeconds: 3

@@ -36,10 +37,11 @@ webhooks:
  failurePolicy: Fail
  clientConfig:
    service:
      # reach the webhook via the registered aggregated API
      namespace: default
      name: kubernetes
      path: /apis/admission.cluster.open-cluster-management.io/v1/managedclustermutators
      namespace: open-cluster-management-hub
      name: managedcluster-admission
      path: /mutate-cluster-open-cluster-management-io-v1-managedcluster
      port: 9443
    caBundle: CA_PLACE_HOLDER
  rules:
  - operations:
    - CREATE
@@ -50,7 +52,7 @@ webhooks:
    - "*"
    resources:
    - managedclusters
  admissionReviewVersions: ["v1beta1"]
  admissionReviewVersions: ["v1beta1","v1"]
  sideEffects: None
  timeoutSeconds: 3

@@ -65,10 +67,11 @@ webhooks:
  failurePolicy: Fail
  clientConfig:
    service:
      # reach the webhook via the registered aggregated API
      namespace: default
      name: kubernetes
      path: /apis/admission.cluster.open-cluster-management.io/v1/managedclustersetbindingvalidators
      namespace: open-cluster-management-hub
      name: managedcluster-admission
      path: /validate-cluster-open-cluster-management-io-v1beta1-managedclustersetbinding
      port: 9443
    caBundle: CA_PLACE_HOLDER
  rules:
  - operations:
    - CREATE
@@ -76,9 +79,39 @@ webhooks:
    apiGroups:
    - cluster.open-cluster-management.io
    apiVersions:
    - "*"
    - v1beta1
    resources:
    - managedclustersetbindings
  admissionReviewVersions: ["v1beta1"]
  admissionReviewVersions: ["v1beta1","v1"]
  sideEffects: None
  timeoutSeconds: 3

---

apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
  name: managedclustersetbindingv1beta2validators.admission.cluster.open-cluster-management.io
webhooks:
- name: managedclustersetbindingv1beta2validators.admission.cluster.open-cluster-management.io
  failurePolicy: Fail
  clientConfig:
    service:
      namespace: open-cluster-management-hub
      name: managedcluster-admission
      path: /validate-cluster-open-cluster-management-io-v1beta2-managedclustersetbinding
      port: 9443
    caBundle: CA_PLACE_HOLDER
  rules:
  - operations:
    - CREATE
    - UPDATE
    apiGroups:
    - cluster.open-cluster-management.io
    apiVersions:
    - v1beta2
    resources:
    - managedclustersetbindings
  admissionReviewVersions: ["v1beta1","v1"]
  sideEffects: None
  timeoutSeconds: 3
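The /validate-... and /mutate-... paths above follow controller-runtime's naming convention for builder-registered webhooks: the prefix, the API group with dots replaced by dashes, the version, and the lowercased kind. An illustrative helper (not project code) that reproduces the convention:

package main

import (
	"fmt"
	"strings"
)

// webhookPath mirrors the path controller-runtime generates for a GVK.
func webhookPath(prefix, group, version, kind string) string {
	return "/" + prefix + "-" + strings.ReplaceAll(group, ".", "-") + "-" + version + "-" + strings.ToLower(kind)
}

func main() {
	// Prints /validate-cluster-open-cluster-management-io-v1-managedcluster,
	// matching the clientConfig path in the configuration above.
	fmt.Println(webhookPath("validate", "cluster.open-cluster-management.io", "v1", "ManagedCluster"))
}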
2 go.mod
@@ -21,7 +21,6 @@ require (
	k8s.io/client-go v0.24.3
	k8s.io/component-base v0.24.3
	k8s.io/klog/v2 v2.70.1
	k8s.io/kube-aggregator v0.24.3
	k8s.io/utils v0.0.0-20220713171938-56c0de1e6f5e
	open-cluster-management.io/api v0.8.1-0.20221008072653-71a179ef201c
	sigs.k8s.io/controller-runtime v0.12.3
@@ -117,6 +116,7 @@ require (
	gopkg.in/yaml.v2 v2.4.0 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
	k8s.io/apiextensions-apiserver v0.24.2 // indirect
	k8s.io/kube-aggregator v0.24.3 // indirect
	k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 // indirect
	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30 // indirect
	sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
@@ -8,6 +8,8 @@ CA=`cat $BASE_DIR/cert/tls.crt |base64 -w 0`

sed -i "s/CA_PLACE_HOLDER/${CA}/g" $BASE_DIR/../deploy/webhook/managedclustersets_conversion_webhook.crd.yaml

sed -i "s/CA_PLACE_HOLDER/${CA}/g" $BASE_DIR/../deploy/webhook/webhook.yaml

rm -rf $BASE_DIR/../deploy/webhook/cert

mv -f $BASE_DIR/cert $BASE_DIR/../deploy/webhook/cert
88 pkg/cmd/webhook/admission/webhook.go Normal file
@@ -0,0 +1,88 @@
package admission

import (
	"os"

	admissionserver "github.com/openshift/generic-admission-server/pkg/cmd/server"
	"github.com/spf13/cobra"
	"github.com/spf13/pflag"
	genericapiserver "k8s.io/apiserver/pkg/server"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	clusterwebhook "open-cluster-management.io/registration/pkg/webhook/cluster"
	clustersetbindingwebhook "open-cluster-management.io/registration/pkg/webhook/clustersetbinding"
)

func NewAdmissionHook() *cobra.Command {
	ops := NewOptions()
	cmd := &cobra.Command{
		Use:   "webhook",
		Short: "Start Managed Cluster Admission Server",
		RunE: func(c *cobra.Command, args []string) error {
			stopCh := genericapiserver.SetupSignalHandler()

			if err := ops.ServerOptions.Complete(); err != nil {
				return err
			}
			if err := ops.ServerOptions.Validate(args); err != nil {
				return err
			}
			if err := ops.RunAdmissionServer(ops.ServerOptions, stopCh); err != nil {
				return err
			}
			return nil
		},
	}

	flags := cmd.Flags()
	ops.AddFlags(flags)
	return cmd
}

// Config contains the server (the webhook) cert and key.
type Options struct {
	QPS           float32
	Burst         int
	ServerOptions *admissionserver.AdmissionServerOptions
}

// NewOptions constructs a new set of default options for webhook.
func NewOptions() *Options {
	return &Options{
		QPS:   100.0,
		Burst: 200,
		ServerOptions: admissionserver.NewAdmissionServerOptions(
			os.Stdout,
			os.Stderr,
			&clusterwebhook.ManagedClusterValidatingAdmissionHook{},
			&clusterwebhook.ManagedClusterMutatingAdmissionHook{},
			&clustersetbindingwebhook.ManagedClusterSetBindingValidatingAdmissionHook{}),
	}
}

func (c *Options) AddFlags(fs *pflag.FlagSet) {
	fs.Float32Var(&c.QPS, "max-qps", c.QPS,
		"Maximum QPS to the hub server from this webhook.")
	fs.IntVar(&c.Burst, "max-burst", c.Burst,
		"Maximum burst for throttle.")

	featureGate := utilfeature.DefaultMutableFeatureGate
	featureGate.AddFlag(fs)

	c.ServerOptions.RecommendedOptions.FeatureGate = featureGate
	c.ServerOptions.RecommendedOptions.AddFlags(fs)
}

// change the default QPS and Burst, so rewrite this func
func (c *Options) RunAdmissionServer(o *admissionserver.AdmissionServerOptions, stopCh <-chan struct{}) error {
	config, err := o.Config()
	if err != nil {
		return err
	}
	config.RestConfig.QPS = c.QPS
	config.RestConfig.Burst = c.Burst
	server, err := config.Complete().New()
	if err != nil {
		return err
	}
	return server.GenericAPIServer.PrepareRun().Run(stopCh)
}
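The RunAdmissionServer override above exists only to raise the client-side rate limits before the server is built. For context, a minimal sketch of the same idea (values as above; the client-go defaults it replaces are QPS 5 / Burst 10):

package example

import "k8s.io/client-go/rest"

// tuneThrottle raises the hub client's rate limits the same way the
// options above do before the admission server is constructed.
func tuneThrottle(cfg *rest.Config) {
	cfg.QPS = 100
	cfg.Burst = 200
}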
@@ -1,21 +0,0 @@
package conversion

import (
	"github.com/spf13/cobra"
)

func NewWebhook() *cobra.Command {
	ops := NewOptions()
	cmd := &cobra.Command{
		Use:   "webhook-server",
		Short: "Start the webhook server",
		RunE: func(c *cobra.Command, args []string) error {
			err := ops.RunWebhookServer()
			return err
		},
	}

	flags := cmd.Flags()
	ops.AddFlags(flags)
	return cmd
}
@@ -1,4 +1,4 @@
package conversion
package webhook

import "github.com/spf13/pflag"
@@ -1,4 +1,4 @@
package conversion
package webhook

import (
	"k8s.io/klog/v2"
@@ -8,6 +8,8 @@ import (
	// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
	// to ensure that exec-entrypoint and run can make use of them.
	_ "k8s.io/client-go/plugin/pkg/client/auth"
	clusterv1 "open-cluster-management.io/api/cluster/v1"
	internalv1 "open-cluster-management.io/registration/pkg/webhook/v1"
	internalv1beta1 "open-cluster-management.io/registration/pkg/webhook/v1beta1"
	internalv1beta2 "open-cluster-management.io/registration/pkg/webhook/v1beta2"

@@ -24,6 +26,7 @@ var (

func init() {
	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
	utilruntime.Must(clusterv1.Install(scheme))
	utilruntime.Must(internalv1beta1.Install(scheme))
	utilruntime.Must(internalv1beta2.Install(scheme))
}
@@ -40,6 +43,7 @@ func (c *Options) RunWebhookServer() error {
		klog.Error(err, "unable to start manager")
		return err
	}

	// add healthz/readyz check handler
	if err := mgr.AddHealthzCheck("healthz-ping", healthz.Ping); err != nil {
		klog.Errorf("unable to add healthz check handler: %v", err)
@@ -50,12 +54,25 @@ func (c *Options) RunWebhookServer() error {
		klog.Errorf("unable to add readyz check handler: %v", err)
		return err
	}
	if err = internalv1beta1.SetupWebhookWithManager(mgr); err != nil {
		klog.Error(err, "unable to create webhook", "webhook", "ManagedClusterSet")

	if err = (&internalv1.ManagedClusterWebhook{}).Init(mgr); err != nil {
		klog.Error(err, "unable to create ManagedCluster webhook")
		return err
	}
	if err = internalv1beta2.SetupWebhookWithManager(mgr); err != nil {
		klog.Error(err, "unable to create webhook", "webhook", "ManagedClusterSet")
	if err = (&internalv1beta1.ManagedClusterSetBindingWebhook{}).Init(mgr); err != nil {
		klog.Error(err, "unable to create ManagedClusterSetBinding webhook", "v1beta1")
		return err
	}
	if err = (&internalv1beta2.ManagedClusterSetBindingWebhook{}).Init(mgr); err != nil {
		klog.Error(err, "unable to create ManagedClusterSetBinding webhook", "v1beta1")
		return err
	}
	if err = (&internalv1beta1.ManagedClusterSet{}).SetupWebhookWithManager(mgr); err != nil {
		klog.Error(err, "unable to create ManagedClusterSet webhook", "v1beta1")
		return err
	}
	if err = (&internalv1beta2.ManagedClusterSet{}).SetupWebhookWithManager(mgr); err != nil {
		klog.Error(err, "unable to create ManagedClusterSet webhook", "v1beta2")
		return err
	}
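The Init methods called above register the v1/v1beta1/v1beta2 handlers with the manager. A minimal sketch of what such an Init typically looks like with controller-runtime's webhook builder (the real implementation lives in pkg/webhook/v1 and also wires up the kube client it needs; the exact body here is an assumption):

package v1

import (
	clusterv1 "open-cluster-management.io/api/cluster/v1"
	ctrl "sigs.k8s.io/controller-runtime"
)

// Init registers this type as both the CustomDefaulter and CustomValidator
// for ManagedCluster; controller-runtime then serves it on the generated
// /mutate-... and /validate-... paths referenced by webhook.yaml above.
func (r *ManagedClusterWebhook) Init(mgr ctrl.Manager) error {
	return ctrl.NewWebhookManagedBy(mgr).
		For(&clusterv1.ManagedCluster{}).
		WithDefaulter(r).
		WithValidator(r).
		Complete()
}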
@@ -1,88 +1,24 @@
package webhook

import (
	"os"

	admissionserver "github.com/openshift/generic-admission-server/pkg/cmd/server"
	"github.com/spf13/cobra"
	"github.com/spf13/pflag"
	genericapiserver "k8s.io/apiserver/pkg/server"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	clusterwebhook "open-cluster-management.io/registration/pkg/webhook/cluster"
	clustersetbindingwebhook "open-cluster-management.io/registration/pkg/webhook/clustersetbinding"
	"open-cluster-management.io/registration/pkg/features"
)

func NewAdmissionHook() *cobra.Command {
func NewWebhook() *cobra.Command {
	ops := NewOptions()
	cmd := &cobra.Command{
		Use:   "webhook",
		Short: "Start Managed Cluster Admission Server",
		Use:   "webhook-server",
		Short: "Start the webhook server",
		RunE: func(c *cobra.Command, args []string) error {
			stopCh := genericapiserver.SetupSignalHandler()

			if err := ops.ServerOptions.Complete(); err != nil {
				return err
			}
			if err := ops.ServerOptions.Validate(args); err != nil {
				return err
			}
			if err := ops.RunAdmissionServer(ops.ServerOptions, stopCh); err != nil {
				return err
			}
			return nil
			err := ops.RunWebhookServer()
			return err
		},
	}

	flags := cmd.Flags()
	ops.AddFlags(flags)

	features.DefaultHubMutableFeatureGate.AddFlag(flags)
	return cmd
}

// Config contains the server (the webhook) cert and key.
type Options struct {
	QPS           float32
	Burst         int
	ServerOptions *admissionserver.AdmissionServerOptions
}

// NewOptions constructs a new set of default options for webhook.
func NewOptions() *Options {
	return &Options{
		QPS:   100.0,
		Burst: 200,
		ServerOptions: admissionserver.NewAdmissionServerOptions(
			os.Stdout,
			os.Stderr,
			&clusterwebhook.ManagedClusterValidatingAdmissionHook{},
			&clusterwebhook.ManagedClusterMutatingAdmissionHook{},
			&clustersetbindingwebhook.ManagedClusterSetBindingValidatingAdmissionHook{}),
	}
}

func (c *Options) AddFlags(fs *pflag.FlagSet) {
	fs.Float32Var(&c.QPS, "max-qps", c.QPS,
		"Maximum QPS to the hub server from this webhook.")
	fs.IntVar(&c.Burst, "max-burst", c.Burst,
		"Maximum burst for throttle.")

	featureGate := utilfeature.DefaultMutableFeatureGate
	featureGate.AddFlag(fs)

	c.ServerOptions.RecommendedOptions.FeatureGate = featureGate
	c.ServerOptions.RecommendedOptions.AddFlags(fs)
}

// change the default QPS and Butst, so rewrite this func
func (c *Options) RunAdmissionServer(o *admissionserver.AdmissionServerOptions, stopCh <-chan struct{}) error {
	config, err := o.Config()
	if err != nil {
		return err
	}
	config.RestConfig.QPS = c.QPS
	config.RestConfig.Burst = c.Burst
	server, err := config.Complete().New()
	if err != nil {
		return err
	}
	return server.GenericAPIServer.PrepareRun().Run(stopCh)
}
126 pkg/webhook/v1/managedcluster_mutating.go Normal file
@@ -0,0 +1,126 @@
package v1

import (
	"context"
	"encoding/json"
	"fmt"
	"strings"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	clusterv1 "open-cluster-management.io/api/cluster/v1"
	clusterv1beta2 "open-cluster-management.io/api/cluster/v1beta2"
	ocmfeature "open-cluster-management.io/api/feature"
	"open-cluster-management.io/registration/pkg/features"
	"open-cluster-management.io/registration/pkg/helpers"
	"sigs.k8s.io/controller-runtime/pkg/webhook"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

var (
	nowFunc               = time.Now
	defaultClusterSetName = "default"
	_ webhook.CustomDefaulter = &ManagedClusterWebhook{}
)

func (r *ManagedClusterWebhook) Default(ctx context.Context, obj runtime.Object) error {
	req, err := admission.RequestFromContext(ctx)
	if err != nil {
		return apierrors.NewBadRequest(err.Error())
	}

	var oldManagedCluster *clusterv1.ManagedCluster
	if len(req.OldObject.Raw) > 0 {
		cluster := &clusterv1.ManagedCluster{}
		if err := json.Unmarshal(req.OldObject.Raw, cluster); err != nil {
			return apierrors.NewBadRequest(err.Error())
		}
		oldManagedCluster = cluster
	}

	managedCluster, ok := obj.(*clusterv1.ManagedCluster)
	if !ok {
		return apierrors.NewBadRequest("Request cluster obj format is not right")
	}

	//Generate taints
	err = r.processTaints(managedCluster, oldManagedCluster)
	if err != nil {
		return err
	}

	//Set default clusterset label
	if features.DefaultHubMutableFeatureGate.Enabled(ocmfeature.DefaultClusterSet) {
		r.addDefaultClusterSetLabel(managedCluster)
	}

	return nil
}

// processTaints sets cluster taints
func (r *ManagedClusterWebhook) processTaints(managedCluster, oldManagedCluster *clusterv1.ManagedCluster) error {
	if len(managedCluster.Spec.Taints) == 0 {
		return nil
	}
	now := metav1.NewTime(nowFunc())
	var invalidTaints []string
	for index, taint := range managedCluster.Spec.Taints {
		originalTaint := helpers.FindTaintByKey(oldManagedCluster, taint.Key)
		switch {
		case oldManagedCluster == nil:
			// handle CREATE operation.
			// The request will not be denied if it has taints with timeAdded specified,
			// while the specified values will be ignored.
			managedCluster.Spec.Taints[index].TimeAdded = now
		case originalTaint == nil:
			// handle UPDATE operation.
			// new taint
			// The request will be denied if it has any taint with timeAdded specified.
			if !taint.TimeAdded.IsZero() {
				invalidTaints = append(invalidTaints, taint.Key)
				continue
			}
			managedCluster.Spec.Taints[index].TimeAdded = now
		case originalTaint.Value == taint.Value && originalTaint.Effect == taint.Effect:
			// handle UPDATE operation.
			// no change
			// The request will be denied if it has any taint with different timeAdded specified.
			if !originalTaint.TimeAdded.Equal(&taint.TimeAdded) {
				invalidTaints = append(invalidTaints, taint.Key)
			}
		default:
			// handle UPDATE operation.
			// taint's value/effect has changed
			// The request will be denied if it has any taint with timeAdded specified.
			if !taint.TimeAdded.IsZero() {
				invalidTaints = append(invalidTaints, taint.Key)
				continue
			}
			managedCluster.Spec.Taints[index].TimeAdded = now
		}
	}

	if len(invalidTaints) == 0 {
		return nil
	}
	return apierrors.NewBadRequest(fmt.Sprintf("It is not allowed to set TimeAdded of Taint %q.", strings.Join(invalidTaints, ",")))
}

// addDefaultClusterSetLabel adds the label "cluster.open-cluster-management.io/clusterset:default" to a ManagedCluster that has no ManagedClusterSet label
func (a *ManagedClusterWebhook) addDefaultClusterSetLabel(managedCluster *clusterv1.ManagedCluster) {
	if len(managedCluster.Labels) == 0 {
		managedCluster.Labels = map[string]string{
			clusterv1beta2.ClusterSetLabel: defaultClusterSetName,
		}
		return
	}

	clusterSetName, ok := managedCluster.Labels[clusterv1beta2.ClusterSetLabel]
	// Clusterset label does not exist or is empty; set the default clusterset label
	if !ok || len(clusterSetName) == 0 {
		managedCluster.Labels[clusterv1beta2.ClusterSetLabel] = defaultClusterSetName
	}
	return
}
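Because Default reads the admission request from the context, callers (and the tests below) attach one with admission.NewContextWithRequest. A condensed sketch of that invocation, assuming req and cluster are already built; the full version appears in the test file that follows:

// req is an admission.Request whose Object/OldObject carry the marshalled clusters;
// an empty OldObject makes Default treat the call as a CREATE.
ctx := admission.NewContextWithRequest(context.Background(), req)
if err := (&ManagedClusterWebhook{}).Default(ctx, cluster); err != nil {
	// a denied request surfaces here as a BadRequest error
}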
521 pkg/webhook/v1/managedcluster_mutating_test.go Normal file
@@ -0,0 +1,521 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
apiruntime "k8s.io/apimachinery/pkg/runtime"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/runtime"
|
||||
ocmfeature "open-cluster-management.io/api/feature"
|
||||
"open-cluster-management.io/registration/pkg/features"
|
||||
|
||||
admissionv1 "k8s.io/api/admission/v1"
|
||||
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
clusterv1 "open-cluster-management.io/api/cluster/v1"
|
||||
v1 "open-cluster-management.io/api/cluster/v1"
|
||||
clusterv1beta2 "open-cluster-management.io/api/cluster/v1beta2"
|
||||
)
|
||||
|
||||
func TestDefault(t *testing.T) {
|
||||
now := time.Now()
|
||||
cases := []struct {
|
||||
name string
|
||||
cluster *v1.ManagedCluster
|
||||
oldCluster *v1.ManagedCluster
|
||||
expectCluster *v1.ManagedCluster
|
||||
expectedError bool
|
||||
}{
|
||||
{
|
||||
name: "Empty spec cluster",
|
||||
expectedError: false,
|
||||
cluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set-1",
|
||||
},
|
||||
},
|
||||
expectCluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set-1",
|
||||
Labels: map[string]string{
|
||||
clusterv1beta2.ClusterSetLabel: defaultClusterSetName,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "New taint",
|
||||
expectedError: false,
|
||||
cluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set-1",
|
||||
Labels: map[string]string{
|
||||
clusterv1beta2.ClusterSetLabel: defaultClusterSetName,
|
||||
},
|
||||
},
|
||||
Spec: v1.ManagedClusterSpec{
|
||||
Taints: []v1.Taint{
|
||||
{
|
||||
Key: "a",
|
||||
Value: "b",
|
||||
Effect: v1.TaintEffectNoSelect,
|
||||
},
|
||||
{
|
||||
Key: "c",
|
||||
Value: "d",
|
||||
Effect: v1.TaintEffectPreferNoSelect,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectCluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set-1",
|
||||
Labels: map[string]string{
|
||||
clusterv1beta2.ClusterSetLabel: defaultClusterSetName,
|
||||
},
|
||||
},
|
||||
Spec: v1.ManagedClusterSpec{
|
||||
Taints: []v1.Taint{
|
||||
{
|
||||
Key: "a",
|
||||
Value: "b",
|
||||
Effect: v1.TaintEffectNoSelect,
|
||||
TimeAdded: newTime(now, 0),
|
||||
},
|
||||
{
|
||||
Key: "c",
|
||||
Value: "d",
|
||||
Effect: v1.TaintEffectPreferNoSelect,
|
||||
TimeAdded: newTime(now, 0),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "New taint with timeAdded specified",
|
||||
expectedError: false,
|
||||
cluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set-1",
|
||||
Labels: map[string]string{
|
||||
clusterv1beta2.ClusterSetLabel: defaultClusterSetName,
|
||||
},
|
||||
},
|
||||
Spec: v1.ManagedClusterSpec{
|
||||
Taints: []v1.Taint{
|
||||
{
|
||||
Key: "a",
|
||||
Value: "b",
|
||||
Effect: v1.TaintEffectNoSelect,
|
||||
},
|
||||
{
|
||||
Key: "c",
|
||||
Value: "d",
|
||||
Effect: v1.TaintEffectPreferNoSelect,
|
||||
TimeAdded: newTime(now, 0),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectCluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set-1",
|
||||
Labels: map[string]string{
|
||||
clusterv1beta2.ClusterSetLabel: defaultClusterSetName,
|
||||
},
|
||||
},
|
||||
Spec: v1.ManagedClusterSpec{
|
||||
Taints: []v1.Taint{
|
||||
{
|
||||
Key: "a",
|
||||
Value: "b",
|
||||
Effect: v1.TaintEffectNoSelect,
|
||||
TimeAdded: newTime(now, 0),
|
||||
},
|
||||
{
|
||||
Key: "c",
|
||||
Value: "d",
|
||||
Effect: v1.TaintEffectPreferNoSelect,
|
||||
TimeAdded: newTime(now, 0),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "update taint",
|
||||
expectedError: false,
|
||||
oldCluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set-1",
|
||||
Labels: map[string]string{
|
||||
clusterv1beta2.ClusterSetLabel: defaultClusterSetName,
|
||||
},
|
||||
},
|
||||
Spec: v1.ManagedClusterSpec{
|
||||
Taints: []v1.Taint{
|
||||
{
|
||||
Key: "a",
|
||||
Value: "b",
|
||||
Effect: v1.TaintEffectNoSelect,
|
||||
TimeAdded: newTime(now, -10*time.Second),
|
||||
},
|
||||
{
|
||||
Key: "c",
|
||||
Value: "d",
|
||||
Effect: v1.TaintEffectNoSelect,
|
||||
TimeAdded: newTime(now, -10*time.Second),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
cluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set-1",
|
||||
Labels: map[string]string{
|
||||
clusterv1beta2.ClusterSetLabel: defaultClusterSetName,
|
||||
},
|
||||
},
|
||||
Spec: v1.ManagedClusterSpec{
|
||||
Taints: []v1.Taint{
|
||||
{
|
||||
Key: "a",
|
||||
Value: "b",
|
||||
Effect: v1.TaintEffectNoSelect,
|
||||
TimeAdded: newTime(now, -10*time.Second),
|
||||
},
|
||||
{
|
||||
Key: "c",
|
||||
Value: "d",
|
||||
Effect: v1.TaintEffectNoSelectIfNew,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectCluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set-1",
|
||||
Labels: map[string]string{
|
||||
clusterv1beta2.ClusterSetLabel: defaultClusterSetName,
|
||||
},
|
||||
},
|
||||
Spec: v1.ManagedClusterSpec{
|
||||
Taints: []v1.Taint{
|
||||
{
|
||||
Key: "a",
|
||||
Value: "b",
|
||||
Effect: v1.TaintEffectNoSelect,
|
||||
TimeAdded: newTime(now, -10*time.Second),
|
||||
},
|
||||
{
|
||||
Key: "c",
|
||||
Value: "d",
|
||||
Effect: v1.TaintEffectNoSelectIfNew,
|
||||
TimeAdded: newTime(now, 0),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "taint update request denied",
|
||||
expectedError: true,
|
||||
oldCluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set-1",
|
||||
Labels: map[string]string{
|
||||
clusterv1beta2.ClusterSetLabel: defaultClusterSetName,
|
||||
},
|
||||
},
|
||||
Spec: v1.ManagedClusterSpec{
|
||||
Taints: []v1.Taint{
|
||||
{
|
||||
Key: "a",
|
||||
Value: "b",
|
||||
Effect: v1.TaintEffectNoSelect,
|
||||
TimeAdded: newTime(now, -10*time.Second),
|
||||
},
|
||||
{
|
||||
Key: "c",
|
||||
Value: "d",
|
||||
Effect: v1.TaintEffectNoSelect,
|
||||
TimeAdded: newTime(now, -10*time.Second),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
cluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set-1",
|
||||
Labels: map[string]string{
|
||||
clusterv1beta2.ClusterSetLabel: defaultClusterSetName,
|
||||
},
|
||||
},
|
||||
Spec: v1.ManagedClusterSpec{
|
||||
Taints: []v1.Taint{
|
||||
{
|
||||
Key: "a",
|
||||
Value: "b",
|
||||
Effect: v1.TaintEffectNoSelect,
|
||||
TimeAdded: newTime(now, -20*time.Second),
|
||||
},
|
||||
{
|
||||
Key: "c",
|
||||
Value: "d",
|
||||
Effect: v1.TaintEffectNoSelectIfNew,
|
||||
TimeAdded: newTime(now, -10*time.Second),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectCluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set-1",
|
||||
Labels: map[string]string{
|
||||
clusterv1beta2.ClusterSetLabel: defaultClusterSetName,
|
||||
},
|
||||
},
|
||||
Spec: v1.ManagedClusterSpec{
|
||||
Taints: []v1.Taint{
|
||||
{
|
||||
Key: "a",
|
||||
Value: "b",
|
||||
Effect: v1.TaintEffectNoSelect,
|
||||
TimeAdded: newTime(now, -10*time.Second),
|
||||
},
|
||||
{
|
||||
Key: "c",
|
||||
Value: "d",
|
||||
Effect: v1.TaintEffectNoSelectIfNew,
|
||||
TimeAdded: newTime(now, 0),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "delete taint",
|
||||
expectedError: false,
|
||||
oldCluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set-1",
|
||||
Labels: map[string]string{
|
||||
clusterv1beta2.ClusterSetLabel: defaultClusterSetName,
|
||||
},
|
||||
},
|
||||
Spec: v1.ManagedClusterSpec{
|
||||
Taints: []v1.Taint{
|
||||
{
|
||||
Key: "a",
|
||||
Value: "b",
|
||||
Effect: v1.TaintEffectNoSelect,
|
||||
TimeAdded: newTime(now, -10*time.Second),
|
||||
},
|
||||
{
|
||||
Key: "c",
|
||||
Value: "d",
|
||||
Effect: v1.TaintEffectNoSelect,
|
||||
TimeAdded: newTime(now, -10*time.Second),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
cluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set-1",
|
||||
Labels: map[string]string{
|
||||
clusterv1beta2.ClusterSetLabel: defaultClusterSetName,
|
||||
},
|
||||
},
|
||||
Spec: v1.ManagedClusterSpec{
|
||||
Taints: []v1.Taint{
|
||||
{
|
||||
Key: "a",
|
||||
Value: "b",
|
||||
Effect: v1.TaintEffectNoSelect,
|
||||
TimeAdded: newTime(now, -10*time.Second),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectCluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set-1",
|
||||
Labels: map[string]string{
|
||||
clusterv1beta2.ClusterSetLabel: defaultClusterSetName,
|
||||
},
|
||||
},
|
||||
Spec: v1.ManagedClusterSpec{
|
||||
Taints: []v1.Taint{
|
||||
{
|
||||
Key: "a",
|
||||
Value: "b",
|
||||
Effect: v1.TaintEffectNoSelect,
|
||||
TimeAdded: newTime(now, -10*time.Second),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Has clusterset label",
|
||||
expectedError: false,
|
||||
cluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set-1",
|
||||
Labels: map[string]string{
|
||||
clusterv1beta2.ClusterSetLabel: "s1",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectCluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set-1",
|
||||
Labels: map[string]string{
|
||||
clusterv1beta2.ClusterSetLabel: "s1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Has default clusterset label",
|
||||
expectedError: false,
|
||||
cluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set-1",
|
||||
Labels: map[string]string{
|
||||
clusterv1beta2.ClusterSetLabel: defaultClusterSetName,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectCluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set-1",
|
||||
Labels: map[string]string{
|
||||
clusterv1beta2.ClusterSetLabel: defaultClusterSetName,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Has null clusterset label",
|
||||
expectedError: false,
|
||||
cluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set-1",
|
||||
Labels: map[string]string{
|
||||
clusterv1beta2.ClusterSetLabel: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectCluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set-1",
|
||||
Labels: map[string]string{
|
||||
clusterv1beta2.ClusterSetLabel: defaultClusterSetName,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Has other label",
|
||||
expectedError: false,
|
||||
cluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set-1",
|
||||
Labels: map[string]string{
|
||||
"k": "v",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectCluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set-1",
|
||||
Labels: map[string]string{
|
||||
"k": "v",
|
||||
clusterv1beta2.ClusterSetLabel: defaultClusterSetName,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
runtime.Must(features.DefaultHubMutableFeatureGate.Add(ocmfeature.DefaultHubRegistrationFeatureGates))
|
||||
if err := features.DefaultHubMutableFeatureGate.Set(fmt.Sprintf("%s=true", string(ocmfeature.DefaultClusterSet))); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
w := ManagedClusterWebhook{}
|
||||
var oldClusterBytes []byte
|
||||
if c.oldCluster == nil {
|
||||
oldClusterBytes = []byte{}
|
||||
} else {
|
||||
oldClusterBytes, _ = json.Marshal(c.oldCluster)
|
||||
}
|
||||
clusterBytes, _ := json.Marshal(c.cluster)
|
||||
req := admission.Request{
|
||||
AdmissionRequest: admissionv1.AdmissionRequest{
|
||||
Resource: metav1.GroupVersionResource{
|
||||
Group: "test.open-cluster-management.io",
|
||||
Version: "v1",
|
||||
Resource: "tests",
|
||||
},
|
||||
OldObject: apiruntime.RawExtension{
|
||||
Raw: oldClusterBytes,
|
||||
},
|
||||
Object: apiruntime.RawExtension{
|
||||
Raw: clusterBytes,
|
||||
},
|
||||
},
|
||||
}
|
||||
ctx := admission.NewContextWithRequest(context.Background(), req)
|
||||
|
||||
cluster := &clusterv1.ManagedCluster{}
|
||||
if err := json.Unmarshal(clusterBytes, cluster); err != nil {
	t.Fatal(err)
}
|
||||
err := w.Default(ctx, cluster)
|
||||
if err != nil || c.expectCluster != nil {
|
||||
if err != nil && !c.expectedError {
|
||||
t.Errorf("Case:%v, Expect nil but got Error, err: %v", c.name, err)
|
||||
}
|
||||
if err == nil && c.expectedError {
|
||||
t.Errorf("Case:%v, Expect Error but got nil", c.name)
|
||||
}
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(cluster.Labels, c.expectCluster.Labels) {
|
||||
t.Errorf("Case:%v, Expect cluster label is not same as return cluster. expect:%v,return:%v", c.name, c.expectCluster.Labels, c.cluster.Labels)
|
||||
}
|
||||
if !DiffTaintTime(cluster.Spec.Taints, c.expectCluster.Spec.Taints) {
|
||||
t.Errorf("Case:%v, Expect cluster taits:%v, return cluster taints:%v", c.name, c.expectCluster.Spec.Taints, c.cluster.Spec.Taints)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func DiffTaintTime(src, dest []v1.Taint) bool {
|
||||
if len(src) != len(dest) {
|
||||
return false
|
||||
}
|
||||
for k := range src {
|
||||
if src[k].TimeAdded.Minute() != dest[k].TimeAdded.Minute() || src[k].TimeAdded.Second() != dest[k].TimeAdded.Second() {
|
||||
fmt.Printf("src:%v, \ndest:%v\n", src[k].TimeAdded.String(), dest[k].TimeAdded.String())
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func newTime(time time.Time, offset time.Duration) metav1.Time {
|
||||
mt := metav1.NewTime(time.Add(offset))
|
||||
return mt
|
||||
}
|
||||
233 pkg/webhook/v1/managedcluster_validating.go Normal file
@@ -0,0 +1,233 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
operatorhelpers "github.com/openshift/library-go/pkg/operator/v1helpers"
|
||||
authenticationv1 "k8s.io/api/authentication/v1"
|
||||
authorizationv1 "k8s.io/api/authorization/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
clusterv1beta2 "open-cluster-management.io/api/cluster/v1beta2"
|
||||
|
||||
v1 "open-cluster-management.io/api/cluster/v1"
|
||||
"open-cluster-management.io/registration/pkg/helpers"
|
||||
|
||||
"sigs.k8s.io/controller-runtime/pkg/webhook"
|
||||
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
|
||||
)
|
||||
|
||||
var _ webhook.CustomValidator = &ManagedClusterWebhook{}
|
||||
|
||||
// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
|
||||
func (r *ManagedClusterWebhook) ValidateCreate(ctx context.Context, obj runtime.Object) error {
|
||||
managedCluster, ok := obj.(*v1.ManagedCluster)
|
||||
if !ok {
|
||||
return apierrors.NewBadRequest("Request cluster obj format is not right")
|
||||
}
|
||||
req, err := admission.RequestFromContext(ctx)
|
||||
if err != nil {
|
||||
return apierrors.NewBadRequest(err.Error())
|
||||
}
|
||||
//Validate if Spec.ManagedClusterClientConfigs is Valid HTTPS URL
|
||||
err = r.validateManagedClusterObj(*managedCluster)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// if the HubAcceptsClient field is set, check with the SubjectAccessReview API
// whether the request user is allowed to change the HubAcceptsClient field
|
||||
if managedCluster.Spec.HubAcceptsClient {
|
||||
err := r.allowUpdateAcceptField(managedCluster.Name, req.UserInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// check whether the request user has been allowed to set clusterset label
|
||||
var clusterSetName string
|
||||
if len(managedCluster.Labels) > 0 {
|
||||
clusterSetName = managedCluster.Labels[clusterv1beta2.ClusterSetLabel]
|
||||
}
|
||||
|
||||
return r.allowSetClusterSetLabel(req.UserInfo, "", clusterSetName)
|
||||
}
|
||||
|
||||
// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
|
||||
func (r *ManagedClusterWebhook) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) error {
|
||||
managedCluster, ok := newObj.(*v1.ManagedCluster)
|
||||
if !ok {
|
||||
return apierrors.NewBadRequest("Request new cluster obj format is not right")
|
||||
}
|
||||
oldManagedCluster, ok := oldObj.(*v1.ManagedCluster)
|
||||
if !ok {
|
||||
return apierrors.NewBadRequest("Request old cluster obj format is not right")
|
||||
}
|
||||
req, err := admission.RequestFromContext(ctx)
|
||||
if err != nil {
|
||||
return apierrors.NewBadRequest(err.Error())
|
||||
}
|
||||
|
||||
//Validate if Spec.ManagedClusterClientConfigs is Valid HTTPS URL
|
||||
err = r.validateManagedClusterObj(*managedCluster)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// if the HubAcceptsClient field is set, check with the SubjectAccessReview API
// whether the request user is allowed to change the HubAcceptsClient field
|
||||
if managedCluster.Spec.HubAcceptsClient {
|
||||
err := r.allowUpdateAcceptField(managedCluster.Name, req.UserInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// check whether the request user has been allowed to set clusterset label
|
||||
var originalClusterSetName, currentClusterSetName string
|
||||
if len(oldManagedCluster.Labels) > 0 {
|
||||
originalClusterSetName = oldManagedCluster.Labels[clusterv1beta2.ClusterSetLabel]
|
||||
}
|
||||
if len(managedCluster.Labels) > 0 {
|
||||
currentClusterSetName = managedCluster.Labels[clusterv1beta2.ClusterSetLabel]
|
||||
}
|
||||
|
||||
return r.allowSetClusterSetLabel(req.UserInfo, originalClusterSetName, currentClusterSetName)
|
||||
}
|
||||
|
||||
// ValidateDelete implements webhook.Validator so a webhook will be registered for the type
|
||||
func (r *ManagedClusterWebhook) ValidateDelete(_ context.Context, obj runtime.Object) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// validateManagedClusterObj validates the fields of the ManagedCluster object
|
||||
func (r *ManagedClusterWebhook) validateManagedClusterObj(cluster v1.ManagedCluster) error {
|
||||
errs := []error{}
|
||||
|
||||
// there are no spoke client configs, finish the validation process
|
||||
if len(cluster.Spec.ManagedClusterClientConfigs) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// validate the url in spoke client configs
|
||||
for _, clientConfig := range cluster.Spec.ManagedClusterClientConfigs {
|
||||
if !helpers.IsValidHTTPSURL(clientConfig.URL) {
|
||||
errs = append(errs, fmt.Errorf("url %q is invalid in client configs", clientConfig.URL))
|
||||
}
|
||||
}
|
||||
if len(errs) != 0 {
|
||||
return apierrors.NewBadRequest(operatorhelpers.NewMultiLineAggregate(errs).Error())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// allowUpdateAcceptField uses the SubjectAccessReview API to check whether a request user has been authorized to update the
// HubAcceptsClient field
|
||||
func (r *ManagedClusterWebhook) allowUpdateAcceptField(clusterName string, userInfo authenticationv1.UserInfo) error {
|
||||
extra := make(map[string]authorizationv1.ExtraValue)
|
||||
for k, v := range userInfo.Extra {
|
||||
extra[k] = authorizationv1.ExtraValue(v)
|
||||
}
|
||||
|
||||
sar := &authorizationv1.SubjectAccessReview{
|
||||
Spec: authorizationv1.SubjectAccessReviewSpec{
|
||||
User: userInfo.Username,
|
||||
UID: userInfo.UID,
|
||||
Groups: userInfo.Groups,
|
||||
Extra: extra,
|
||||
ResourceAttributes: &authorizationv1.ResourceAttributes{
|
||||
Group: "register.open-cluster-management.io",
|
||||
Resource: "managedclusters",
|
||||
Verb: "update",
|
||||
Subresource: "accept",
|
||||
Name: clusterName,
|
||||
},
|
||||
},
|
||||
}
|
||||
sar, err := r.kubeClient.AuthorizationV1().SubjectAccessReviews().Create(context.TODO(), sar, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
return apierrors.NewForbidden(
|
||||
v1.Resource("managedclusters/accept"),
|
||||
clusterName,
|
||||
err,
|
||||
)
|
||||
}
|
||||
|
||||
if !sar.Status.Allowed {
|
||||
return apierrors.NewForbidden(
|
||||
v1.Resource("managedclusters/accept"),
|
||||
clusterName,
|
||||
fmt.Errorf("user %q cannot update the HubAcceptsClient field", userInfo.Username),
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// allowSetClusterSetLabel checks whether a request user has been authorized to set clusterset label
|
||||
func (r *ManagedClusterWebhook) allowSetClusterSetLabel(userInfo authenticationv1.UserInfo, originalClusterSet, newClusterSet string) error {
|
||||
if originalClusterSet == newClusterSet {
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(originalClusterSet) > 0 {
|
||||
err := r.allowUpdateClusterSet(userInfo, originalClusterSet)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if len(newClusterSet) > 0 {
|
||||
err := r.allowUpdateClusterSet(userInfo, newClusterSet)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// allowUpdateClusterSet checks whether a request user has been authorized to add/remove a ManagedCluster
|
||||
// to/from the ManagedClusterSet
|
||||
func (r *ManagedClusterWebhook) allowUpdateClusterSet(userInfo authenticationv1.UserInfo, clusterSetName string) error {
|
||||
extra := make(map[string]authorizationv1.ExtraValue)
|
||||
for k, v := range userInfo.Extra {
|
||||
extra[k] = authorizationv1.ExtraValue(v)
|
||||
}
|
||||
|
||||
sar := &authorizationv1.SubjectAccessReview{
|
||||
Spec: authorizationv1.SubjectAccessReviewSpec{
|
||||
User: userInfo.Username,
|
||||
UID: userInfo.UID,
|
||||
Groups: userInfo.Groups,
|
||||
Extra: extra,
|
||||
ResourceAttributes: &authorizationv1.ResourceAttributes{
|
||||
Group: "cluster.open-cluster-management.io",
|
||||
Resource: "managedclustersets",
|
||||
Subresource: "join",
|
||||
Name: clusterSetName,
|
||||
Verb: "create",
|
||||
},
|
||||
},
|
||||
}
|
||||
sar, err := r.kubeClient.AuthorizationV1().SubjectAccessReviews().Create(context.TODO(), sar, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
return apierrors.NewForbidden(
|
||||
v1.Resource("managedclustersets/join"),
|
||||
clusterSetName,
|
||||
err,
|
||||
)
|
||||
}
|
||||
|
||||
if !sar.Status.Allowed {
|
||||
return apierrors.NewForbidden(
|
||||
v1.Resource("managedclustersets/join"),
|
||||
clusterSetName,
|
||||
fmt.Errorf("user %q cannot add/remove a ManagedCluster to/from ManagedClusterSet %q", userInfo.Username, clusterSetName),
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
459 pkg/webhook/v1/managedcluster_validating_test.go Normal file
@@ -0,0 +1,459 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
admissionv1 "k8s.io/api/admission/v1"
|
||||
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
|
||||
|
||||
authorizationv1 "k8s.io/api/authorization/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
kubefake "k8s.io/client-go/kubernetes/fake"
|
||||
|
||||
clienttesting "k8s.io/client-go/testing"
|
||||
v1 "open-cluster-management.io/api/cluster/v1"
|
||||
"open-cluster-management.io/api/cluster/v1beta1"
|
||||
)
|
||||
|
||||
func TestValidateCreate(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
cluster *v1.ManagedCluster
|
||||
expectedError bool
|
||||
allowUpdateAcceptField bool
|
||||
allowClusterset bool
|
||||
allowUpdateClusterSets map[string]bool
|
||||
}{
|
||||
{
|
||||
name: "Empty spec cluster",
|
||||
expectedError: false,
|
||||
cluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "validate creating an accepted ManagedCluster without permission",
|
||||
expectedError: true,
|
||||
allowUpdateAcceptField: false,
|
||||
cluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set-1",
|
||||
},
|
||||
Spec: v1.ManagedClusterSpec{
|
||||
HubAcceptsClient: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "validate creating an accepted ManagedCluster with permission",
|
||||
expectedError: false,
|
||||
allowUpdateAcceptField: true,
|
||||
cluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set-1",
|
||||
},
|
||||
Spec: v1.ManagedClusterSpec{
|
||||
HubAcceptsClient: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "validate setting clusterset label",
|
||||
expectedError: false,
|
||||
allowUpdateClusterSets: map[string]bool{
|
||||
"clusterset1": true,
|
||||
},
|
||||
cluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set",
|
||||
Labels: map[string]string{
|
||||
v1beta1.ClusterSetLabel: "clusterset1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "validate setting clusterset label without permission",
|
||||
expectedError: true,
|
||||
allowUpdateClusterSets: map[string]bool{
|
||||
"clusterset1": false,
|
||||
},
|
||||
cluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set",
|
||||
Labels: map[string]string{
|
||||
v1beta1.ClusterSetLabel: "clusterset1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "validate create cluster with invalid config",
|
||||
expectedError: true,
|
||||
cluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set",
|
||||
},
|
||||
Spec: v1.ManagedClusterSpec{
|
||||
ManagedClusterClientConfigs: []v1.ClientConfig{
|
||||
{URL: "http://127.0.0.1:8001"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "validate create cluster with valid config",
|
||||
expectedError: false,
|
||||
cluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set",
|
||||
},
|
||||
Spec: v1.ManagedClusterSpec{
|
||||
ManagedClusterClientConfigs: []v1.ClientConfig{
|
||||
{URL: "https://127.0.0.1:8001"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
kubeClient := kubefake.NewSimpleClientset()
|
||||
kubeClient.PrependReactor(
|
||||
"create",
|
||||
"subjectaccessreviews",
|
||||
func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) {
|
||||
allowed := false
|
||||
|
||||
sar := action.(clienttesting.CreateAction).GetObject().(*authorizationv1.SubjectAccessReview)
|
||||
switch sar.Spec.ResourceAttributes.Resource {
|
||||
case "managedclusters":
|
||||
allowed = c.allowUpdateAcceptField
|
||||
case "managedclustersets":
|
||||
allowed = c.allowUpdateClusterSets[sar.Spec.ResourceAttributes.Name]
|
||||
}
|
||||
|
||||
return true, &authorizationv1.SubjectAccessReview{
|
||||
Status: authorizationv1.SubjectAccessReviewStatus{
|
||||
Allowed: allowed,
|
||||
},
|
||||
}, nil
|
||||
},
|
||||
)
|
||||
w := ManagedClusterWebhook{
|
||||
kubeClient: kubeClient,
|
||||
}
|
||||
req := admission.Request{
|
||||
AdmissionRequest: admissionv1.AdmissionRequest{
|
||||
Resource: metav1.GroupVersionResource{
|
||||
Group: "test.open-cluster-management.io",
|
||||
Version: "v1",
|
||||
Resource: "tests",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
ctx := admission.NewContextWithRequest(context.Background(), req)
|
||||
|
||||
err := w.ValidateCreate(ctx, c.cluster)
|
||||
if err != nil && !c.expectedError {
|
||||
t.Errorf("Case:%v, Expect nil but got Error, err: %v", c.name, err)
|
||||
}
|
||||
if err == nil && c.expectedError {
|
||||
t.Errorf("Case:%v, Expect Error but got nil", c.name)
|
||||
}
|
||||
})
|
||||
}
|
||||
w := ManagedClusterWebhook{}
|
||||
err := w.ValidateCreate(context.Background(), &v1beta1.ManagedClusterSet{})
|
||||
if err == nil {
|
||||
t.Errorf("Non cluster obj, Expect Error but got nil")
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateUpdate(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
cluster *v1.ManagedCluster
|
||||
oldCluster *v1.ManagedCluster
|
||||
expectedError bool
|
||||
allowUpdateAcceptField bool
|
||||
allowClusterset bool
|
||||
allowUpdateClusterSets map[string]bool
|
||||
}{
|
||||
{
|
||||
name: "validate update an accepted ManagedCluster without permission",
|
||||
expectedError: true,
|
||||
allowUpdateAcceptField: false,
|
||||
cluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set-1",
|
||||
},
|
||||
Spec: v1.ManagedClusterSpec{
|
||||
HubAcceptsClient: true,
|
||||
},
|
||||
},
|
||||
oldCluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set-1",
|
||||
},
|
||||
Spec: v1.ManagedClusterSpec{
|
||||
HubAcceptsClient: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "validate updating an accepted ManagedCluster with permission",
|
||||
expectedError: false,
|
||||
allowUpdateAcceptField: true,
|
||||
cluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set-1",
|
||||
},
|
||||
Spec: v1.ManagedClusterSpec{
|
||||
HubAcceptsClient: true,
|
||||
},
|
||||
},
|
||||
oldCluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set-1",
|
||||
},
|
||||
Spec: v1.ManagedClusterSpec{
|
||||
HubAcceptsClient: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "validate setting clusterset label",
|
||||
expectedError: false,
|
||||
allowUpdateClusterSets: map[string]bool{
|
||||
"clusterset1": true,
|
||||
},
|
||||
cluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set",
|
||||
Labels: map[string]string{
|
||||
v1beta1.ClusterSetLabel: "clusterset1",
|
||||
},
|
||||
},
|
||||
},
|
||||
oldCluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "validate setting clusterset label without permission",
|
||||
expectedError: true,
|
||||
allowUpdateClusterSets: map[string]bool{
|
||||
"clusterset1": false,
|
||||
},
|
||||
cluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set",
|
||||
Labels: map[string]string{
|
||||
v1beta1.ClusterSetLabel: "clusterset1",
|
||||
},
|
||||
},
|
||||
},
|
||||
oldCluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "validate setting clusterset label with 2 set permission",
|
||||
expectedError: false,
|
||||
allowUpdateClusterSets: map[string]bool{
|
||||
"clusterset1": true,
|
||||
"clusterset2": true,
|
||||
},
|
||||
cluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set",
|
||||
Labels: map[string]string{
|
||||
v1beta1.ClusterSetLabel: "clusterset1",
|
||||
},
|
||||
},
|
||||
},
|
||||
oldCluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set",
|
||||
Labels: map[string]string{
|
||||
v1beta1.ClusterSetLabel: "clusterset2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "validate setting clusterset label without permission",
|
||||
expectedError: true,
|
||||
allowUpdateClusterSets: map[string]bool{
|
||||
"clusterset1": false,
|
||||
"clusterset2": false,
|
||||
},
|
||||
cluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set",
|
||||
Labels: map[string]string{
|
||||
v1beta1.ClusterSetLabel: "clusterset1",
|
||||
},
|
||||
},
|
||||
},
|
||||
oldCluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set",
|
||||
Labels: map[string]string{
|
||||
v1beta1.ClusterSetLabel: "clusterset2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "validate remove clusterset label without permission",
|
||||
expectedError: false,
|
||||
allowUpdateClusterSets: map[string]bool{
|
||||
"clusterset1": true,
|
||||
},
|
||||
cluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set",
|
||||
},
|
||||
},
|
||||
oldCluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set",
|
||||
Labels: map[string]string{
|
||||
v1beta1.ClusterSetLabel: "clusterset1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "validate setting clusterset label with 1 set permission",
|
||||
expectedError: true,
|
||||
allowUpdateClusterSets: map[string]bool{
|
||||
"clusterset1": true,
|
||||
"clusterset2": false,
|
||||
},
|
||||
cluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set",
|
||||
Labels: map[string]string{
|
||||
v1beta1.ClusterSetLabel: "clusterset1",
|
||||
},
|
||||
},
|
||||
},
|
||||
oldCluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set",
|
||||
Labels: map[string]string{
|
||||
v1beta1.ClusterSetLabel: "clusterset2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "validate update cluster with invalid config",
|
||||
expectedError: true,
|
||||
cluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set",
|
||||
},
|
||||
Spec: v1.ManagedClusterSpec{
|
||||
ManagedClusterClientConfigs: []v1.ClientConfig{
|
||||
{URL: "http://127.0.0.1:8001"},
|
||||
},
|
||||
},
|
||||
},
|
||||
oldCluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "validate update cluster with valid config",
|
||||
expectedError: false,
|
||||
cluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set",
|
||||
},
|
||||
Spec: v1.ManagedClusterSpec{
|
||||
ManagedClusterClientConfigs: []v1.ClientConfig{
|
||||
{URL: "https://127.0.0.1:8001"},
|
||||
},
|
||||
},
|
||||
},
|
||||
oldCluster: &v1.ManagedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set",
|
||||
},
|
||||
Spec: v1.ManagedClusterSpec{
|
||||
ManagedClusterClientConfigs: []v1.ClientConfig{
|
||||
{URL: "https://127.0.0.1:8002"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
kubeClient := kubefake.NewSimpleClientset()
|
||||
kubeClient.PrependReactor(
|
||||
"create",
|
||||
"subjectaccessreviews",
|
||||
func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) {
|
||||
allowed := false
|
||||
|
||||
sar := action.(clienttesting.CreateAction).GetObject().(*authorizationv1.SubjectAccessReview)
|
||||
switch sar.Spec.ResourceAttributes.Resource {
|
||||
case "managedclusters":
|
||||
allowed = c.allowUpdateAcceptField
|
||||
case "managedclustersets":
|
||||
allowed = c.allowUpdateClusterSets[sar.Spec.ResourceAttributes.Name]
|
||||
}
|
||||
|
||||
return true, &authorizationv1.SubjectAccessReview{
|
||||
Status: authorizationv1.SubjectAccessReviewStatus{
|
||||
Allowed: allowed,
|
||||
},
|
||||
}, nil
|
||||
},
|
||||
)
|
||||
w := ManagedClusterWebhook{
|
||||
kubeClient: kubeClient,
|
||||
}
|
||||
req := admission.Request{
|
||||
AdmissionRequest: admissionv1.AdmissionRequest{
|
||||
Resource: metav1.GroupVersionResource{
|
||||
Group: "test.open-cluster-management.io",
|
||||
Version: "v1",
|
||||
Resource: "tests",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
ctx := admission.NewContextWithRequest(context.Background(), req)
|
||||
|
||||
err := w.ValidateUpdate(ctx, c.oldCluster, c.cluster)
|
||||
if err != nil && !c.expectedError {
|
||||
t.Errorf("Case:%v, Expect nil but got error: %v", c.name, err)
|
||||
}
|
||||
if err == nil && c.expectedError {
|
||||
t.Errorf("Case:%v, Expect Error but got nil:%v", c.name, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
w := ManagedClusterWebhook{}
|
||||
err := w.ValidateUpdate(context.Background(), nil, &v1beta1.ManagedClusterSetBinding{})
|
||||
if err == nil {
|
||||
t.Errorf("Non cluster obj, Expect Error but got nil")
|
||||
}
|
||||
}
|
||||
28  pkg/webhook/v1/webhook.go  Normal file
@@ -0,0 +1,28 @@
package v1

import (
	"k8s.io/client-go/kubernetes"
	v1 "open-cluster-management.io/api/cluster/v1"
	ctrl "sigs.k8s.io/controller-runtime"
)

type ManagedClusterWebhook struct {
	kubeClient kubernetes.Interface
}

func (r *ManagedClusterWebhook) Init(mgr ctrl.Manager) error {
	err := r.SetupWebhookWithManager(mgr)
	if err != nil {
		return err
	}
	r.kubeClient, err = kubernetes.NewForConfig(mgr.GetConfig())
	return err
}

func (r *ManagedClusterWebhook) SetupWebhookWithManager(mgr ctrl.Manager) error {
	return ctrl.NewWebhookManagedBy(mgr).
		WithValidator(r).
		WithDefaulter(r).
		For(&v1.ManagedCluster{}).
		Complete()
}
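For orientation, here is a minimal sketch (not part of this commit) of how a webhook type such as ManagedClusterWebhook is typically started with a controller-runtime manager; the standalone main package, the serving port, and the cert directory below are illustrative assumptions, and the real wiring lives in the registration webhook command.

package main

import (
	ctrl "sigs.k8s.io/controller-runtime"

	webhookv1 "open-cluster-management.io/registration/pkg/webhook/v1"
)

func main() {
	// Assumed manager options: webhook serving port and certificate directory.
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		Port:    9443,
		CertDir: "/tmp",
	})
	if err != nil {
		panic(err)
	}
	// Init registers the validating/defaulting webhook for ManagedCluster and
	// builds a kubernetes.Interface from the manager's rest.Config.
	if err := (&webhookv1.ManagedClusterWebhook{}).Init(mgr); err != nil {
		panic(err)
	}
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		panic(err)
	}
}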
99  pkg/webhook/v1beta1/managedclustersetbinding_validating.go  Normal file
@@ -0,0 +1,99 @@
package v1beta1

import (
	"context"
	"fmt"

	authenticationv1 "k8s.io/api/authentication/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/client-go/kubernetes"

	authorizationv1 "k8s.io/api/authorization/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"k8s.io/apimachinery/pkg/runtime"
	"open-cluster-management.io/api/cluster/v1beta1"

	"sigs.k8s.io/controller-runtime/pkg/webhook"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

var _ webhook.CustomValidator = &ManagedClusterSetBindingWebhook{}

// ValidateCreate implements webhook.CustomValidator so a webhook will be registered for the type
func (b *ManagedClusterSetBindingWebhook) ValidateCreate(ctx context.Context, obj runtime.Object) error {
	binding, ok := obj.(*v1beta1.ManagedClusterSetBinding)
	if !ok {
		return apierrors.NewBadRequest("Request clustersetbinding obj format is not right")
	}

	// force the instance name to match the target cluster set name
	if binding.Name != binding.Spec.ClusterSet {
		return apierrors.NewBadRequest("The ManagedClusterSetBinding must have the same name as the target ManagedClusterSet")
	}

	req, err := admission.RequestFromContext(ctx)
	if err != nil {
		return apierrors.NewBadRequest(err.Error())
	}
	return AllowBindingToClusterSet(b.kubeClient, binding.Spec.ClusterSet, req.UserInfo)
}

// ValidateUpdate implements webhook.CustomValidator so a webhook will be registered for the type
func (b *ManagedClusterSetBindingWebhook) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) error {
	binding, ok := newObj.(*v1beta1.ManagedClusterSetBinding)
	if !ok {
		return apierrors.NewBadRequest("Request clustersetbinding obj format is not right")
	}

	// force the instance name to match the target cluster set name
	if binding.Name != binding.Spec.ClusterSet {
		return apierrors.NewBadRequest("The ManagedClusterSetBinding must have the same name as the target ManagedClusterSet")
	}
	return nil
}

// ValidateDelete implements webhook.CustomValidator so a webhook will be registered for the type
func (b *ManagedClusterSetBindingWebhook) ValidateDelete(_ context.Context, obj runtime.Object) error {
	return nil
}

// AllowBindingToClusterSet checks if the user has permission to bind a particular cluster set
func AllowBindingToClusterSet(kubeClient kubernetes.Interface, clusterSetName string, userInfo authenticationv1.UserInfo) error {
	extra := make(map[string]authorizationv1.ExtraValue)
	for k, v := range userInfo.Extra {
		extra[k] = authorizationv1.ExtraValue(v)
	}

	sar := &authorizationv1.SubjectAccessReview{
		Spec: authorizationv1.SubjectAccessReviewSpec{
			User:   userInfo.Username,
			UID:    userInfo.UID,
			Groups: userInfo.Groups,
			Extra:  extra,
			ResourceAttributes: &authorizationv1.ResourceAttributes{
				Group:       "cluster.open-cluster-management.io",
				Resource:    "managedclustersets",
				Subresource: "bind",
				Verb:        "create",
				Name:        clusterSetName,
			},
		},
	}
	sar, err := kubeClient.AuthorizationV1().SubjectAccessReviews().Create(context.TODO(), sar, metav1.CreateOptions{})
	if err != nil {
		return apierrors.NewForbidden(
			v1beta1.Resource("managedclustersets/bind"),
			clusterSetName,
			err,
		)
	}
	if !sar.Status.Allowed {
		return apierrors.NewForbidden(
			v1beta1.Resource("managedclustersets/bind"),
			clusterSetName,
			fmt.Errorf("user %q is not allowed to bind cluster set %q", userInfo.Username, clusterSetName),
		)
	}
	return nil
}
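AllowBindingToClusterSet is exported so that other API versions of the binding webhook can reuse the same SubjectAccessReview check. A minimal sketch of exercising it directly against a fake clientset (the approving reactor and the user name are illustrative assumptions, not part of this commit):

package v1beta1_test

import (
	"fmt"

	authenticationv1 "k8s.io/api/authentication/v1"
	authorizationv1 "k8s.io/api/authorization/v1"
	"k8s.io/apimachinery/pkg/runtime"
	kubefake "k8s.io/client-go/kubernetes/fake"
	clienttesting "k8s.io/client-go/testing"

	webhookv1beta1 "open-cluster-management.io/registration/pkg/webhook/v1beta1"
)

func ExampleAllowBindingToClusterSet() {
	// Fake clientset whose reactor approves every SubjectAccessReview it receives.
	kubeClient := kubefake.NewSimpleClientset()
	kubeClient.PrependReactor("create", "subjectaccessreviews",
		func(action clienttesting.Action) (bool, runtime.Object, error) {
			return true, &authorizationv1.SubjectAccessReview{
				Status: authorizationv1.SubjectAccessReviewStatus{Allowed: true},
			}, nil
		})

	err := webhookv1beta1.AllowBindingToClusterSet(kubeClient, "clusterset1",
		authenticationv1.UserInfo{Username: "alice"})
	fmt.Println(err)
	// Output: <nil>
}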
176  pkg/webhook/v1beta1/managedclustersetbinding_validating_test.go  Normal file
@@ -0,0 +1,176 @@
package v1beta1

import (
	"context"
	"testing"

	admissionv1 "k8s.io/api/admission/v1"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"

	authorizationv1 "k8s.io/api/authorization/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	kubefake "k8s.io/client-go/kubernetes/fake"

	clienttesting "k8s.io/client-go/testing"
	"open-cluster-management.io/api/cluster/v1beta1"
)

func TestValidateCreate(t *testing.T) {
	cases := []struct {
		name                     string
		setbinding               *v1beta1.ManagedClusterSetBinding
		expectedError            bool
		allowBindingToClusterSet bool
	}{
		{
			name: "Right Clustersetbinding",
			expectedError: false,
			allowBindingToClusterSet: true,
			setbinding: &v1beta1.ManagedClusterSetBinding{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "ns-1",
					Name: "setbinding-1",
				},
				Spec: v1beta1.ManagedClusterSetBindingSpec{
					ClusterSet: "setbinding-1",
				},
			},
		},
		{
			name: "Set name is not right",
			expectedError: true,
			allowBindingToClusterSet: true,
			setbinding: &v1beta1.ManagedClusterSetBinding{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "ns-1",
					Name: "setbinding-1",
				},
				Spec: v1beta1.ManagedClusterSetBindingSpec{
					ClusterSet: "setbinding",
				},
			},
		},
		{
			name: "Do not have permission",
			expectedError: true,
			allowBindingToClusterSet: false,
			setbinding: &v1beta1.ManagedClusterSetBinding{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "ns-1",
					Name: "setbinding-1",
				},
				Spec: v1beta1.ManagedClusterSetBindingSpec{
					ClusterSet: "setbinding-1",
				},
			},
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			kubeClient := kubefake.NewSimpleClientset()
			kubeClient.PrependReactor(
				"create",
				"subjectaccessreviews",
				func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) {
					return true, &authorizationv1.SubjectAccessReview{
						Status: authorizationv1.SubjectAccessReviewStatus{
							Allowed: c.allowBindingToClusterSet,
						},
					}, nil
				},
			)
			w := ManagedClusterSetBindingWebhook{
				kubeClient: kubeClient,
			}
			req := admission.Request{
				AdmissionRequest: admissionv1.AdmissionRequest{
					Resource: metav1.GroupVersionResource{
						Group: "test.open-cluster-management.io",
						Version: "v1",
						Resource: "tests",
					},
				},
			}

			ctx := admission.NewContextWithRequest(context.Background(), req)

			err := w.ValidateCreate(ctx, c.setbinding)
			if err != nil && !c.expectedError {
				t.Errorf("Case:%v, Expect nil Error but got err:%v", c.name, err)
			}
			if err == nil && c.expectedError {
				t.Errorf("Case:%v, Expect Error but got nil", c.name)
			}
		})
	}
	w := ManagedClusterSetBindingWebhook{}
	err := w.ValidateCreate(context.Background(), &v1beta1.ManagedClusterSet{})
	if err == nil {
		t.Errorf("Non setbinding obj, Expect Error but got nil")
	}
}

func TestValidateUpdate(t *testing.T) {
	cases := []struct {
		name          string
		setbinding    *v1beta1.ManagedClusterSetBinding
		expectedError bool
	}{
		{
			name: "Right Clustersetbinding",
			expectedError: false,
			setbinding: &v1beta1.ManagedClusterSetBinding{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "ns-1",
					Name: "setbinding-1",
				},
				Spec: v1beta1.ManagedClusterSetBindingSpec{
					ClusterSet: "setbinding-1",
				},
			},
		},
		{
			name: "Set name is not right",
			expectedError: true,
			setbinding: &v1beta1.ManagedClusterSetBinding{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "ns-1",
					Name: "setbinding-1",
				},
				Spec: v1beta1.ManagedClusterSetBindingSpec{
					ClusterSet: "setbinding",
				},
			},
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			w := ManagedClusterSetBindingWebhook{}
			req := admission.Request{
				AdmissionRequest: admissionv1.AdmissionRequest{
					Resource: metav1.GroupVersionResource{
						Group: "test.open-cluster-management.io",
						Version: "v1",
						Resource: "tests",
					},
				},
			}

			ctx := admission.NewContextWithRequest(context.Background(), req)

			err := w.ValidateUpdate(ctx, nil, c.setbinding)
			if err != nil && !c.expectedError {
				t.Errorf("Case:%v, Expect nil Error but got err:%v", c.name, err)
			}
			if err == nil && c.expectedError {
				t.Errorf("Case:%v, Expect Error but got nil", c.name)
			}
		})
	}
	w := ManagedClusterSetBindingWebhook{}
	err := w.ValidateUpdate(context.Background(), nil, &v1beta1.ManagedClusterSet{})
	if err == nil {
		t.Errorf("Non setbinding obj, Expect Error but got nil")
	}
}
@@ -3,6 +3,8 @@ package v1beta1
import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/kubernetes"
	"open-cluster-management.io/api/cluster/v1beta1"
	ctrl "sigs.k8s.io/controller-runtime"
)
@@ -17,6 +19,7 @@ var (
func addKnownTypes(scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(v1beta1.GroupVersion,
		&ManagedClusterSet{},
		&v1beta1.ManagedClusterSetBinding{},
	)
	metav1.AddToGroupVersion(scheme, v1beta1.GroupVersion)
	return nil
@@ -26,8 +29,35 @@ type ManagedClusterSet struct {
	v1beta1.ManagedClusterSet
}

func SetupWebhookWithManager(mgr ctrl.Manager) error {
type ManagedClusterSetBindingWebhook struct {
	kubeClient kubernetes.Interface
}

func (r *ManagedClusterSet) SetupWebhookWithManager(mgr ctrl.Manager) error {
	return ctrl.NewWebhookManagedBy(mgr).
		For(&ManagedClusterSet{}).
		For(r).
		Complete()
}

func (b *ManagedClusterSetBindingWebhook) Init(mgr ctrl.Manager) error {
	err := b.SetupWebhookWithManager(mgr)
	if err != nil {
		return err
	}
	b.kubeClient, err = kubernetes.NewForConfig(mgr.GetConfig())
	return err
}

func (b *ManagedClusterSetBindingWebhook) SetupWebhookWithManager(mgr ctrl.Manager) error {
	return ctrl.NewWebhookManagedBy(mgr).
		WithValidator(b).
		For(&v1beta1.ManagedClusterSetBinding{}).
		Complete()
}

func ClustersetbindingGroupKind() schema.GroupKind {
	return schema.GroupKind{
		Group: v1beta1.GroupName,
		Kind:  "ManagedClusterSetBinding",
	}
}

54  pkg/webhook/v1beta2/managedclustersetbinding_validating.go  Normal file
@@ -0,0 +1,54 @@
package v1beta2

import (
	"context"

	apierrors "k8s.io/apimachinery/pkg/api/errors"

	"k8s.io/apimachinery/pkg/runtime"
	"open-cluster-management.io/api/cluster/v1beta2"
	internalv1beta1 "open-cluster-management.io/registration/pkg/webhook/v1beta1"

	"sigs.k8s.io/controller-runtime/pkg/webhook"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

var _ webhook.CustomValidator = &ManagedClusterSetBindingWebhook{}

// ValidateCreate implements webhook.CustomValidator so a webhook will be registered for the type
func (b *ManagedClusterSetBindingWebhook) ValidateCreate(ctx context.Context, obj runtime.Object) error {
	binding, ok := obj.(*v1beta2.ManagedClusterSetBinding)
	if !ok {
		return apierrors.NewBadRequest("Request clustersetbinding obj format is not right")
	}

	// force the instance name to match the target cluster set name
	if binding.Name != binding.Spec.ClusterSet {
		return apierrors.NewBadRequest("The ManagedClusterSetBinding must have the same name as the target ManagedClusterSet")
	}

	req, err := admission.RequestFromContext(ctx)
	if err != nil {
		return apierrors.NewBadRequest(err.Error())
	}
	return internalv1beta1.AllowBindingToClusterSet(b.kubeClient, binding.Spec.ClusterSet, req.UserInfo)
}

// ValidateUpdate implements webhook.CustomValidator so a webhook will be registered for the type
func (b *ManagedClusterSetBindingWebhook) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) error {
	binding, ok := newObj.(*v1beta2.ManagedClusterSetBinding)
	if !ok {
		return apierrors.NewBadRequest("Request clustersetbinding obj format is not right")
	}

	// force the instance name to match the target cluster set name
	if binding.Name != binding.Spec.ClusterSet {
		return apierrors.NewBadRequest("The ManagedClusterSetBinding must have the same name as the target ManagedClusterSet")
	}
	return nil
}

// ValidateDelete implements webhook.CustomValidator so a webhook will be registered for the type
func (b *ManagedClusterSetBindingWebhook) ValidateDelete(_ context.Context, obj runtime.Object) error {
	return nil
}
@@ -3,6 +3,7 @@ package v1beta2
import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes"
	"open-cluster-management.io/api/cluster/v1beta2"
	ctrl "sigs.k8s.io/controller-runtime"
)
@@ -17,6 +18,7 @@ var (
func addKnownTypes(scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(v1beta2.GroupVersion,
		&ManagedClusterSet{},
		&v1beta2.ManagedClusterSetBinding{},
	)
	metav1.AddToGroupVersion(scheme, v1beta2.GroupVersion)
	return nil
@@ -26,8 +28,28 @@ type ManagedClusterSet struct {
	v1beta2.ManagedClusterSet
}

func SetupWebhookWithManager(mgr ctrl.Manager) error {
type ManagedClusterSetBindingWebhook struct {
	kubeClient kubernetes.Interface
}

func (src *ManagedClusterSet) SetupWebhookWithManager(mgr ctrl.Manager) error {
	return ctrl.NewWebhookManagedBy(mgr).
		For(&ManagedClusterSet{}).
		For(src).
		Complete()
}

func (b *ManagedClusterSetBindingWebhook) Init(mgr ctrl.Manager) error {
	err := b.SetupWebhookWithManager(mgr)
	if err != nil {
		return err
	}
	b.kubeClient, err = kubernetes.NewForConfig(mgr.GetConfig())
	return err
}

func (b *ManagedClusterSetBindingWebhook) SetupWebhookWithManager(mgr ctrl.Manager) error {
	return ctrl.NewWebhookManagedBy(mgr).
		WithValidator(b).
		For(&v1beta2.ManagedClusterSetBinding{}).
		Complete()
}

@@ -17,7 +17,6 @@ import (
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/util/retry"
	apiregistrationclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1"

	logf "sigs.k8s.io/controller-runtime/pkg/log"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"
@@ -28,7 +27,7 @@ import (

var hubNamespace = "open-cluster-management-hub"
var mutatingWebhookName = "managedcluster-admission"
var mutatingWebhookContainerName = "managedcluster-admission"
var mutatingWebhookContainerName = "webhook"

func TestE2E(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
@@ -41,13 +40,12 @@ const (
)

var (
	hubClient           kubernetes.Interface
	hubDynamicClient    dynamic.Interface
	hubAPIServiceClient *apiregistrationclient.ApiregistrationV1Client
	hubAddOnClient      addonclient.Interface
	clusterClient       clusterclient.Interface
	registrationImage   string
	clusterCfg          *rest.Config
	hubClient         kubernetes.Interface
	hubDynamicClient  dynamic.Interface
	hubAddOnClient    addonclient.Interface
	clusterClient     clusterclient.Interface
	registrationImage string
	clusterCfg        *rest.Config
)

// This suite is sensitive to the following environment variables:
@@ -85,11 +83,6 @@ var _ = ginkgo.BeforeSuite(func() {
		return err
	}

	hubAPIServiceClient, err = apiregistrationclient.NewForConfig(clusterCfg)
	if err != nil {
		return err
	}

	hubAddOnClient, err = addonclient.NewForConfig(clusterCfg)
	if err != nil {
		return err

@@ -12,29 +12,13 @@ import (
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/rand"
	apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
	clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1"
)

var _ = ginkgo.Describe("ManagedClusterSetBinding", func() {
	ginkgo.BeforeEach(func() {
		// make sure the api service v1.admission.cluster.open-cluster-management.io is available
		gomega.Eventually(func() bool {
			apiService, err := hubAPIServiceClient.APIServices().Get(context.TODO(), apiserviceName, metav1.GetOptions{})
			if err != nil {
				return false
			}
			if len(apiService.Status.Conditions) == 0 {
				return false
			}
			return apiService.Status.Conditions[0].Type == apiregistrationv1.Available &&
				apiService.Status.Conditions[0].Status == apiregistrationv1.ConditionTrue
		}, 60*time.Second, 1*time.Second).Should(gomega.BeTrue())
	})

	ginkgo.Context("ManagedClusterSetBinding", func() {
		var namespace string

		ginkgo.BeforeEach(func() {
			// create a namespace for testing
			namespace = fmt.Sprintf("ns-%s", rand.String(6))

@@ -3,6 +3,7 @@ package e2e
import (
	"context"
	"fmt"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
	"k8s.io/apimachinery/pkg/api/meta"
@@ -20,7 +21,6 @@ var _ = ginkgo.Describe("Taints update check", func() {
		managedCluster *clusterv1.ManagedCluster
		clusterName    string
	)

	ginkgo.BeforeEach(func() {
		clusterName = fmt.Sprintf("managedcluster-%s", rand.String(6))
		managedCluster = &clusterv1.ManagedCluster{

@@ -12,6 +12,7 @@ import (
	clusterv1client "open-cluster-management.io/api/client/cluster/clientset/versioned"
	clusterv1 "open-cluster-management.io/api/cluster/v1"
	clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1"
	clusterv1beta2 "open-cluster-management.io/api/cluster/v1beta2"

	corev1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
@@ -22,12 +23,10 @@ import (
	"k8s.io/apimachinery/pkg/util/wait"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/util/retry"
	apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
)

const (
	defaultClusterSetName = "default"
	apiserviceName        = "v1.admission.cluster.open-cluster-management.io"
	invalidURL            = "127.0.0.1:8001"
	validURL              = "https://127.0.0.1:8443"
	saNamespace           = "default"
@@ -45,18 +44,6 @@ var _ = ginkgo.Describe("Admission webhook", func() {
			clusterClient:     clusterClient,
			registrationImage: registrationImage,
		}
		// make sure the api service v1.admission.cluster.open-cluster-management.io is available
		gomega.Eventually(func() bool {
			apiService, err := hubAPIServiceClient.APIServices().Get(context.TODO(), apiserviceName, metav1.GetOptions{})
			if err != nil {
				return false
			}
			if len(apiService.Status.Conditions) == 0 {
				return false
			}
			return apiService.Status.Conditions[0].Type == apiregistrationv1.Available &&
				apiService.Status.Conditions[0].Status == apiregistrationv1.ConditionTrue
		}, 60*time.Second, 1*time.Second).Should(gomega.BeTrue())
	})

	ginkgo.Context("ManagedCluster", func() {
@@ -237,13 +224,6 @@ var _ = ginkgo.Describe("Admission webhook", func() {
			_, err = unauthorizedClient.ClusterV1().ManagedClusters().Create(context.TODO(), managedCluster, metav1.CreateOptions{})
			gomega.Expect(err).To(gomega.HaveOccurred())
			gomega.Expect(errors.IsForbidden(err)).Should(gomega.BeTrue())
			gomega.Expect(err.Error()).Should(gomega.Equal(fmt.Sprintf(
				"admission webhook \"%s\" denied the request: user \"system:serviceaccount:%s:%s\" cannot update the HubAcceptsClient field",
				admissionName,
				saNamespace,
				sa,
			)))

			gomega.Expect(u.deleteManageClusterAndRelatedNamespace(clusterName)).ToNot(gomega.HaveOccurred())
			gomega.Expect(cleanupClusterClient(saNamespace, sa)).ToNot(gomega.HaveOccurred())
		})
@@ -360,14 +340,6 @@ var _ = ginkgo.Describe("Admission webhook", func() {
			_, err = unauthorizedClient.ClusterV1().ManagedClusters().Create(context.TODO(), managedCluster, metav1.CreateOptions{})
			gomega.Expect(err).To(gomega.HaveOccurred())
			gomega.Expect(errors.IsForbidden(err)).Should(gomega.BeTrue())
			gomega.Expect(err.Error()).Should(gomega.Equal(fmt.Sprintf(
				"admission webhook \"%s\" denied the request: user \"system:serviceaccount:%s:%s\" cannot add/remove a ManagedCluster to/from ManagedClusterSet \"%s\"",
				admissionName,
				saNamespace,
				sa,
				clusterSetName,
			)))

			gomega.Expect(u.deleteManageClusterAndRelatedNamespace(clusterName)).ToNot(gomega.HaveOccurred())
			gomega.Expect(cleanupClusterClient(saNamespace, sa)).ToNot(gomega.HaveOccurred())
		})
@@ -453,11 +425,6 @@ var _ = ginkgo.Describe("Admission webhook", func() {
			})
			gomega.Expect(err).To(gomega.HaveOccurred())
			gomega.Expect(errors.IsBadRequest(err)).Should(gomega.BeTrue())
			gomega.Expect(err.Error()).Should(gomega.Equal(fmt.Sprintf(
				"admission webhook \"%s\" denied the request: url \"%s\" is invalid in client configs",
				admissionName,
				invalidURL,
			)))
		})

		ginkgo.It("Should forbid the request when updating an unaccepted managed cluster to accepted by unauthorized user", func() {
@@ -485,12 +452,6 @@ var _ = ginkgo.Describe("Admission webhook", func() {
			})
			gomega.Expect(err).To(gomega.HaveOccurred())
			gomega.Expect(errors.IsForbidden(err)).Should(gomega.BeTrue())
			gomega.Expect(err.Error()).Should(gomega.Equal(fmt.Sprintf(
				"admission webhook \"%s\" denied the request: user \"system:serviceaccount:%s:%s\" cannot update the HubAcceptsClient field",
				admissionName,
				saNamespace,
				sa,
			)))

			gomega.Expect(cleanupClusterClient(saNamespace, sa)).ToNot(gomega.HaveOccurred())
		})
@@ -585,13 +546,6 @@ var _ = ginkgo.Describe("Admission webhook", func() {
			})
			gomega.Expect(err).To(gomega.HaveOccurred())
			gomega.Expect(errors.IsForbidden(err)).Should(gomega.BeTrue())
			gomega.Expect(err.Error()).Should(gomega.Equal(fmt.Sprintf(
				"admission webhook \"%s\" denied the request: user \"system:serviceaccount:%s:%s\" cannot add/remove a ManagedCluster to/from ManagedClusterSet \"%s\"",
				admissionName,
				saNamespace,
				sa,
				clusterSetName,
			)))

			gomega.Expect(cleanupClusterClient(saNamespace, sa)).ToNot(gomega.HaveOccurred())
		})
@@ -642,10 +596,6 @@ var _ = ginkgo.Describe("Admission webhook", func() {
			_, err := clusterClient.ClusterV1beta1().ManagedClusterSetBindings(namespace).Create(context.TODO(), managedClusterSetBinding, metav1.CreateOptions{})
			gomega.Expect(err).To(gomega.HaveOccurred())
			gomega.Expect(errors.IsBadRequest(err)).Should(gomega.BeTrue())
			gomega.Expect(err.Error()).Should(gomega.Equal(fmt.Sprintf(
				"admission webhook \"%s\" denied the request: The ManagedClusterSetBinding must have the same name as the target ManagedClusterSet",
				admissionName,
			)))
		})

		ginkgo.It("should accept the request when creating a ManagedClusterSetBinding by authorized user", func() {
@@ -693,13 +643,6 @@ var _ = ginkgo.Describe("Admission webhook", func() {
			_, err = unauthorizedClient.ClusterV1beta1().ManagedClusterSetBindings(namespace).Create(context.TODO(), managedClusterSetBinding, metav1.CreateOptions{})
			gomega.Expect(err).To(gomega.HaveOccurred())
			gomega.Expect(errors.IsForbidden(err)).Should(gomega.BeTrue())
			gomega.Expect(err.Error()).Should(gomega.Equal(fmt.Sprintf(
				"admission webhook \"%s\" denied the request: user \"system:serviceaccount:%s:%s\" is not allowed to bind cluster set \"%s\"",
				admissionName,
				namespace,
				sa,
				clusterSetName,
			)))

			gomega.Expect(cleanupClusterClient(namespace, sa)).ToNot(gomega.HaveOccurred())
		})
@@ -719,10 +662,6 @@ var _ = ginkgo.Describe("Admission webhook", func() {
			_, err = clusterClient.ClusterV1beta1().ManagedClusterSetBindings(namespace).Patch(context.TODO(), managedClusterSetBinding.Name, types.MergePatchType, []byte(patch), metav1.PatchOptions{})
			gomega.Expect(err).To(gomega.HaveOccurred())
			gomega.Expect(errors.IsBadRequest(err)).Should(gomega.BeTrue())
			gomega.Expect(err.Error()).Should(gomega.Equal(fmt.Sprintf(
				"admission webhook \"%s\" denied the request: The ManagedClusterSetBinding must have the same name as the target ManagedClusterSet",
				admissionName,
			)))
		})

		ginkgo.It("should accept the request when updating the label of the ManagedClusterSetBinding by user without binding permission", func() {
@@ -755,6 +694,114 @@ var _ = ginkgo.Describe("Admission webhook", func() {
			}, 60*time.Second, 1*time.Second).Should(gomega.Succeed())
		})
	})

	ginkgo.Context("Creating a ManagedClusterSetBinding v1beta2", func() {
		ginkgo.It("should deny the request when creating a ManagedClusterSetBinding with unmatched cluster set name", func() {
			clusterSetName := fmt.Sprintf("clusterset-%s", rand.String(6))
			clusterSetBindingName := fmt.Sprintf("clustersetbinding-%s", rand.String(6))
			managedClusterSetBinding := newManagedClusterSetBindingV1beta2(namespace, clusterSetBindingName, clusterSetName)
			_, err := clusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Create(context.TODO(), managedClusterSetBinding, metav1.CreateOptions{})
			gomega.Expect(err).To(gomega.HaveOccurred())
			gomega.Expect(errors.IsBadRequest(err)).Should(gomega.BeTrue())
		})

		ginkgo.It("should accept the request when creating a ManagedClusterSetBinding by authorized user", func() {
			sa := fmt.Sprintf("webhook-sa-%s", rand.String(6))
			clusterSetName := fmt.Sprintf("clusterset-%s", rand.String(6))

			authorizedClient, err := buildClusterClient(namespace, sa, []rbacv1.PolicyRule{
				{
					APIGroups: []string{"cluster.open-cluster-management.io"},
					Resources: []string{"managedclustersets/bind"},
					Verbs:     []string{"create"},
				},
			}, []rbacv1.PolicyRule{
				{
					APIGroups: []string{"cluster.open-cluster-management.io"},
					Resources: []string{"managedclustersetbindings"},
					Verbs:     []string{"create", "get", "update", "patch"},
				},
			})
			gomega.Expect(err).ToNot(gomega.HaveOccurred())

			managedClusterSetBinding := newManagedClusterSetBindingV1beta2(namespace, clusterSetName, clusterSetName)
			_, err = authorizedClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Create(context.TODO(), managedClusterSetBinding, metav1.CreateOptions{})
			gomega.Expect(err).NotTo(gomega.HaveOccurred())

			gomega.Expect(cleanupClusterClient(namespace, sa)).ToNot(gomega.HaveOccurred())
		})

		ginkgo.It("should forbid the request when creating a ManagedClusterSetBinding by unauthorized user", func() {
			sa := fmt.Sprintf("webhook-sa-%s", rand.String(6))
			clusterSetName := fmt.Sprintf("clusterset-%s", rand.String(6))

			// prepare an unauthorized cluster client from a service account who can create/get/update ManagedClusterSetBinding
			// but cannot bind ManagedClusterSet
			unauthorizedClient, err := buildClusterClient(namespace, sa, nil, []rbacv1.PolicyRule{
				{
					APIGroups: []string{"cluster.open-cluster-management.io"},
					Resources: []string{"managedclustersetbindings"},
					Verbs:     []string{"create", "get", "update", "patch"},
				},
			})
			gomega.Expect(err).ToNot(gomega.HaveOccurred())

			managedClusterSetBinding := newManagedClusterSetBindingV1beta2(namespace, clusterSetName, clusterSetName)
			_, err = unauthorizedClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Create(context.TODO(), managedClusterSetBinding, metav1.CreateOptions{})
			gomega.Expect(err).To(gomega.HaveOccurred())
			gomega.Expect(errors.IsForbidden(err)).Should(gomega.BeTrue())

			gomega.Expect(cleanupClusterClient(namespace, sa)).ToNot(gomega.HaveOccurred())
		})
	})

	ginkgo.Context("Updating a ManagedClusterSetBinding", func() {
		ginkgo.It("should deny the request when updating a ManagedClusterSetBinding with a new cluster set", func() {
			// create a cluster set binding
			clusterSetName := fmt.Sprintf("clusterset-%s", rand.String(6))
			managedClusterSetBinding := newManagedClusterSetBindingV1beta2(namespace, clusterSetName, clusterSetName)
			managedClusterSetBinding, err := clusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Create(context.TODO(), managedClusterSetBinding, metav1.CreateOptions{})
			gomega.Expect(err).NotTo(gomega.HaveOccurred())

			// update the cluster set binding
			clusterSetName = fmt.Sprintf("clusterset-%s", rand.String(6))
			patch := fmt.Sprintf("{\"spec\": {\"clusterSet\": %q}}", clusterSetName)
			_, err = clusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Patch(context.TODO(), managedClusterSetBinding.Name, types.MergePatchType, []byte(patch), metav1.PatchOptions{})
			gomega.Expect(err).To(gomega.HaveOccurred())
			gomega.Expect(errors.IsBadRequest(err)).Should(gomega.BeTrue())
		})

		ginkgo.It("should accept the request when updating the label of the ManagedClusterSetBinding by user without binding permission", func() {
			// create a cluster set binding
			clusterSetName := fmt.Sprintf("clusterset-%s", rand.String(6))
			managedClusterSetBinding := newManagedClusterSetBindingV1beta2(namespace, clusterSetName, clusterSetName)
			_, err := clusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Create(context.TODO(), managedClusterSetBinding, metav1.CreateOptions{})
			gomega.Expect(err).NotTo(gomega.HaveOccurred())

			// create a client without clusterset binding permission
			sa := fmt.Sprintf("webhook-sa-%s", rand.String(6))
			unauthorizedClient, err := buildClusterClient(namespace, sa, nil, []rbacv1.PolicyRule{
				{
					APIGroups: []string{"cluster.open-cluster-management.io"},
					Resources: []string{"managedclustersetbindings"},
					Verbs:     []string{"create", "get", "update"},
				},
			})
			gomega.Expect(err).ToNot(gomega.HaveOccurred())

			// update the cluster set binding by unauthorized user
			gomega.Eventually(func() error {
				binding, err := unauthorizedClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Get(context.TODO(), clusterSetName, metav1.GetOptions{})
				if err != nil {
					return err
				}
				binding.Labels = map[string]string{"owner": "user"}
				_, err = unauthorizedClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Update(context.TODO(), binding, metav1.UpdateOptions{})
				return err
			}, 60*time.Second, 1*time.Second).Should(gomega.Succeed())
		})
	})

	})
})

@@ -787,6 +834,18 @@ func newManagedClusterSetBinding(namespace, name string, clusterSet string) *clu
	}
}

func newManagedClusterSetBindingV1beta2(namespace, name string, clusterSet string) *clusterv1beta2.ManagedClusterSetBinding {
	return &clusterv1beta2.ManagedClusterSetBinding{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespace,
			Name:      name,
		},
		Spec: clusterv1beta2.ManagedClusterSetBindingSpec{
			ClusterSet: clusterSet,
		},
	}
}

func buildClusterClient(saNamespace, saName string, clusterPolicyRules, policyRules []rbacv1.PolicyRule) (clusterv1client.Interface, error) {
	var err error
