add detached mode to cluster-manager controller (#190)

Signed-off-by: xuezhaojun <zxue@redhat.com>
Author: xuezhaojun
Date: 2022-02-24 20:57:40 +08:00 (committed by GitHub)
parent 71fcc78d5f
commit 00fcdcd30a
57 changed files with 1381 additions and 698 deletions

View File

@@ -112,7 +112,7 @@ apply-hub-cr:
$(SED_CMD) -e "s,quay.io/open-cluster-management/registration,$(REGISTRATION_IMAGE)," -e "s,quay.io/open-cluster-management/work,$(WORK_IMAGE)," -e "s,quay.io/open-cluster-management/placement,$(PLACEMENT_IMAGE)," deploy/cluster-manager/config/samples/operator_open-cluster-management_clustermanagers.cr.yaml | $(KUBECTL) apply -f -
apply-hub-cr-detached: external-hub-secret
$(SED_CMD) -e "s,cluster-manager,$(DETACHED_CLUSTER_MANAGER_NAME)," -e "s,mode: Default,mode: Detached," -e "s,quay.io/open-cluster-management/registration,$(REGISTRATION_IMAGE)," -e "s,quay.io/open-cluster-management/work,$(WORK_IMAGE)," -e "s,quay.io/open-cluster-management/placement,$(PLACEMENT_IMAGE)," deploy/cluster-manager/config/samples/operator_open-cluster-management_clustermanagers.cr.yaml | $(KUBECTL) apply -f -
$(SED_CMD) -e "s,quay.io/open-cluster-management/registration,$(REGISTRATION_IMAGE)," -e "s,quay.io/open-cluster-management/work,$(WORK_IMAGE)," -e "s,quay.io/open-cluster-management/placement,$(PLACEMENT_IMAGE)," deploy/cluster-manager/config/samples/operator_open-cluster-management_clustermanagers_detached.cr.yaml | $(KUBECTL) apply -f -
clean-hub: clean-hub-cr clean-hub-operator

View File

@@ -42,7 +42,7 @@ We mainly provide deployment in two scenarios:
### Deploy all-in-one deployment
1. Set an env variable `KUBECONFIG` to kubeconfig file path.
1. Set the env variable `KUBECONFIG` to the kubeconfig file path.
```shell
export KUBECONFIG=$HOME/.kube/config
```
@@ -82,10 +82,82 @@ We mainly provide deployment in two scenarios:
```
5. To clean the spoke environment.
```shell
kubectl config use-context {spoke context}
kubectl config use-context {spoke-context}
make clean-spoke
```
### Deploy hub (ClusterManager) with Detached mode
1. Create 3 kind clusters: a management cluster, a hub cluster, and a managed cluster.
```shell
kind create cluster --name hub
cat <<EOF | kind create cluster --name management --config=-
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
extraPortMappings:
- containerPort: 30443
hostPort: 30443
protocol: TCP
- containerPort: 31443
hostPort: 31443
protocol: TCP
EOF
kind create cluster --name managed
```
2. Set the env variable `KUBECONFIG` to the kubeconfig file path.
```shell
export KUBECONFIG=$HOME/.kube/config
```
3. Get the `EXTERNAL_HUB_KUBECONFIG` kubeconfig.
```shell
kind get kubeconfig --name hub --internal > ./.external-hub-kubeconfig
```
4. Switch to the management cluster and deploy the hub components.
```shell
kubectl config use-context {management-context}
make deploy-hub-detached
```
After the hub is deployed successfully, you need to manually expose the webhook servers in the management cluster.
```shell
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
name: cluster-manager-registration-webhook-external
namespace: cluster-manager
spec:
type: NodePort
selector:
app: cluster-manager-registration-webhook
ports:
- port: 443
targetPort: 6443
nodePort: 30443
---
apiVersion: v1
kind: Service
metadata:
name: cluster-manager-work-webhook-external
namespace: cluster-manager
spec:
type: NodePort
selector:
app: cluster-manager-work-webhook
ports:
- port: 443
targetPort: 6443
nodePort: 31443
EOF
```
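The two NodePorts match the `extraPortMappings` declared for the management kind cluster above, and the sample detached CR later in this commit points the hub's apiserver at them through the node address `management-control-plane`. If admission requests fail later, a plain TCP probe can confirm reachability first. A minimal sketch (the program itself is illustrative and not part of this repo; addresses and ports are taken from the sample CR):
```go
package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	// "management-control-plane" is the kind node name used by the sample
	// detached ClusterManager CR; 30443/31443 are the NodePorts defined above.
	for _, addr := range []string{"management-control-plane:30443", "management-control-plane:31443"} {
		conn, err := net.DialTimeout("tcp", addr, 3*time.Second)
		if err != nil {
			fmt.Printf("webhook at %s not reachable: %v\n", addr, err)
			continue
		}
		conn.Close()
		fmt.Printf("webhook at %s reachable\n", addr)
	}
}
```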
### Deploy spoke (Klusterlet) with Detached mode
We support deploying the Klusterlet (registration-agent, work-agent) outside of the managed cluster; this is called `Detached` mode, and the cluster where the Klusterlet runs is defined as the management cluster.

View File

@@ -0,0 +1,17 @@
apiVersion: operator.open-cluster-management.io/v1
kind: ClusterManager
metadata:
name: cluster-manager
spec:
registrationImagePullSpec: quay.io/open-cluster-management/registration
workImagePullSpec: quay.io/open-cluster-management/work
placementImagePullSpec: quay.io/open-cluster-management/placement
deployOption:
mode: Detached
detached:
registrationWebhookConfiguration:
address: management-control-plane
port: 30443
workWebhookConfiguration:
address: management-control-plane
port: 31443

View File

@@ -18,6 +18,31 @@ metadata:
"registrationImagePullSpec": "quay.io/open-cluster-management/registration",
"workImagePullSpec": "quay.io/open-cluster-management/work"
}
},
{
"apiVersion": "operator.open-cluster-management.io/v1",
"kind": "ClusterManager",
"metadata": {
"name": "cluster-manager"
},
"spec": {
"deployOption": {
"detached": {
"registrationWebhookConfiguration": {
"address": "management-control-plane",
"port": 30443
},
"workWebhookConfiguration": {
"address": "management-control-plane",
"port": 31443
}
},
"mode": "Detached"
},
"placementImagePullSpec": "quay.io/open-cluster-management/placement",
"registrationImagePullSpec": "quay.io/open-cluster-management/registration",
"workImagePullSpec": "quay.io/open-cluster-management/work"
}
}
]
capabilities: Basic Install

View File

@@ -34,6 +34,42 @@ spec:
deployOption:
description: DeployOption contains the options of deploying a klusterlet
properties:
detached:
description: Detached includes configurations we need for the clustermanager in the detached mode.
properties:
registrationWebhookConfiguration:
description: RegistrationWebhookConfiguration represents the customized webhook-server configuration of registration.
properties:
address:
description: Address represents the address of a webhook-server. It could be in IP format or FQDN format. The Address must be reachable by the apiserver of the hub cluster.
pattern: ^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$
type: string
port:
default: 443
description: Port represents the port of a webhook-server. The default value of Port is 443.
format: int32
maximum: 65535
type: integer
required:
- address
type: object
workWebhookConfiguration:
description: WorkWebhookConfiguration represents the customized webhook-server configuration of work.
properties:
address:
description: Address represents the address of a webhook-server. It could be in IP format or FQDN format. The Address must be reachable by the apiserver of the hub cluster.
pattern: ^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$
type: string
port:
default: 443
description: Port represents the port of a webhook-server. The default value of Port is 443.
format: int32
maximum: 65535
type: integer
required:
- address
type: object
type: object
mode:
description: 'Mode can be Default or Detached. It is Default mode if not specified. In Default mode, all klusterlet related resources are deployed on the managed cluster. In Detached mode, only crd and configurations are installed on the spoke/managed cluster. Controllers run in another cluster (defined as management-cluster) and connect to the managed cluster with the kubeconfig in the secret of "external-managed-kubeconfig" (a kubeconfig of the managed cluster with cluster-admin permission). Note: Do not modify the Mode field once it''s applied.'
type: string
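The `pattern` above is the only validation applied to `address`, so it is worth checking candidate values against it before applying a CR. A small standalone sketch using the exact regex from the CRD:
```go
package main

import (
	"fmt"
	"regexp"
)

// addressPattern is copied verbatim from the CRD above; it accepts dotted
// labels of alphanumerics and hyphens, i.e. IPv4 addresses and FQDNs.
var addressPattern = regexp.MustCompile(`^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$`)

func main() {
	for _, addr := range []string{"management-control-plane", "10.0.0.1", "webhook.example.com", "-bad-.example.com"} {
		fmt.Printf("%-25s valid=%v\n", addr, addressPattern.MatchString(addr))
	}
}
```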

go.mod
View File

@@ -11,7 +11,7 @@ require (
github.com/onsi/gomega v1.10.1
github.com/openshift/api v0.0.0-20210331193751-3acddb19d360
github.com/openshift/build-machinery-go v0.0.0-20211213093930-7e33a7eb4ce3
github.com/openshift/library-go v0.0.0-20210407110438-80b76d711afb
github.com/openshift/library-go v0.0.0-20210407140145-f831e911c638
github.com/spf13/cobra v1.1.1
github.com/spf13/pflag v1.0.5
k8s.io/api v0.21.1

go.sum
View File

@@ -423,8 +423,8 @@ github.com/openshift/build-machinery-go v0.0.0-20210209125900-0da259a2c359/go.mo
github.com/openshift/build-machinery-go v0.0.0-20211213093930-7e33a7eb4ce3 h1:65oBhJYHzYK5VL0gF1eiYY37lLzyLZ47b9y5Kib1nf8=
github.com/openshift/build-machinery-go v0.0.0-20211213093930-7e33a7eb4ce3/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE=
github.com/openshift/client-go v0.0.0-20210331195552-cf6c2669e01f/go.mod h1:hHaRJ6vp2MRd/CpuZ1oJkqnMGy5eEnoAkQmKPZKcUPI=
github.com/openshift/library-go v0.0.0-20210407110438-80b76d711afb h1:11VU4Ppng9FtJJ5D9eTQZhZjtq0KMTfy5kmvUpVeW68=
github.com/openshift/library-go v0.0.0-20210407110438-80b76d711afb/go.mod h1:pnz961veImKsbn7pQcuFbcVpCQosYiC1fUOjzEDeOLU=
github.com/openshift/library-go v0.0.0-20210407140145-f831e911c638 h1:JVMywK3dwzPAwpTCWIHn2Emx5L11I+0OR15CZXHI4do=
github.com/openshift/library-go v0.0.0-20210407140145-f831e911c638/go.mod h1:pnz961veImKsbn7pQcuFbcVpCQosYiC1fUOjzEDeOLU=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=

View File

@@ -4,7 +4,7 @@ source "$(dirname "${BASH_SOURCE}")/init.sh"
for f in $HUB_CRD_FILES
do
cp $f ./manifests/cluster-manager/
cp $f ./manifests/cluster-manager/hub/
done
for f in $SPOKE_CRD_FILES

View File

@@ -4,7 +4,7 @@ source "$(dirname "${BASH_SOURCE}")/init.sh"
for f in $HUB_CRD_FILES
do
diff -N $f ./manifests/cluster-manager/$(basename $f) || ( echo 'crd content is incorrect' && false )
diff -N $f ./manifests/cluster-manager/hub/$(basename $f) || ( echo 'crd content is incorrect' && false )
done
for f in $SPOKE_CRD_FILES

View File

@@ -8,7 +8,11 @@ spec:
service:
name: cluster-manager-registration-webhook
namespace: {{ .ClusterManagerNamespace }}
port: 443
{{if eq .RegistrationWebhook.Port 0}}
port: 443
{{else}}
port: {{.RegistrationWebhook.Port}}
{{end}}
caBundle: {{ .RegistrationAPIServiceCABundle }}
groupPriorityMinimum: 10000
versionPriority: 20
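The conditional keeps backward compatibility: `Port` is zero when no detached webhook configuration was supplied, so the rendered APIService falls back to 443. A standalone sketch of the same `text/template` logic (illustrative only, not the operator's rendering code):
```go
package main

import (
	"os"
	"text/template"
)

const snippet = `service:
  name: cluster-manager-registration-webhook
{{- if eq .RegistrationWebhook.Port 0}}
  port: 443
{{- else}}
  port: {{.RegistrationWebhook.Port}}
{{- end}}
`

type webhook struct{ Port int32 }

func main() {
	tmpl := template.Must(template.New("apiservice").Parse(snippet))
	// A zero port (no detached config) renders 443; an explicit port is kept.
	for _, w := range []webhook{{Port: 0}, {Port: 30443}} {
		_ = tmpl.Execute(os.Stdout, map[string]webhook{"RegistrationWebhook": w})
	}
}
```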

View File

@@ -0,0 +1,11 @@
kind: Endpoints
apiVersion: v1
metadata:
name: cluster-manager-registration-webhook
namespace: {{ .ClusterManagerNamespace }}
subsets:
- addresses:
- ip: {{.RegistrationWebhook.Address}}
ports:
- port: {{.RegistrationWebhook.Port}}
name: tls

View File

@@ -0,0 +1,23 @@
{{if .RegistrationWebhook.IsIPFormat}}
kind: Service
apiVersion: v1
metadata:
name: cluster-manager-registration-webhook
namespace: {{ .ClusterManagerNamespace }}
spec:
type: ClusterIP
ports:
- protocol: TCP
port: {{.RegistrationWebhook.Port}}
targetPort: {{.RegistrationWebhook.Port}}
name: tls
{{else}}
apiVersion: v1
kind: Service
metadata:
name: cluster-manager-registration-webhook
namespace: {{ .ClusterManagerNamespace }}
spec:
type: ExternalName
externalName: {{.RegistrationWebhook.Address}}
{{end}}
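Two standard Kubernetes mechanisms back this template: for a raw IP, a selector-less `ClusterIP` Service plus the manual `Endpoints` object above routes in-cluster traffic to the external address, while for an FQDN an `ExternalName` Service simply resolves to that name via DNS. A sketch of the same branching with client-go types (illustrative; the operator renders the YAML templates instead):
```go
package sketch

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// webhookService mirrors the template: a selector-less ClusterIP Service
// (paired with a manual Endpoints object) for IPs, ExternalName for FQDNs.
func webhookService(name, namespace, address string, port int32, isIP bool) *corev1.Service {
	svc := &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace}}
	if isIP {
		svc.Spec = corev1.ServiceSpec{
			Type: corev1.ServiceTypeClusterIP,
			// No selector: kube-proxy forwards to the Endpoints object's IP.
			Ports: []corev1.ServicePort{{
				Name:       "tls",
				Protocol:   corev1.ProtocolTCP,
				Port:       port,
				TargetPort: intstr.FromInt(int(port)),
			}},
		}
		return svc
	}
	svc.Spec = corev1.ServiceSpec{
		Type:         corev1.ServiceTypeExternalName,
		ExternalName: address,
	}
	return svc
}
```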

View File

@@ -8,7 +8,11 @@ spec:
service:
name: cluster-manager-work-webhook
namespace: {{ .ClusterManagerNamespace }}
port: 443
{{if eq .WorkWebhook.Port 0}}
port: 443
{{else}}
port: {{.WorkWebhook.Port}}
{{end}}
caBundle: {{ .WorkAPIServiceCABundle }}
groupPriorityMinimum: 10000
versionPriority: 20

View File

@@ -0,0 +1,11 @@
kind: Endpoints
apiVersion: v1
metadata:
name: cluster-manager-work-webhook
namespace: {{ .ClusterManagerNamespace }}
subsets:
- addresses:
- ip: {{.WorkWebhook.Address}}
ports:
- port: {{.WorkWebhook.Port}}
name: tls

View File

@@ -0,0 +1,23 @@
{{if .WorkWebhook.IsIPFormat}}
kind: Service
apiVersion: v1
metadata:
name: cluster-manager-work-webhook
namespace: {{ .ClusterManagerNamespace }}
spec:
type: ClusterIP
ports:
- protocol: TCP
port: {{.WorkWebhook.Port}}
targetPort: {{.WorkWebhook.Port}}
name: tls
{{else}}
apiVersion: v1
kind: Service
metadata:
name: cluster-manager-work-webhook
namespace: {{ .ClusterManagerNamespace }}
spec:
type: ExternalName
externalName: {{.WorkWebhook.Address}}
{{end}}

View File

@@ -36,13 +36,18 @@ spec:
operator: In
values:
- clustermanager-placement-controller
{{ if not .DetachedMode }}
serviceAccountName: {{ .ClusterManagerName }}-placement-controller-sa
{{ end }}
containers:
- name: placement-controller
image: {{ .PlacementImage }}
args:
- "/placement"
- "controller"
{{ if .DetachedMode }}
- "--kubeconfig=/var/run/secrets/hub/kubeconfig"
{{ end }}
securityContext:
allowPrivilegeEscalation: false
capabilities:
@@ -67,3 +72,13 @@ spec:
requests:
cpu: 100m
memory: 128Mi
{{ if .DetachedMode }}
volumeMounts:
- mountPath: /var/run/secrets/hub
name: kubeconfig
readOnly: true
volumes:
- name: kubeconfig
secret:
secretName: {{ .ClusterManagerName }}-placement-controller-sa-kubeconfig
{{ end }}

View File

@@ -36,13 +36,18 @@ spec:
operator: In
values:
- clustermanager-registration-controller
{{ if not .DetachedMode }}
serviceAccountName: {{ .ClusterManagerName }}-registration-controller-sa
{{ end }}
containers:
- name: hub-registration-controller
image: {{ .RegistrationImage }}
args:
- "/registration"
- "controller"
{{ if .DetachedMode }}
- "--kubeconfig=/var/run/secrets/hub/kubeconfig"
{{ end }}
securityContext:
allowPrivilegeEscalation: false
capabilities:
@@ -67,3 +72,13 @@ spec:
requests:
cpu: 100m
memory: 128Mi
{{ if .DetachedMode }}
volumeMounts:
- mountPath: /var/run/secrets/hub
name: kubeconfig
readOnly: true
volumes:
- name: kubeconfig
secret:
secretName: {{ .ClusterManagerName }}-registration-controller-sa-kubeconfig
{{ end }}

View File

@@ -36,7 +36,9 @@ spec:
operator: In
values:
- {{ .ClusterManagerName }}-registration-webhook
{{ if not .DetachedMode }}
serviceAccountName: {{ .ClusterManagerName }}-registration-webhook-sa
{{ end }}
containers:
- name: {{ .ClusterManagerName }}-registration-webhook-sa
image: {{ .RegistrationImage }}
@@ -46,6 +48,11 @@ spec:
- "--secure-port=6443"
- "--tls-cert-file=/serving-cert/tls.crt"
- "--tls-private-key-file=/serving-cert/tls.key"
{{ if .DetachedMode }}
- "--kubeconfig=/var/run/secrets/hub/kubeconfig"
- "--authentication-kubeconfig=/var/run/secrets/hub/kubeconfig"
- "--authorization-kubeconfig=/var/run/secrets/hub/kubeconfig"
{{ end }}
securityContext:
allowPrivilegeEscalation: false
capabilities:
@@ -74,8 +81,17 @@ spec:
- name: webhook-secret
mountPath: "/serving-cert"
readOnly: true
{{ if .DetachedMode }}
- mountPath: /var/run/secrets/hub
name: kubeconfig
readOnly: true
{{ end }}
volumes:
- name: webhook-secret
secret:
secretName: registration-webhook-serving-cert
{{ if .DetachedMode }}
- name: kubeconfig
secret:
secretName: {{ .ClusterManagerName }}-registration-webhook-sa-kubeconfig
{{ end }}

View File

@@ -36,7 +36,9 @@ spec:
operator: In
values:
- {{ .ClusterManagerName }}-work-webhook
{{ if not .DetachedMode }}
serviceAccountName: {{ .ClusterManagerName }}-work-webhook-sa
{{ end }}
containers:
- name: {{ .ClusterManagerName }}-work-webhook-sa
image: {{ .WorkImage }}
@@ -46,6 +48,11 @@ spec:
- "--secure-port=6443"
- "--tls-cert-file=/serving-cert/tls.crt"
- "--tls-private-key-file=/serving-cert/tls.key"
{{ if .DetachedMode }}
- "--kubeconfig=/var/run/secrets/hub/kubeconfig"
- "--authentication-kubeconfig=/var/run/secrets/hub/kubeconfig"
- "--authorization-kubeconfig=/var/run/secrets/hub/kubeconfig"
{{ end }}
securityContext:
allowPrivilegeEscalation: false
capabilities:
@@ -74,8 +81,18 @@ spec:
- name: webhook-secret
mountPath: "/serving-cert"
readOnly: true
{{ if .DetachedMode }}
- mountPath: /var/run/secrets/hub
name: kubeconfig
readOnly: true
{{ end }}
volumes:
- name: webhook-secret
secret:
secretName: work-webhook-serving-cert
{{ if .DetachedMode }}
- name: kubeconfig
secret:
secretName: {{ .ClusterManagerName }}-work-webhook-sa-kubeconfig
{{ end }}

View File

@@ -9,4 +9,13 @@ type HubConfig struct {
WorkAPIServiceCABundle string
PlacementImage string
Replica int32
DetachedMode bool
RegistrationWebhook Webhook
WorkWebhook Webhook
}
type Webhook struct {
IsIPFormat bool
Port int32
Address string
}
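For orientation, here is how a detached deployment would populate these new fields, using the sample CR values from the README. A standalone sketch with local copies of the structs above:
```go
package main

import "fmt"

// Local copies of the structs above, for illustration only.
type Webhook struct {
	IsIPFormat bool
	Port       int32
	Address    string
}

type HubConfig struct {
	DetachedMode        bool
	RegistrationWebhook Webhook
	WorkWebhook         Webhook
}

func main() {
	cfg := HubConfig{
		DetachedMode: true,
		// Values from the sample detached ClusterManager CR in this commit;
		// IsIPFormat stays false because the address is an FQDN, not an IP.
		RegistrationWebhook: Webhook{Address: "management-control-plane", Port: 30443},
		WorkWebhook:         Webhook{Address: "management-control-plane", Port: 31443},
	}
	fmt.Printf("%+v\n", cfg)
}
```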

View File

@@ -710,21 +710,19 @@ func SyncSecret(client, targetClient coreclientv1.SecretsGetter, recorder events
}
// GetHubKubeconfig is used to get the kubeconfig of the hub cluster.
// If it's Default mode, the kubeconfig of the hub cluster should equal to the management cluster's kubeconfig.
// If it's Default mode, the kubeconfig of the hub cluster should be equal to the operator cluster's kubeconfig, and in most cases it's the InClusterConfig.
// If it's Detached mode, the kubeconfig of the hub cluster is stored as a secret under clustermanager namespace.
func GetHubKubeconfig(ctx context.Context,
managementKubeconfig *rest.Config, // this is the kubeconfig of the cluster which controller is running on now.
managementKubeclient kubernetes.Interface,
clusternamagerName string,
operatorKubeconfig *rest.Config, // this is the kubeconfig of the cluster which controller is running on now.
operatorClient kubernetes.Interface,
clusterManagerName string,
clustermanagerMode operatorapiv1.InstallMode) (*rest.Config, error) {
switch clustermanagerMode {
case operatorapiv1.InstallModeDefault:
return managementKubeconfig, nil
case operatorapiv1.InstallModeDetached:
clustermanagerNamespace := ClusterManagerNamespace(clusternamagerName, clustermanagerMode)
clustermanagerNamespace := ClusterManagerNamespace(clusterManagerName, clustermanagerMode)
// get secret of external kubeconfig
secret, err := managementKubeclient.CoreV1().Secrets(clustermanagerNamespace).Get(ctx, ExternalHubKubeConfig, metav1.GetOptions{})
secret, err := operatorClient.CoreV1().Secrets(clustermanagerNamespace).Get(ctx, ExternalHubKubeConfig, metav1.GetOptions{})
if err != nil {
return nil, err
}
@@ -737,6 +735,6 @@ func GetHubKubeconfig(ctx context.Context,
return config, nil
default:
// backward compatible with previous crd.
return managementKubeconfig, nil
return operatorKubeconfig, nil
}
}
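The hunk elides how the secret's bytes become a `*rest.Config`; one common client-go approach looks like the following sketch (written under that assumption, not necessarily the exact code here):
```go
package sketch

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

// restConfigFromSecret builds a rest.Config from a kubeconfig stored in the
// external-hub-kubeconfig secret, assuming the bytes live under the
// "kubeconfig" key.
func restConfigFromSecret(secret *corev1.Secret) (*rest.Config, error) {
	return clientcmd.RESTConfigFromKubeConfig(secret.Data["kubeconfig"])
}
```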

View File

@@ -1071,7 +1071,7 @@ func TestGetRelatedResource(t *testing.T) {
}{
{
name: "get correct crd relatedResources",
manifestFile: "cluster-manager/0000_00_addon.open-cluster-management.io_clustermanagementaddons.crd.yaml",
manifestFile: "cluster-manager/hub/0000_00_addon.open-cluster-management.io_clustermanagementaddons.crd.yaml",
config: manifests.HubConfig{
ClusterManagerName: "test",
Replica: 1,
@@ -1087,7 +1087,7 @@ func TestGetRelatedResource(t *testing.T) {
},
{
name: "get correct clusterrole relatedResources",
manifestFile: "cluster-manager/cluster-manager-registration-clusterrole.yaml",
manifestFile: "cluster-manager/hub/cluster-manager-registration-clusterrole.yaml",
config: manifests.HubConfig{
ClusterManagerName: "test",
Replica: 1,
@@ -1103,7 +1103,7 @@ func TestGetRelatedResource(t *testing.T) {
},
{
name: "get correct deployment relatedResources",
manifestFile: "cluster-manager/cluster-manager-registration-deployment.yaml",
manifestFile: "cluster-manager/management/cluster-manager-registration-deployment.yaml",
config: manifests.HubConfig{
ClusterManagerName: "test",
ClusterManagerNamespace: "test-namespace",

View File

@@ -3,6 +3,7 @@ package helpers
import (
"context"
"fmt"
"io/ioutil"
"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
@@ -26,9 +27,9 @@ import (
// return err
// }
// ...
func EnsureSAToken(ctx context.Context, saName, saNamespace string, client kubernetes.Interface, renderSAToken func([]byte) error) error {
func EnsureSAToken(ctx context.Context, saName, saNamespace string, saClient kubernetes.Interface, renderSAToken func([]byte) error) error {
// get the service account
sa, err := client.CoreV1().ServiceAccounts(saNamespace).Get(ctx, saName, metav1.GetOptions{})
sa, err := saClient.CoreV1().ServiceAccounts(saNamespace).Get(ctx, saName, metav1.GetOptions{})
if err != nil {
return err
}
@@ -40,7 +41,8 @@ func EnsureSAToken(ctx context.Context, saName, saNamespace string, client kuber
// get the token secret
tokenSecretName := secret.Name
tokenSecret, err := client.CoreV1().Secrets(saNamespace).Get(ctx, tokenSecretName, metav1.GetOptions{})
// get the token secret
tokenSecret, err := saClient.CoreV1().Secrets(saNamespace).Get(ctx, tokenSecretName, metav1.GetOptions{})
if err != nil {
return err
}
@@ -61,16 +63,35 @@ func EnsureSAToken(ctx context.Context, saName, saNamespace string, client kuber
}
// RenderToKubeconfigSecret would render saToken to a secret.
func RenderToKubeconfigSecret(secretName, secretNamespace string, templateKubeconfig *rest.Config, client coreclientv1.SecretsGetter, recorder events.Recorder) func([]byte) error {
func RenderToKubeconfigSecret(secretName, secretNamespace string, templateKubeconfig *rest.Config, secretClient coreclientv1.SecretsGetter, recorder events.Recorder) func([]byte) error {
return func(saToken []byte) error {
var c *clientcmdapi.Cluster
if len(templateKubeconfig.CAData) != 0 {
c = &clientcmdapi.Cluster{
Server: templateKubeconfig.Host,
CertificateAuthorityData: templateKubeconfig.CAData,
}
} else if len(templateKubeconfig.CAFile) != 0 {
caData, err := ioutil.ReadFile(templateKubeconfig.CAFile)
if err != nil {
return err
}
c = &clientcmdapi.Cluster{
Server: templateKubeconfig.Host,
CertificateAuthorityData: caData,
}
} else {
c = &clientcmdapi.Cluster{
Server: templateKubeconfig.Host,
InsecureSkipTLSVerify: true,
}
}
kubeconfigContent, err := clientcmd.Write(clientcmdapi.Config{
Kind: "Config",
APIVersion: "v1",
Clusters: map[string]*clientcmdapi.Cluster{
"cluster": {
Server: templateKubeconfig.Host,
CertificateAuthorityData: templateKubeconfig.CAData,
},
"cluster": c,
},
Contexts: map[string]*clientcmdapi.Context{
"context": {
@@ -88,7 +109,7 @@ func RenderToKubeconfigSecret(secretName, secretNamespace string, templateKubeco
if err != nil {
return err
}
_, _, err = resourceapply.ApplySecret(client, recorder, &corev1.Secret{
_, _, err = resourceapply.ApplySecret(secretClient, recorder, &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: secretNamespace,
Name: secretName,
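Putting the two helpers together (the usage comment above is truncated): the controller's detached path composes them roughly as in this sketch. Imports are omitted, and the names mirror ensureSAKubeconfigs in the cluster-manager controller later in this commit:
```go
// Sketch: mint a kubeconfig secret on the management cluster from a hub
// ServiceAccount token (mirrors ensureSAKubeconfigs in the controller).
func syncOneSAKubeconfig(ctx context.Context, hubClient, managementClient kubernetes.Interface,
	hubKubeConfig *rest.Config, recorder events.Recorder) error {
	return helpers.EnsureSAToken(ctx,
		"cluster-manager-registration-controller-sa", // ServiceAccount on the hub cluster
		"cluster-manager",                            // clustermanager namespace
		hubClient,
		helpers.RenderToKubeconfigSecret(
			"cluster-manager-registration-controller-sa-kubeconfig",
			"cluster-manager",
			&rest.Config{
				Host:            hubKubeConfig.Host,
				TLSClientConfig: rest.TLSClientConfig{CAData: hubKubeConfig.CAData},
			},
			managementClient.CoreV1(), // the rendered secret lands on the management cluster
			recorder))
}
```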

View File

@@ -10,6 +10,8 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/util/retry"
"k8s.io/klog/v2"
apiregistrationclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1"
@@ -37,48 +39,78 @@ var (
"managedclusters.cluster.open-cluster-management.io",
}
staticResourceFiles = []string{
"cluster-manager/0000_00_addon.open-cluster-management.io_clustermanagementaddons.crd.yaml",
"cluster-manager/0000_00_clusters.open-cluster-management.io_managedclusters.crd.yaml",
"cluster-manager/0000_00_clusters.open-cluster-management.io_managedclustersets.crd.yaml",
"cluster-manager/0000_00_work.open-cluster-management.io_manifestworks.crd.yaml",
"cluster-manager/0000_01_addon.open-cluster-management.io_managedclusteraddons.crd.yaml",
"cluster-manager/0000_01_clusters.open-cluster-management.io_managedclustersetbindings.crd.yaml",
"cluster-manager/0000_02_clusters.open-cluster-management.io_placements.crd.yaml",
"cluster-manager/0000_03_clusters.open-cluster-management.io_placementdecisions.crd.yaml",
"cluster-manager/0000_05_clusters.open-cluster-management.io_addonplacementscores.crd.yaml",
"cluster-manager/cluster-manager-registration-clusterrole.yaml",
"cluster-manager/cluster-manager-registration-clusterrolebinding.yaml",
"cluster-manager/cluster-manager-namespace.yaml",
"cluster-manager/cluster-manager-registration-serviceaccount.yaml",
"cluster-manager/cluster-manager-registration-webhook-clusterrole.yaml",
"cluster-manager/cluster-manager-registration-webhook-clusterrolebinding.yaml",
"cluster-manager/cluster-manager-registration-webhook-service.yaml",
"cluster-manager/cluster-manager-registration-webhook-serviceaccount.yaml",
"cluster-manager/cluster-manager-registration-webhook-clustersetbinding-validatingconfiguration.yaml",
"cluster-manager/cluster-manager-registration-webhook-validatingconfiguration.yaml",
"cluster-manager/cluster-manager-registration-webhook-mutatingconfiguration.yaml",
"cluster-manager/cluster-manager-work-webhook-clusterrole.yaml",
"cluster-manager/cluster-manager-work-webhook-clusterrolebinding.yaml",
"cluster-manager/cluster-manager-work-webhook-service.yaml",
"cluster-manager/cluster-manager-work-webhook-serviceaccount.yaml",
"cluster-manager/cluster-manager-work-webhook-validatingconfiguration.yaml",
"cluster-manager/cluster-manager-placement-clusterrole.yaml",
"cluster-manager/cluster-manager-placement-clusterrolebinding.yaml",
"cluster-manager/cluster-manager-placement-serviceaccount.yaml",
namespaceResource = "cluster-manager/cluster-manager-namespace.yaml"
// hubCRDResourceFiles should be deployed in the hub cluster
hubCRDResourceFiles = []string{
"cluster-manager/hub/0000_00_addon.open-cluster-management.io_clustermanagementaddons.crd.yaml",
"cluster-manager/hub/0000_00_clusters.open-cluster-management.io_managedclusters.crd.yaml",
"cluster-manager/hub/0000_00_clusters.open-cluster-management.io_managedclustersets.crd.yaml",
"cluster-manager/hub/0000_00_work.open-cluster-management.io_manifestworks.crd.yaml",
"cluster-manager/hub/0000_01_addon.open-cluster-management.io_managedclusteraddons.crd.yaml",
"cluster-manager/hub/0000_01_clusters.open-cluster-management.io_managedclustersetbindings.crd.yaml",
"cluster-manager/hub/0000_02_clusters.open-cluster-management.io_placements.crd.yaml",
"cluster-manager/hub/0000_03_clusters.open-cluster-management.io_placementdecisions.crd.yaml",
"cluster-manager/hub/0000_05_clusters.open-cluster-management.io_addonplacementscores.crd.yaml",
}
// apiserviceResoruceFiles requires CABundle in HubConfig
apiserviceResoruceFiles = []string{
"cluster-manager/cluster-manager-work-webhook-apiservice.yaml",
"cluster-manager/cluster-manager-registration-webhook-apiservice.yaml",
// The hubWebhookResourceFiles should be deployed in the hub cluster
// The service may point to an external URL which represents the webhook-server's address.
hubWebhookResourceFiles = []string{
// registration-webhook
"cluster-manager/hub/cluster-manager-registration-webhook-validatingconfiguration.yaml",
"cluster-manager/hub/cluster-manager-registration-webhook-mutatingconfiguration.yaml",
"cluster-manager/hub/cluster-manager-registration-webhook-clustersetbinding-validatingconfiguration.yaml",
// work-webhook
"cluster-manager/hub/cluster-manager-work-webhook-validatingconfiguration.yaml",
}
// The apiservice resources should be applied after CABundle created.
// And also should be deployed in the hub cluster.
hubApiserviceFiles = []string{
"cluster-manager/hub/cluster-manager-work-webhook-apiservice.yaml",
"cluster-manager/hub/cluster-manager-registration-webhook-apiservice.yaml",
}
// The hubDefaultWebhookServiceFiles should be deployed on the hub cluster when the deploy mode is default.
hubDefaultWebhookServiceFiles = []string{
"cluster-manager/hub/cluster-manager-registration-webhook-service.yaml",
"cluster-manager/hub/cluster-manager-work-webhook-service.yaml",
}
// The hubDetachedWebhookServiceFiles should only be deployed on the hub cluster when the deploy mode is detached.
hubDetachedWebhookServiceFiles = []string{
"cluster-manager/hub/cluster-manager-registration-webhook-service-detached.yaml",
"cluster-manager/hub/cluster-manager-work-webhook-service-detached.yaml",
}
// The detached webhook endpoint files only apply when the deploy mode is detached and the address is in IP format.
hubDetachedWebhookEndpointRegistration = "cluster-manager/hub/cluster-manager-registration-webhook-endpoint-detached.yaml"
hubDetachedWebhookEndpointWork = "cluster-manager/hub/cluster-manager-work-webhook-endpoint-detached.yaml"
// The hubRbacResourceFiles should be deployed in the hub cluster.
hubRbacResourceFiles = []string{
// registration
"cluster-manager/hub/cluster-manager-registration-clusterrole.yaml",
"cluster-manager/hub/cluster-manager-registration-clusterrolebinding.yaml",
"cluster-manager/hub/cluster-manager-registration-serviceaccount.yaml",
// registration-webhook
"cluster-manager/hub/cluster-manager-registration-webhook-clusterrole.yaml",
"cluster-manager/hub/cluster-manager-registration-webhook-clusterrolebinding.yaml",
"cluster-manager/hub/cluster-manager-registration-webhook-serviceaccount.yaml",
// work-webhook
"cluster-manager/hub/cluster-manager-work-webhook-clusterrole.yaml",
"cluster-manager/hub/cluster-manager-work-webhook-clusterrolebinding.yaml",
"cluster-manager/hub/cluster-manager-work-webhook-serviceaccount.yaml",
// placement
"cluster-manager/hub/cluster-manager-placement-clusterrole.yaml",
"cluster-manager/hub/cluster-manager-placement-clusterrolebinding.yaml",
"cluster-manager/hub/cluster-manager-placement-serviceaccount.yaml",
}
// All deployments should be deployed in the management cluster.
deploymentFiles = []string{
"cluster-manager/cluster-manager-registration-deployment.yaml",
"cluster-manager/cluster-manager-registration-webhook-deployment.yaml",
"cluster-manager/cluster-manager-work-webhook-deployment.yaml",
"cluster-manager/cluster-manager-placement-deployment.yaml",
"cluster-manager/management/cluster-manager-registration-deployment.yaml",
"cluster-manager/management/cluster-manager-registration-webhook-deployment.yaml",
"cluster-manager/management/cluster-manager-work-webhook-deployment.yaml",
"cluster-manager/management/cluster-manager-placement-deployment.yaml",
}
)
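Every `helpers.ApplyDirectly` and `helpers.ApplyDeployment` call in this file passes the same render callback: read the manifest from the embedded filesystem, then execute it as a template against the HubConfig values. Restated here for clarity (an illustrative extraction assuming the manifests/assets imports used elsewhere in this file, not code added by the commit):
```go
// renderManifest loads a manifest from the embedded filesystem and renders
// it as a template against the HubConfig values.
func renderManifest(config manifests.HubConfig) func(name string) ([]byte, error) {
	return func(name string) ([]byte, error) {
		template, err := manifests.ClusterManagerManifestFiles.ReadFile(name)
		if err != nil {
			return nil, err
		}
		return assets.MustCreateAssetFromTemplate(name, template, config).Data, nil
	}
}
```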
@@ -90,31 +122,36 @@ const (
)
type clusterManagerController struct {
clusterManagerClient operatorv1client.ClusterManagerInterface
clusterManagerLister operatorlister.ClusterManagerLister
kubeClient kubernetes.Interface
apiExtensionClient apiextensionsclient.Interface
apiRegistrationClient apiregistrationclient.APIServicesGetter
configMapLister corev1listers.ConfigMapLister
clusterManagerClient operatorv1client.ClusterManagerInterface
clusterManagerLister operatorlister.ClusterManagerLister
operatorKubeClient kubernetes.Interface
operatorKubeconfig *rest.Config
configMapLister corev1listers.ConfigMapLister
recorder events.Recorder
// For testcases which don't need these functions, we could set fake funcs
generateHubClusterClients func(hubConfig *rest.Config) (kubernetes.Interface, apiextensionsclient.Interface, apiregistrationclient.APIServicesGetter, error)
ensureSAKubeconfigs func(ctx context.Context, clusterManagerName, clusterManagerNamespace string, hubConfig *rest.Config, hubClient, managementClient kubernetes.Interface, recorder events.Recorder) error
}
// NewClusterManagerController construct cluster manager hub controller
func NewClusterManagerController(
kubeClient kubernetes.Interface,
apiExtensionClient apiextensionsclient.Interface,
apiRegistrationClient apiregistrationclient.APIServicesGetter,
operatorKubeClient kubernetes.Interface,
operatorKubeconfig *rest.Config,
clusterManagerClient operatorv1client.ClusterManagerInterface,
clusterManagerInformer operatorinformer.ClusterManagerInformer,
deploymentInformer appsinformer.DeploymentInformer,
configMapInformer corev1informers.ConfigMapInformer,
recorder events.Recorder) factory.Controller {
recorder events.Recorder,
) factory.Controller {
controller := &clusterManagerController{
kubeClient: kubeClient,
apiExtensionClient: apiExtensionClient,
apiRegistrationClient: apiRegistrationClient,
clusterManagerClient: clusterManagerClient,
clusterManagerLister: clusterManagerInformer.Lister(),
configMapLister: configMapInformer.Lister(),
operatorKubeClient: operatorKubeClient,
operatorKubeconfig: operatorKubeconfig,
clusterManagerClient: clusterManagerClient,
clusterManagerLister: clusterManagerInformer.Lister(),
configMapLister: configMapInformer.Lister(),
recorder: recorder,
generateHubClusterClients: generateHubClients,
ensureSAKubeconfigs: ensureSAKubeconfigs,
}
return factory.New().WithSync(controller.sync).
@@ -150,15 +187,25 @@ func (n *clusterManagerController) sync(ctx context.Context, controllerContext f
return err
}
clusterManager = clusterManager.DeepCopy()
clusterManagerNamespace := helpers.ClusterManagerNamespace(clusterManagerName, clusterManager.Spec.DeployOption.Mode)
clusterManagerMode := clusterManager.Spec.DeployOption.Mode
clusterManagerNamespace := helpers.ClusterManagerNamespace(clusterManagerName, clusterManagerMode)
// This config is used to render template of manifests.
config := manifests.HubConfig{
ClusterManagerName: clusterManager.Name,
ClusterManagerNamespace: clusterManagerNamespace,
RegistrationImage: clusterManager.Spec.RegistrationImagePullSpec,
WorkImage: clusterManager.Spec.WorkImagePullSpec,
PlacementImage: clusterManager.Spec.PlacementImagePullSpec,
Replica: helpers.DetermineReplicaByNodes(ctx, n.kubeClient),
Replica: helpers.DetermineReplicaByNodes(ctx, n.operatorKubeClient),
DetachedMode: clusterManager.Spec.DeployOption.Mode == operatorapiv1.InstallModeDetached,
}
// If we are deploying in the detached mode, webhooks must be created in a different way from the default mode.
// In the detached mode, the webhook servers run in the management cluster, while users access the hub cluster.
// So we need to add configuration so that the apiserver of the hub cluster can reach the webhook servers on the management cluster.
if clusterManager.Spec.DeployOption.Detached != nil {
config.RegistrationWebhook = convertWebhookConfiguration(clusterManager.Spec.DeployOption.Detached.RegistrationWebhookConfiguration)
config.WorkWebhook = convertWebhookConfiguration(clusterManager.Spec.DeployOption.Detached.WorkWebhookConfiguration)
}
// Update finalizer at first
@@ -177,115 +224,66 @@ func (n *clusterManagerController) sync(ctx context.Context, controllerContext f
}
}
// ClusterManager is deleting, we remove its related resources on hub
// Get clients of the hub cluster and the management cluster
hubKubeConfig, err := helpers.GetHubKubeconfig(ctx, n.operatorKubeconfig, n.operatorKubeClient, clusterManagerName, clusterManagerMode)
if err != nil {
return err
}
hubClient, hubApiExtensionClient, hubApiRegistrationClient, err := n.generateHubClusterClients(hubKubeConfig)
if err != nil {
return err
}
managementClient := n.operatorKubeClient // We assume that operator is always running on the management cluster.
// If the ClusterManager is deleting, we remove its related resources on hub
if !clusterManager.DeletionTimestamp.IsZero() {
if err := n.cleanUp(ctx, controllerContext, config); err != nil {
if err := cleanUpHub(ctx, controllerContext, clusterManagerMode, hubClient, hubApiExtensionClient, hubApiRegistrationClient, config); err != nil {
return err
}
return n.removeClusterManagerFinalizer(ctx, clusterManager)
if err := cleanUpManagement(ctx, controllerContext, managementClient, config); err != nil {
return err
}
return removeClusterManagerFinalizer(ctx, n.clusterManagerClient, clusterManager)
}
// Apply static files(Which don't require CABundle)
var relatedResources []operatorapiv1.RelatedResourceMeta
resourceResults := helpers.ApplyDirectly(
n.kubeClient,
n.apiExtensionClient,
n.apiRegistrationClient,
controllerContext.Recorder(),
func(name string) ([]byte, error) {
template, err := manifests.ClusterManagerManifestFiles.ReadFile(name)
if err != nil {
return nil, err
}
objData := assets.MustCreateAssetFromTemplate(name, template, config).Data
helpers.SetRelatedResourcesStatusesWithObj(&relatedResources, objData)
return objData, nil
},
staticResourceFiles...,
)
errs := []error{}
for _, result := range resourceResults {
if result.Error != nil {
errs = append(errs, fmt.Errorf("%q (%T): %v", result.File, result.Type, result.Error))
}
}
// try to load ca bundle from configmap
// if the namespace not found yet, skip this and apply static resources first
caBundle := "placeholder"
configmap, err := n.configMapLister.ConfigMaps(clusterManagerNamespace).Get(caBundleConfigmap)
switch {
case errors.IsNotFound(err):
// do nothing
case err != nil:
// Apply resources on the hub cluster
hubAppliedErrs, err := applyHubResources(clusterManagerNamespace,
clusterManagerMode,
config,
&relatedResources,
hubClient, hubApiExtensionClient, hubApiRegistrationClient,
n.configMapLister, n.recorder)
if err != nil {
return err
default:
if cb := configmap.Data["ca-bundle.crt"]; len(cb) > 0 {
caBundle = cb
}
}
encodedCaBundle := base64.StdEncoding.EncodeToString([]byte(caBundle))
config.RegistrationAPIServiceCABundle = encodedCaBundle
config.WorkAPIServiceCABundle = encodedCaBundle
// Apply apiservice files
apiserviceResults := helpers.ApplyDirectly(
n.kubeClient,
n.apiExtensionClient,
n.apiRegistrationClient,
controllerContext.Recorder(),
func(name string) ([]byte, error) {
template, err := manifests.ClusterManagerManifestFiles.ReadFile(name)
if err != nil {
return nil, err
}
objData := assets.MustCreateAssetFromTemplate(name, template, config).Data
helpers.SetRelatedResourcesStatusesWithObj(&relatedResources, objData)
return objData, nil
},
apiserviceResoruceFiles...,
// Apply resources on the management cluster
currentGenerations, managementAppliedErrs, err := applyManagementResources(
ctx,
clusterManager,
config,
&relatedResources,
hubClient, hubKubeConfig,
managementClient, n.recorder, n.ensureSAKubeconfigs,
)
for _, result := range apiserviceResults {
if result.Error != nil {
errs = append(errs, fmt.Errorf("%q (%T): %v", result.File, result.Type, result.Error))
}
}
currentGenerations := []operatorapiv1.GenerationStatus{}
// Render deployment manifest and apply
for _, file := range deploymentFiles {
currentGeneration, err := helpers.ApplyDeployment(
n.kubeClient,
clusterManager.Status.Generations,
clusterManager.Spec.NodePlacement,
func(name string) ([]byte, error) {
template, err := manifests.ClusterManagerManifestFiles.ReadFile(name)
if err != nil {
return nil, err
}
objData := assets.MustCreateAssetFromTemplate(name, template, config).Data
helpers.SetRelatedResourcesStatusesWithObj(&relatedResources, objData)
return objData, nil
},
controllerContext.Recorder(),
file)
if err != nil {
errs = append(errs, err)
}
currentGenerations = append(currentGenerations, currentGeneration)
if err != nil {
return err
}
// Update status
errs := append(hubAppliedErrs, managementAppliedErrs...)
conditions := &clusterManager.Status.Conditions
observedKlusterletGeneration := clusterManager.Status.ObservedGeneration
observedGeneration := clusterManager.Status.ObservedGeneration
if len(errs) == 0 {
meta.SetStatusCondition(conditions, metav1.Condition{
Type: clusterManagerApplied,
Status: metav1.ConditionTrue,
Reason: "ClusterManagerApplied",
Message: "Components of cluster manager is applied",
Message: "Components of cluster manager are applied",
})
observedKlusterletGeneration = clusterManager.Generation
observedGeneration = clusterManager.Generation
} else {
meta.SetStatusCondition(conditions, metav1.Condition{
Type: clusterManagerApplied,
@@ -295,14 +293,13 @@ func (n *clusterManagerController) sync(ctx context.Context, controllerContext f
})
}
// Update status
_, _, updatedErr := helpers.UpdateClusterManagerStatus(
ctx, n.clusterManagerClient, clusterManager.Name,
helpers.UpdateClusterManagerConditionFn(*conditions...),
helpers.UpdateClusterManagerGenerationsFn(currentGenerations...),
helpers.UpdateClusterManagerRelatedResourcesFn(relatedResources...),
func(oldStatus *operatorapiv1.ClusterManagerStatus) error {
oldStatus.ObservedGeneration = observedKlusterletGeneration
oldStatus.ObservedGeneration = observedGeneration
return nil
},
)
@@ -313,7 +310,162 @@ func (n *clusterManagerController) sync(ctx context.Context, controllerContext f
return operatorhelpers.NewMultiLineAggregate(errs)
}
func (n *clusterManagerController) removeClusterManagerFinalizer(ctx context.Context, deploy *operatorapiv1.ClusterManager) error {
func applyHubResources(
clusterManagerNamespace string,
clusterManagerMode operatorapiv1.InstallMode,
manifestsConfig manifests.HubConfig, // used to render templates
relatedResources *[]operatorapiv1.RelatedResourceMeta,
// hub clients
hubClient kubernetes.Interface, hubApiExtensionClient apiextensionsclient.Interface, hubApiRegistrationClient apiregistrationclient.APIServicesGetter,
configMapLister corev1listers.ConfigMapLister,
recorder events.Recorder,
) (appliedErrs []error, err error) {
// Apply hub cluster resources
hubResources := getHubResources(clusterManagerMode, manifestsConfig.RegistrationWebhook.IsIPFormat, manifestsConfig.WorkWebhook.IsIPFormat)
resourceResults := helpers.ApplyDirectly(
hubClient,
hubApiExtensionClient,
hubApiRegistrationClient,
recorder,
func(name string) ([]byte, error) {
template, err := manifests.ClusterManagerManifestFiles.ReadFile(name)
if err != nil {
return nil, err
}
objData := assets.MustCreateAssetFromTemplate(name, template, manifestsConfig).Data
helpers.SetRelatedResourcesStatusesWithObj(relatedResources, objData)
return objData, nil
},
hubResources...,
)
for _, result := range resourceResults {
if result.Error != nil {
appliedErrs = append(appliedErrs, fmt.Errorf("%q (%T): %v", result.File, result.Type, result.Error))
}
}
// Try to load the ca bundle from the configmap.
// If the configmap is found, populate the CA bundle into the config.
// If the configmap is not found yet, skip this and apply the other resources first.
caBundle := "placeholder"
configmap, err := configMapLister.ConfigMaps(clusterManagerNamespace).Get(caBundleConfigmap)
switch {
case errors.IsNotFound(err):
// do nothing
case err != nil:
return appliedErrs, err
default:
if cb := configmap.Data["ca-bundle.crt"]; len(cb) > 0 {
caBundle = cb
}
}
encodedCaBundle := base64.StdEncoding.EncodeToString([]byte(caBundle))
manifestsConfig.RegistrationAPIServiceCABundle = encodedCaBundle
manifestsConfig.WorkAPIServiceCABundle = encodedCaBundle
// Apply the APIService files to the hub cluster.
// The APIServices are applied after the other static files (including the namespace) because they require the CABundle configmap.
// Applying them also returns an error (not catchable as a NotFound error) if the namespace has not been created.
apiserviceResults := helpers.ApplyDirectly(
hubClient,
hubApiExtensionClient,
hubApiRegistrationClient,
recorder,
func(name string) ([]byte, error) {
template, err := manifests.ClusterManagerManifestFiles.ReadFile(name)
if err != nil {
return nil, err
}
objData := assets.MustCreateAssetFromTemplate(name, template, manifestsConfig).Data
helpers.SetRelatedResourcesStatusesWithObj(relatedResources, objData)
return objData, nil
},
hubApiserviceFiles...,
)
for _, result := range apiserviceResults {
if result.Error != nil {
appliedErrs = append(appliedErrs, fmt.Errorf("%q (%T): %v", result.File, result.Type, result.Error))
}
}
return appliedErrs, nil
}
func applyManagementResources(
ctx context.Context,
clusterManager *operatorapiv1.ClusterManager,
manifestsConfig manifests.HubConfig,
relatedResources *[]operatorapiv1.RelatedResourceMeta,
hubClient kubernetes.Interface, hubKubeConfig *rest.Config,
managementKubeClient kubernetes.Interface,
recorder events.Recorder,
ensureSAKubeconfigs func(ctx context.Context, clusterManagerName, clusterManagerNamespace string, hubConfig *rest.Config, hubClient, managementClient kubernetes.Interface, recorder events.Recorder) error,
) (currentGenerations []operatorapiv1.GenerationStatus, appliedErrs []error, err error) {
// Apply management cluster resources (namespace and deployments).
// Note: the certrotation-controller will create the CABundle after the namespace is applied,
// and the CABundle is used to render the apiservice resources.
managementResources := []string{namespaceResource}
resourceResults := helpers.ApplyDirectly(
managementKubeClient, nil, nil,
recorder,
func(name string) ([]byte, error) {
template, err := manifests.ClusterManagerManifestFiles.ReadFile(name)
if err != nil {
return nil, err
}
objData := assets.MustCreateAssetFromTemplate(name, template, manifestsConfig).Data
helpers.SetRelatedResourcesStatusesWithObj(relatedResources, objData)
return objData, nil
},
managementResources...,
)
for _, result := range resourceResults {
if result.Error != nil {
appliedErrs = append(appliedErrs, fmt.Errorf("%q (%T): %v", result.File, result.Type, result.Error))
}
}
clusterManagerName := clusterManager.Name
clusterManagerMode := clusterManager.Spec.DeployOption.Mode
clusterManagerNamespace := helpers.ClusterManagerNamespace(clusterManagerName, clusterManagerMode)
// In the Detached mode, ensure the RBAC kubeconfig secrets exist for the deployments to mount.
// In this step, we get a serviceaccount token from the hub cluster to form a kubeconfig and store it as a secret on the management cluster.
// Before this step, the serviceaccounts on the hub cluster and the namespace on the management cluster must already be applied.
if clusterManagerMode == operatorapiv1.InstallModeDetached {
err = ensureSAKubeconfigs(ctx, clusterManagerName, clusterManagerNamespace,
hubKubeConfig, hubClient, managementKubeClient, recorder)
if err != nil {
return currentGenerations, appliedErrs, err
}
}
for _, file := range deploymentFiles {
currentGeneration, err := helpers.ApplyDeployment(
managementKubeClient,
clusterManager.Status.Generations,
clusterManager.Spec.NodePlacement,
func(name string) ([]byte, error) {
template, err := manifests.ClusterManagerManifestFiles.ReadFile(name)
if err != nil {
return nil, err
}
objData := assets.MustCreateAssetFromTemplate(name, template, manifestsConfig).Data
helpers.SetRelatedResourcesStatusesWithObj(relatedResources, objData)
return objData, nil
},
recorder,
file)
if err != nil {
appliedErrs = append(appliedErrs, err)
}
currentGenerations = append(currentGenerations, currentGeneration)
}
return currentGenerations, appliedErrs, nil
}
func removeClusterManagerFinalizer(ctx context.Context, clusterManagerClient operatorv1client.ClusterManagerInterface, deploy *operatorapiv1.ClusterManager) error {
copiedFinalizers := []string{}
for i := range deploy.Finalizers {
if deploy.Finalizers[i] == clusterManagerFinalizer {
@@ -324,7 +476,7 @@ func (n *clusterManagerController) removeClusterManagerFinalizer(ctx context.Con
if len(deploy.Finalizers) != len(copiedFinalizers) {
deploy.Finalizers = copiedFinalizers
_, err := n.clusterManagerClient.Update(ctx, deploy, metav1.UpdateOptions{})
_, err := clusterManagerClient.Update(ctx, deploy, metav1.UpdateOptions{})
return err
}
@@ -333,8 +485,8 @@ func (n *clusterManagerController) removeClusterManagerFinalizer(ctx context.Con
// removeCRD removes the crd and checks whether the crd resource is removed. Since the related cr may still be being deleted,
// it checks the crd existence after the deletion and only returns nil when the crd is not found.
func (n *clusterManagerController) removeCRD(ctx context.Context, name string) error {
err := n.apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Delete(
func removeCRD(ctx context.Context, apiExtensionClient apiextensionsclient.Interface, name string) error {
err := apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Delete(
ctx, name, metav1.DeleteOptions{})
switch {
case errors.IsNotFound(err):
@@ -343,7 +495,7 @@ func (n *clusterManagerController) removeCRD(ctx context.Context, name string) e
return err
}
_, err = n.apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get(ctx, name, metav1.GetOptions{})
_, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get(ctx, name, metav1.GetOptions{})
switch {
case errors.IsNotFound(err):
return nil
@@ -354,11 +506,13 @@ func (n *clusterManagerController) removeCRD(ctx context.Context, name string) e
return fmt.Errorf("CRD %s is still being deleted", name)
}
func (n *clusterManagerController) cleanUp(
ctx context.Context, controllerContext factory.SyncContext, config manifests.HubConfig) error {
func cleanUpHub(ctx context.Context, controllerContext factory.SyncContext,
mode operatorapiv1.InstallMode,
kubeClient kubernetes.Interface, apiExtensionClient apiextensionsclient.Interface, apiRegistrationClient apiregistrationclient.APIServicesGetter,
config manifests.HubConfig) error {
// Remove crd
for _, name := range crdNames {
err := n.removeCRD(ctx, name)
err := removeCRD(ctx, apiExtensionClient, name)
if err != nil {
return err
}
@@ -366,13 +520,13 @@ func (n *clusterManagerController) cleanUp(
}
// Remove All Static files
allResourceFiles := append(staticResourceFiles, apiserviceResoruceFiles...)
for _, file := range allResourceFiles {
hubResources := append(getHubResources(mode, config.RegistrationWebhook.IsIPFormat, config.WorkWebhook.IsIPFormat), hubApiserviceFiles...)
for _, file := range hubResources {
err := helpers.CleanUpStaticObject(
ctx,
n.kubeClient,
n.apiExtensionClient,
n.apiRegistrationClient,
kubeClient,
apiExtensionClient,
apiRegistrationClient,
func(name string) ([]byte, error) {
template, err := manifests.ClusterManagerManifestFiles.ReadFile(name)
if err != nil {
@@ -389,3 +543,126 @@ func (n *clusterManagerController) cleanUp(
return nil
}
func cleanUpManagement(ctx context.Context, controllerContext factory.SyncContext,
kubeClient kubernetes.Interface, config manifests.HubConfig) error {
// Remove All Static files
managementResources := []string{namespaceResource} // because the namespace is removed, the deployments in it do not need to be removed explicitly
for _, file := range managementResources {
err := helpers.CleanUpStaticObject(
ctx,
kubeClient, nil, nil,
func(name string) ([]byte, error) {
template, err := manifests.ClusterManagerManifestFiles.ReadFile(name)
if err != nil {
return nil, err
}
return assets.MustCreateAssetFromTemplate(name, template, config).Data, nil
},
file,
)
if err != nil {
return err
}
}
return nil
}
func generateHubClients(hubKubeConfig *rest.Config) (kubernetes.Interface, apiextensionsclient.Interface, apiregistrationclient.APIServicesGetter, error) {
hubClient, err := kubernetes.NewForConfig(hubKubeConfig)
if err != nil {
return nil, nil, nil, err
}
hubApiExtensionClient, err := apiextensionsclient.NewForConfig(hubKubeConfig)
if err != nil {
return nil, nil, nil, err
}
hubApiRegistrationClient, err := apiregistrationclient.NewForConfig(hubKubeConfig)
if err != nil {
return nil, nil, nil, err
}
return hubClient, hubApiExtensionClient, hubApiRegistrationClient, nil
}
// ensureSAKubeconfigs is used to create a kubeconfig with a token from a ServiceAccount.
// We create a ServiceAccount with a rolebinding on the hub cluster, and then use the token of the ServiceAccount as the user of the kubeconfig.
// Finally, a deployment on the management cluster would use the kubeconfig to access resources on the hub cluster.
func ensureSAKubeconfigs(ctx context.Context, clusterManagerName, clusterManagerNamespace string,
hubKubeConfig *rest.Config, hubClient, managementClient kubernetes.Interface, recorder events.Recorder) error {
// setup template kubeconfig
ensureSAToken := func(saName string) error {
return helpers.EnsureSAToken(ctx, saName, clusterManagerNamespace, hubClient,
helpers.RenderToKubeconfigSecret(saName+"-kubeconfig", clusterManagerNamespace, &rest.Config{
Host: hubKubeConfig.Host,
TLSClientConfig: rest.TLSClientConfig{
CAData: hubKubeConfig.CAData,
},
}, managementClient.CoreV1(), recorder))
}
sas := getSAs(clusterManagerName)
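// Retry on any error: right after the hub resources are applied, the ServiceAccount token secrets may not be populated yet.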
err := retry.OnError(retry.DefaultBackoff, func(e error) bool {
return true
}, func() error {
for _, sa := range sas {
err := ensureSAToken(sa)
if err != nil {
return err
}
}
return nil
})
if err != nil {
return err
}
return nil
}
// getSAs returns the serviceaccount names of all hub components
func getSAs(clusterManagerName string) []string {
return []string{
clusterManagerName + "-registration-controller-sa",
clusterManagerName + "-registration-webhook-sa",
clusterManagerName + "-work-webhook-sa",
clusterManagerName + "-placement-controller-sa",
}
}
func getHubResources(mode operatorapiv1.InstallMode, isRegistrationIPFormat, isWorkIPFormat bool) []string {
hubResources := []string{namespaceResource}
hubResources = append(hubResources, hubCRDResourceFiles...)
hubResources = append(hubResources, hubWebhookResourceFiles...)
hubResources = append(hubResources, hubRbacResourceFiles...)
// the hubDetachedWebhookServiceFiles are only used in detached mode
if mode == operatorapiv1.InstallModeDetached {
hubResources = append(hubResources, hubDetachedWebhookServiceFiles...)
if isRegistrationIPFormat {
hubResources = append(hubResources, hubDetachedWebhookEndpointRegistration)
}
if isWorkIPFormat {
hubResources = append(hubResources, hubDetachedWebhookEndpointWork)
}
} else {
hubResources = append(hubResources, hubDefaultWebhookServiceFiles...)
}
return hubResources
}
// TODO: support IPV6 address
func isIPFormat(address string) bool {
runes := []rune(address)
for i := 0; i < len(runes); i++ {
if (runes[i] < '0' || runes[i] > '9') && runes[i] != '.' {
return false
}
}
return true
}
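// Note: the TODO above could be closed with the standard library instead of a
// rune scan; a possible replacement (assumption: literal IPv4 and IPv6
// addresses should both count as IP format) would be:
//
//	func isIPFormat(address string) bool { return net.ParseIP(address) != nil }
//
// which requires importing "net".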
func convertWebhookConfiguration(webhookConfiguration operatorapiv1.WebhookConfiguration) manifests.Webhook {
return manifests.Webhook{
Address: webhookConfiguration.Address,
Port: webhookConfiguration.Port,
IsIPFormat: isIPFormat(webhookConfiguration.Address),
}
}

View File

@@ -1,22 +1,29 @@
package clustermanagercontroller
import (
"context"
"strings"
"testing"
"time"
"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/library-go/pkg/operator/events/eventstesting"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
fakeapiextensions "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
kubeinformers "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
fakekube "k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/rest"
clienttesting "k8s.io/client-go/testing"
fakeapiregistration "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake"
apiregistrationclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1"
fakeoperatorlient "open-cluster-management.io/api/client/operator/clientset/versioned/fake"
operatorinformers "open-cluster-management.io/api/client/operator/informers/externalversions"
operatorapiv1 "open-cluster-management.io/api/operator/v1"
@@ -25,12 +32,17 @@ import (
testinghelper "open-cluster-management.io/registration-operator/pkg/helpers/testing"
)
var (
ctx = context.Background()
)
type testController struct {
controller *clusterManagerController
kubeClient *fakekube.Clientset
apiExtensionClient *fakeapiextensions.Clientset
apiRegistrationClient *fakeapiregistration.Clientset
operatorClient *fakeoperatorlient.Clientset
clusterManagerController *clusterManagerController
managementKubeClient *fakekube.Clientset
hubKubeClient *fakekube.Clientset
apiExtensionClient *fakeapiextensions.Clientset
apiRegistrationClient *fakeapiregistration.Clientset
operatorClient *fakeoperatorlient.Clientset
}
func newClusterManager(name string) *operatorapiv1.ClusterManager {
@@ -54,7 +66,7 @@ func newTestController(clustermanager *operatorapiv1.ClusterManager) *testContro
fakeOperatorClient := fakeoperatorlient.NewSimpleClientset(clustermanager)
operatorInformers := operatorinformers.NewSharedInformerFactory(fakeOperatorClient, 5*time.Minute)
hubController := &clusterManagerController{
clusterManagerController := &clusterManagerController{
clusterManagerClient: fakeOperatorClient.OperatorV1().ClusterManagers(),
clusterManagerLister: operatorInformers.Operator().V1().ClusterManagers().Lister(),
configMapLister: kubeInfomers.Core().V1().ConfigMaps().Lister(),
@@ -64,30 +76,32 @@ func newTestController(clustermanager *operatorapiv1.ClusterManager) *testContro
store.Add(clustermanager)
return &testController{
controller: hubController,
operatorClient: fakeOperatorClient,
clusterManagerController: clusterManagerController,
operatorClient: fakeOperatorClient,
}
}
func (t *testController) withKubeObject(objects ...runtime.Object) *testController {
fakeKubeClient := fakekube.NewSimpleClientset(objects...)
t.controller.kubeClient = fakeKubeClient
t.kubeClient = fakeKubeClient
return t
}
func setup(t *testing.T, tc *testController, crds ...runtime.Object) {
fakeHubKubeClient := fakekube.NewSimpleClientset()
fakeManagementKubeClient := fakekube.NewSimpleClientset()
fakeAPIExtensionClient := fakeapiextensions.NewSimpleClientset(crds...)
fakeAPIRegistrationClient := fakeapiregistration.NewSimpleClientset()
func (t *testController) withCRDObject(objects ...runtime.Object) *testController {
fakeAPIExtensionClient := fakeapiextensions.NewSimpleClientset(objects...)
t.controller.apiExtensionClient = fakeAPIExtensionClient
t.apiExtensionClient = fakeAPIExtensionClient
return t
}
// set clients in test controller
tc.apiExtensionClient = fakeAPIExtensionClient
tc.apiRegistrationClient = fakeAPIRegistrationClient
tc.hubKubeClient = fakeHubKubeClient
tc.managementKubeClient = fakeManagementKubeClient
func (t *testController) withAPIServiceObject(objects ...runtime.Object) *testController {
fakeAPIRegistrationClient := fakeapiregistration.NewSimpleClientset(objects...)
t.controller.apiRegistrationClient = fakeAPIRegistrationClient.ApiregistrationV1()
t.apiRegistrationClient = fakeAPIRegistrationClient
return t
// set clients in clustermanager controller
tc.clusterManagerController.recorder = eventstesting.NewTestingEventRecorder(t)
tc.clusterManagerController.operatorKubeClient = fakeManagementKubeClient
tc.clusterManagerController.generateHubClusterClients = func(hubKubeConfig *rest.Config) (kubernetes.Interface, apiextensionsclient.Interface, apiregistrationclient.APIServicesGetter, error) {
return fakeHubKubeClient, fakeAPIExtensionClient, fakeAPIRegistrationClient.ApiregistrationV1(), nil
}
tc.clusterManagerController.ensureSAKubeconfigs = func(ctx context.Context, clusterManagerName, clusterManagerNamespace string, hubConfig *rest.Config, hubClient, managementClient kubernetes.Interface, recorder events.Recorder) error {
return nil
}
}
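The setup helper works because the controller exposes its cross-cluster wiring as function fields that tests can override. A minimal sketch of those seams, with the signatures copied from the assignments above (the struct itself is illustrative, not the controller's full definition):

```go
package clustermanagercontroller

import (
	"context"

	"github.com/openshift/library-go/pkg/operator/events"
	apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	apiregistrationclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1"
)

// controllerSeams sketches the two function fields the setup helper replaces.
type controllerSeams struct {
	// Builds the hub-side clients from a hub kubeconfig at sync time; tests
	// swap it for a closure that returns fake clientsets.
	generateHubClusterClients func(hubKubeConfig *rest.Config) (kubernetes.Interface, apiextensionsclient.Interface, apiregistrationclient.APIServicesGetter, error)

	// Copies service-account kubeconfigs from the hub to the management
	// cluster; tests stub it with a no-op.
	ensureSAKubeconfigs func(ctx context.Context, clusterManagerName, clusterManagerNamespace string, hubConfig *rest.Config, hubClient, managementClient kubernetes.Interface, recorder events.Recorder) error
}
```

Keeping the client construction behind a field means the sync path never needs a live external hub in unit tests.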
func ensureObject(t *testing.T, object runtime.Object, hubCore *operatorapiv1.ClusterManager) {
@@ -112,16 +126,18 @@ func ensureObject(t *testing.T, object runtime.Object, hubCore *operatorapiv1.Cl
// TestSyncDeploy tests sync manifests of hub component
func TestSyncDeploy(t *testing.T) {
clusterManager := newClusterManager("testhub")
controller := newTestController(clusterManager).withCRDObject().withKubeObject().withAPIServiceObject()
tc := newTestController(clusterManager)
setup(t, tc)
syncContext := testinghelper.NewFakeSyncContext(t, "testhub")
err := controller.controller.sync(nil, syncContext)
err := tc.clusterManagerController.sync(ctx, syncContext)
if err != nil {
t.Errorf("Expected non error when sync, %v", err)
t.Fatalf("Expected no error when sync, %v", err)
}
createKubeObjects := []runtime.Object{}
kubeActions := controller.kubeClient.Actions()
kubeActions := append(tc.hubKubeClient.Actions(), tc.managementKubeClient.Actions()...) // record objects from both hub and management cluster
for _, action := range kubeActions {
if action.GetVerb() == "create" {
object := action.(clienttesting.CreateActionImpl).Object
@@ -130,13 +146,14 @@ func TestSyncDeploy(t *testing.T) {
}
// Check if resources are created as expected
testinghelper.AssertEqualNumber(t, len(createKubeObjects), 23)
// We expect the namespace to be created twice: once in the management cluster and once in the hub cluster.
testinghelper.AssertEqualNumber(t, len(createKubeObjects), 24)
for _, object := range createKubeObjects {
ensureObject(t, object, clusterManager)
}
createCRDObjects := []runtime.Object{}
crdActions := controller.apiExtensionClient.Actions()
crdActions := tc.apiExtensionClient.Actions()
for _, action := range crdActions {
if action.GetVerb() == "create" {
object := action.(clienttesting.CreateActionImpl).Object
@@ -147,7 +164,7 @@ func TestSyncDeploy(t *testing.T) {
testinghelper.AssertEqualNumber(t, len(createCRDObjects), 9)
createAPIServiceObjects := []runtime.Object{}
apiServiceActions := controller.apiRegistrationClient.Actions()
apiServiceActions := tc.apiRegistrationClient.Actions()
for _, action := range apiServiceActions {
if action.GetVerb() == "create" {
object := action.(clienttesting.CreateActionImpl).Object
@@ -157,7 +174,7 @@ func TestSyncDeploy(t *testing.T) {
// Check if resources are created as expected
testinghelper.AssertEqualNumber(t, len(createAPIServiceObjects), 2)
clusterManagerAction := controller.operatorClient.Actions()
clusterManagerAction := tc.operatorClient.Actions()
testinghelper.AssertEqualNumber(t, len(clusterManagerAction), 2)
testinghelper.AssertAction(t, clusterManagerAction[1], "update")
testinghelper.AssertOnlyConditions(
@@ -170,27 +187,30 @@ func TestSyncDelete(t *testing.T) {
clusterManager := newClusterManager("testhub")
now := metav1.Now()
clusterManager.ObjectMeta.SetDeletionTimestamp(&now)
controller := newTestController(clusterManager).withCRDObject().withKubeObject().withAPIServiceObject()
tc := newTestController(clusterManager)
setup(t, tc)
syncContext := testinghelper.NewFakeSyncContext(t, "testhub")
clusterManagerNamespace := helpers.ClusterManagerNamespace(clusterManager.ClusterName, clusterManager.Spec.DeployOption.Mode)
err := controller.controller.sync(nil, syncContext)
err := tc.clusterManagerController.sync(ctx, syncContext)
if err != nil {
t.Errorf("Expected non error when sync, %v", err)
t.Fatalf("Expected non error when sync, %v", err)
}
deleteKubeActions := []clienttesting.DeleteActionImpl{}
kubeActions := controller.kubeClient.Actions()
kubeActions := append(tc.hubKubeClient.Actions(), tc.managementKubeClient.Actions()...)
for _, action := range kubeActions {
if action.GetVerb() == "delete" {
deleteKubeAction := action.(clienttesting.DeleteActionImpl)
deleteKubeActions = append(deleteKubeActions, deleteKubeAction)
}
}
testinghelper.AssertEqualNumber(t, len(deleteKubeActions), 19)
testinghelper.AssertEqualNumber(t, len(deleteKubeActions), 20) // delete the namespace from both the hub cluster and the management cluster
deleteCRDActions := []clienttesting.DeleteActionImpl{}
crdActions := controller.apiExtensionClient.Actions()
crdActions := tc.apiExtensionClient.Actions()
for _, action := range crdActions {
if action.GetVerb() == "delete" {
deleteCRDAction := action.(clienttesting.DeleteActionImpl)
@@ -201,7 +221,7 @@ func TestSyncDelete(t *testing.T) {
testinghelper.AssertEqualNumber(t, len(deleteCRDActions), 11)
deleteAPIServiceActions := []clienttesting.DeleteActionImpl{}
apiServiceActions := controller.apiRegistrationClient.Actions()
apiServiceActions := tc.apiRegistrationClient.Actions()
for _, action := range apiServiceActions {
if action.GetVerb() == "delete" {
deleteAPIServiceAction := action.(clienttesting.DeleteActionImpl)
@@ -229,11 +249,13 @@ func TestDeleteCRD(t *testing.T) {
Name: crdNames[0],
},
}
controller := newTestController(clusterManager).withCRDObject(crd).withKubeObject().withAPIServiceObject()
tc := newTestController(clusterManager)
setup(t, tc, crd)
// Return the CRD on the first get, and return NotFound on the second get
getCount := 0
controller.apiExtensionClient.PrependReactor("get", "customresourcedefinitions", func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) {
tc.apiExtensionClient.PrependReactor("get", "customresourcedefinitions", func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) {
if getCount == 0 {
getCount = getCount + 1
return true, crd, nil
@@ -243,13 +265,34 @@ func TestDeleteCRD(t *testing.T) {
})
syncContext := testinghelper.NewFakeSyncContext(t, "testhub")
err := controller.controller.sync(nil, syncContext)
err := tc.clusterManagerController.sync(ctx, syncContext)
if err == nil {
t.Errorf("Expected error when sync")
t.Fatalf("Expected error when sync at first time")
}
err = controller.controller.sync(nil, syncContext)
err = tc.clusterManagerController.sync(ctx, syncContext)
if err != nil {
t.Errorf("Expected no error when sync: %v", err)
t.Fatalf("Expected no error when sync at second time: %v", err)
}
}
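The hunk above elides the reactor's second branch. A sketch of the complete two-phase reactor, assuming the elided branch returns a NotFound error so the second sync observes the CRD as already deleted (the helper name and the group/resource qualifier are illustrative):

```go
package clustermanagercontroller

import (
	fakeapiextensions "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	clienttesting "k8s.io/client-go/testing"
)

// installCRDGetReactor returns the CRD on the first get and NotFound afterwards.
func installCRDGetReactor(client *fakeapiextensions.Clientset, crd runtime.Object, name string) {
	getCount := 0
	client.PrependReactor("get", "customresourcedefinitions",
		func(action clienttesting.Action) (bool, runtime.Object, error) {
			if getCount == 0 {
				getCount++
				return true, crd, nil // first get: CRD still present, sync must retry
			}
			// assumed elided branch: report the CRD as gone
			return true, nil, apierrors.NewNotFound(
				schema.GroupResource{Group: "apiextensions.k8s.io", Resource: "customresourcedefinitions"}, name)
		})
}
```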
func TestIsIPFormat(t *testing.T) {
cases := []struct {
address string
isIPFormat bool
}{
{
address: "127.0.0.1",
isIPFormat: true,
},
{
address: "localhost",
isIPFormat: false,
},
}
for _, c := range cases {
if isIPFormat(c.address) != c.isIPFormat {
t.Fatalf("expected %v, got %v", c.isIPFormat, isIPFormat(c.address))
}
}
}
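For reference, `isIPFormat` can be implemented entirely with the standard library; a minimal sketch consistent with the two cases above (the shipped helper may differ):

```go
package clustermanagercontroller

import "net"

// isIPFormat reports whether the webhook address is a literal IP rather than
// a hostname: net.ParseIP returns nil for anything that is not an IP.
func isIPFormat(address string) bool {
	return net.ParseIP(address) != nil
}
```

The distinction matters in Detached mode, where the webhook `Address` in the ClusterManager spec may be either a resolvable hostname or a bare IP, and the two presumably need different handling when the operator wires up the webhook endpoints.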


@@ -5,11 +5,8 @@ import (
"time"
"github.com/openshift/library-go/pkg/controller/controllercmd"
apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
apiregistrationclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
operatorclient "open-cluster-management.io/api/client/operator/clientset/versioned"
operatorinformer "open-cluster-management.io/api/client/operator/informers/externalversions"
"open-cluster-management.io/registration-operator/pkg/operators/clustermanager/controllers/certrotationcontroller"
@@ -28,14 +25,6 @@ func (o *Options) RunClusterManagerOperator(ctx context.Context, controllerConte
if err != nil {
return err
}
apiExtensionClient, err := apiextensionsclient.NewForConfig(controllerContext.KubeConfig)
if err != nil {
return err
}
apiRegistrationClient, err := apiregistrationclient.NewForConfig(controllerContext.KubeConfig)
if err != nil {
return err
}
// kubeInformer is shared by three informers: configmapInformer, secretInformer and deploymentInformer.
// Since detached mode was introduced, the hub components can be installed in a customized namespace
// (previously the informer only watched the "open-cluster-management-hub" namespace).
@@ -52,8 +41,7 @@ func (o *Options) RunClusterManagerOperator(ctx context.Context, controllerConte
clusterManagerController := clustermanagercontroller.NewClusterManagerController(
kubeClient,
apiExtensionClient,
apiRegistrationClient.ApiregistrationV1(),
controllerContext.KubeConfig,
operatorClient.OperatorV1().ClusterManagers(),
operatorInformer.Operator().V1().ClusterManagers(),
kubeInformer.Apps().V1().Deployments(),
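With this change the operator hands `controllerContext.KubeConfig` to the controller instead of pre-built clients, so the controller can construct hub-side clients from whichever kubeconfig applies (the external hub kubeconfig in Detached mode). A hedged sketch of such a constructor; the function name is an assumption, only its shape is implied by the test seam above:

```go
package clustermanagercontroller

import (
	apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	apiregistrationclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1"
)

// generateHubClients builds the three hub-side clients from a hub REST config.
func generateHubClients(hubKubeConfig *rest.Config) (kubernetes.Interface, apiextensionsclient.Interface, apiregistrationclient.APIServicesGetter, error) {
	hubKubeClient, err := kubernetes.NewForConfig(hubKubeConfig)
	if err != nil {
		return nil, nil, nil, err
	}
	hubAPIExtensionClient, err := apiextensionsclient.NewForConfig(hubKubeConfig)
	if err != nil {
		return nil, nil, nil, err
	}
	hubAPIRegistrationClient, err := apiregistrationclient.NewForConfig(hubKubeConfig)
	if err != nil {
		return nil, nil, nil, err
	}
	return hubKubeClient, hubAPIExtensionClient, hubAPIRegistrationClient, nil
}
```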


@@ -0,0 +1,486 @@
package integration
import (
"context"
"fmt"
"time"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
v1 "open-cluster-management.io/api/operator/v1"
appsv1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/util/cert"
"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
"open-cluster-management.io/registration-operator/pkg/helpers"
"open-cluster-management.io/registration-operator/test/integration/util"
)
var _ = ginkgo.Describe("ClusterManager Detached Mode", func() {
var cancel context.CancelFunc
var hubRegistrationDeployment = fmt.Sprintf("%s-registration-controller", clusterManagerName)
ginkgo.BeforeEach(func() {
var ctx context.Context
ctx, cancel = context.WithCancel(context.Background())
recorder := util.NewIntegrationTestEventRecorder("integration")
// Create the detached hub namespace
ns := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: hubNamespaceDetached,
},
}
_, _, err := resourceapply.ApplyNamespace(detachedKubeClient.CoreV1(), recorder, ns)
gomega.Expect(err).To(gomega.BeNil())
// Create the external hub kubeconfig secret
hubKubeconfigSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: helpers.ExternalHubKubeConfig,
Namespace: hubNamespaceDetached,
},
Data: map[string][]byte{
"kubeconfig": util.NewKubeConfig(detachedRestConfig.Host),
},
}
_, _, err = resourceapply.ApplySecret(detachedKubeClient.CoreV1(), recorder, hubKubeconfigSecret)
gomega.Expect(err).To(gomega.BeNil())
go startHubOperator(ctx, v1.InstallModeDetached)
})
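`util.NewKubeConfig` is assumed here to serialize a minimal kubeconfig pointing at the given host; that blob is what the secret above hands to the operator as the external hub kubeconfig. A sketch using client-go's clientcmd types (cluster/context names and TLS settings are assumptions):

```go
package util

import (
	"k8s.io/client-go/tools/clientcmd"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

// NewKubeConfig renders a single-cluster kubeconfig for the given API server host.
func NewKubeConfig(host string) []byte {
	config := clientcmdapi.Config{
		Clusters: map[string]*clientcmdapi.Cluster{
			"hub": {Server: host, InsecureSkipTLSVerify: true}, // test-only: skip TLS verification
		},
		Contexts: map[string]*clientcmdapi.Context{
			"hub": {Cluster: "hub"},
		},
		CurrentContext: "hub",
	}
	data, err := clientcmd.Write(config)
	if err != nil {
		panic(err) // a static config like this should always serialize
	}
	return data
}
```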
ginkgo.AfterEach(func() {
if cancel != nil {
cancel()
}
})
ginkgo.Context("Deploy and clean hub component", func() {
ginkgo.It("should have expected resource created successfully", func() {
// Check namespace
gomega.Eventually(func() error {
if _, err := detachedKubeClient.CoreV1().Namespaces().Get(context.Background(), hubNamespaceDetached, metav1.GetOptions{}); err != nil {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
// Check clusterrole/clusterrolebinding
hubRegistrationClusterRole := fmt.Sprintf("open-cluster-management:%s-registration:controller", clusterManagerName)
hubRegistrationWebhookClusterRole := fmt.Sprintf("open-cluster-management:%s-registration:webhook", clusterManagerName)
hubWorkWebhookClusterRole := fmt.Sprintf("open-cluster-management:%s-work:webhook", clusterManagerName)
gomega.Eventually(func() error {
if _, err := detachedKubeClient.RbacV1().ClusterRoles().Get(context.Background(), hubRegistrationClusterRole, metav1.GetOptions{}); err != nil {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
gomega.Eventually(func() error {
if _, err := detachedKubeClient.RbacV1().ClusterRoles().Get(context.Background(), hubRegistrationWebhookClusterRole, metav1.GetOptions{}); err != nil {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
gomega.Eventually(func() error {
if _, err := detachedKubeClient.RbacV1().ClusterRoles().Get(context.Background(), hubWorkWebhookClusterRole, metav1.GetOptions{}); err != nil {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
gomega.Eventually(func() error {
if _, err := detachedKubeClient.RbacV1().ClusterRoleBindings().Get(context.Background(), hubRegistrationClusterRole, metav1.GetOptions{}); err != nil {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
gomega.Eventually(func() error {
if _, err := detachedKubeClient.RbacV1().ClusterRoleBindings().Get(context.Background(), hubRegistrationWebhookClusterRole, metav1.GetOptions{}); err != nil {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
gomega.Eventually(func() error {
if _, err := detachedKubeClient.RbacV1().ClusterRoleBindings().Get(context.Background(), hubWorkWebhookClusterRole, metav1.GetOptions{}); err != nil {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
// Check service account
hubRegistrationSA := fmt.Sprintf("%s-registration-controller-sa", clusterManagerName)
hubRegistrationWebhookSA := fmt.Sprintf("%s-registration-webhook-sa", clusterManagerName)
hubWorkWebhookSA := fmt.Sprintf("%s-work-webhook-sa", clusterManagerName)
gomega.Eventually(func() error {
if _, err := detachedKubeClient.CoreV1().ServiceAccounts(hubNamespaceDetached).Get(context.Background(), hubRegistrationSA, metav1.GetOptions{}); err != nil {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
gomega.Eventually(func() error {
if _, err := detachedKubeClient.CoreV1().ServiceAccounts(hubNamespaceDetached).Get(context.Background(), hubRegistrationWebhookSA, metav1.GetOptions{}); err != nil {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
gomega.Eventually(func() error {
if _, err := detachedKubeClient.CoreV1().ServiceAccounts(hubNamespaceDetached).Get(context.Background(), hubWorkWebhookSA, metav1.GetOptions{}); err != nil {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
// Check deployment
gomega.Eventually(func() error {
if _, err := detachedKubeClient.AppsV1().Deployments(hubNamespaceDetached).Get(context.Background(), hubRegistrationDeployment, metav1.GetOptions{}); err != nil {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
hubRegistrationWebhookDeployment := fmt.Sprintf("%s-registration-webhook", clusterManagerName)
gomega.Eventually(func() error {
if _, err := detachedKubeClient.AppsV1().Deployments(hubNamespaceDetached).Get(context.Background(), hubRegistrationWebhookDeployment, metav1.GetOptions{}); err != nil {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
hubWorkWebhookDeployment := fmt.Sprintf("%s-work-webhook", clusterManagerName)
gomega.Eventually(func() error {
if _, err := detachedKubeClient.AppsV1().Deployments(hubNamespaceDetached).Get(context.Background(), hubWorkWebhookDeployment, metav1.GetOptions{}); err != nil {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
// Check service
gomega.Eventually(func() error {
if _, err := detachedKubeClient.CoreV1().Services(hubNamespaceDetached).Get(context.Background(), "cluster-manager-registration-webhook", metav1.GetOptions{}); err != nil {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
gomega.Eventually(func() error {
if _, err := detachedKubeClient.CoreV1().Services(hubNamespaceDetached).Get(context.Background(), "cluster-manager-work-webhook", metav1.GetOptions{}); err != nil {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
// Check webhook secret
registrationWebhookSecret := "registration-webhook-serving-cert"
gomega.Eventually(func() error {
s, err := detachedKubeClient.CoreV1().Secrets(hubNamespaceDetached).Get(context.Background(), registrationWebhookSecret, metav1.GetOptions{})
if err != nil {
return err
}
if s.Data == nil {
return fmt.Errorf("s.Data is nil")
} else if s.Data["tls.crt"] == nil {
return fmt.Errorf("s.Data doesn't contain key 'tls.crt'")
} else if s.Data["tls.key"] == nil {
return fmt.Errorf("s.Data doesn't contain key 'tls.key'")
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
workWebhookSecret := "work-webhook-serving-cert"
gomega.Eventually(func() error {
s, err := detachedKubeClient.CoreV1().Secrets(hubNamespaceDetached).Get(context.Background(), workWebhookSecret, metav1.GetOptions{})
if err != nil {
return err
}
if s.Data == nil {
return fmt.Errorf("s.Data is nil")
} else if s.Data["tls.crt"] == nil {
return fmt.Errorf("s.Data doesn't contain key 'tls.crt'")
} else if s.Data["tls.key"] == nil {
return fmt.Errorf("s.Data doesn't contain key 'tls.key'")
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
// Check validating webhook
registrationValidatingWebhook := "managedclustervalidators.admission.cluster.open-cluster-management.io"
gomega.Eventually(func() error {
if _, err := detachedKubeClient.AdmissionregistrationV1().ValidatingWebhookConfigurations().Get(context.Background(), registrationValidatingWebhook, metav1.GetOptions{}); err != nil {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
workValidatingWebhook := "manifestworkvalidators.admission.work.open-cluster-management.io"
gomega.Eventually(func() error {
if _, err := detachedKubeClient.AdmissionregistrationV1().ValidatingWebhookConfigurations().Get(context.Background(), workValidatingWebhook, metav1.GetOptions{}); err != nil {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
util.AssertClusterManagerCondition(clusterManagerName, detachedOperatorClient, "Applied", "ClusterManagerApplied", metav1.ConditionTrue)
})
ginkgo.It("Deployment should be updated when clustermanager is changed", func() {
gomega.Eventually(func() error {
if _, err := detachedKubeClient.AppsV1().Deployments(hubNamespaceDetached).Get(context.Background(), hubRegistrationDeployment, metav1.GetOptions{}); err != nil {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
// Check if generations are correct
gomega.Eventually(func() error {
actual, err := detachedOperatorClient.OperatorV1().ClusterManagers().Get(context.Background(), clusterManagerName, metav1.GetOptions{})
if err != nil {
return err
}
if actual.Generation != actual.Status.ObservedGeneration {
return fmt.Errorf("except generation to be %d, but got %d", actual.Status.ObservedGeneration, actual.Generation)
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
clusterManager, err := detachedOperatorClient.OperatorV1().ClusterManagers().Get(context.Background(), clusterManagerName, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
clusterManager.Spec.RegistrationImagePullSpec = "testimage:latest"
_, err = detachedOperatorClient.OperatorV1().ClusterManagers().Update(context.Background(), clusterManager, metav1.UpdateOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Eventually(func() error {
actual, err := detachedKubeClient.AppsV1().Deployments(hubNamespaceDetached).Get(context.Background(), hubRegistrationDeployment, metav1.GetOptions{})
if err != nil {
return err
}
gomega.Expect(len(actual.Spec.Template.Spec.Containers)).Should(gomega.Equal(1))
if actual.Spec.Template.Spec.Containers[0].Image != "testimage:latest" {
return fmt.Errorf("expected image to be testimage:latest but get %s", actual.Spec.Template.Spec.Containers[0].Image)
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
// Check if generations are correct
gomega.Eventually(func() error {
actual, err := detachedOperatorClient.OperatorV1().ClusterManagers().Get(context.Background(), clusterManagerName, metav1.GetOptions{})
if err != nil {
return err
}
if actual.Generation != actual.Status.ObservedGeneration {
return fmt.Errorf("except generation to be %d, but got %d", actual.Status.ObservedGeneration, actual.Generation)
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
// Check if relatedResources are correct
gomega.Eventually(func() error {
actual, err := detachedOperatorClient.OperatorV1().ClusterManagers().Get(context.Background(), clusterManagerName, metav1.GetOptions{})
if err != nil {
return err
}
if len(actual.Status.RelatedResources) != 34 {
return fmt.Errorf("should get 34 relatedResources, actual got %v", len(actual.Status.RelatedResources))
}
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
})
ginkgo.It("Deployment should be added nodeSelector and toleration when add nodePlacement into clustermanager", func() {
clusterManager, err := detachedOperatorClient.OperatorV1().ClusterManagers().Get(context.Background(), clusterManagerName, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
clusterManager.Spec.NodePlacement = v1.NodePlacement{
NodeSelector: map[string]string{"node-role.kubernetes.io/infra": ""},
Tolerations: []corev1.Toleration{
{
Key: "node-role.kubernetes.io/infra",
Operator: corev1.TolerationOpExists,
Effect: corev1.TaintEffectNoSchedule,
},
},
}
_, err = detachedOperatorClient.OperatorV1().ClusterManagers().Update(context.Background(), clusterManager, metav1.UpdateOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Eventually(func() error {
actual, err := detachedKubeClient.AppsV1().Deployments(hubNamespaceDetached).Get(context.Background(), hubRegistrationDeployment, metav1.GetOptions{})
if err != nil {
return err
}
gomega.Expect(len(actual.Spec.Template.Spec.Containers)).Should(gomega.Equal(1))
if len(actual.Spec.Template.Spec.NodeSelector) == 0 {
return fmt.Errorf("length of node selector should not equals to 0")
}
if _, ok := actual.Spec.Template.Spec.NodeSelector["node-role.kubernetes.io/infra"]; !ok {
return fmt.Errorf("node-role.kubernetes.io/infra not exist")
}
if len(actual.Spec.Template.Spec.Tolerations) == 0 {
return fmt.Errorf("length of node selecor should not equals to 0")
}
for _, toleration := range actual.Spec.Template.Spec.Tolerations {
if toleration.Key == "node-role.kubernetes.io/infra" {
return nil
}
}
return fmt.Errorf("no key equals to node-role.kubernetes.io/infra")
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
})
ginkgo.It("Deployment should be reconciled when manually updated", func() {
gomega.Eventually(func() error {
if _, err := detachedKubeClient.AppsV1().Deployments(hubNamespaceDetached).Get(context.Background(), hubRegistrationDeployment, metav1.GetOptions{}); err != nil {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
registrationDeployment, err := detachedKubeClient.AppsV1().Deployments(hubNamespaceDetached).Get(context.Background(), hubRegistrationDeployment, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
registrationDeployment.Spec.Template.Spec.Containers[0].Image = "testimage2:latest"
_, err = detachedKubeClient.AppsV1().Deployments(hubNamespaceDetached).Update(context.Background(), registrationDeployment, metav1.UpdateOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Eventually(func() error {
registrationDeployment, err := detachedKubeClient.AppsV1().Deployments(hubNamespaceDetached).Get(context.Background(), hubRegistrationDeployment, metav1.GetOptions{})
if err != nil {
return err
}
if registrationDeployment.Spec.Template.Spec.Containers[0].Image != "testimage:latest" {
return fmt.Errorf("image should be testimage:latest, but got %s", registrationDeployment.Spec.Template.Spec.Containers[0].Image)
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
// Check if generations are correct
gomega.Eventually(func() error {
actual, err := detachedOperatorClient.OperatorV1().ClusterManagers().Get(context.Background(), clusterManagerName, metav1.GetOptions{})
if err != nil {
return err
}
registrationDeployment, err := detachedKubeClient.AppsV1().Deployments(hubNamespaceDetached).Get(context.Background(), hubRegistrationDeployment, metav1.GetOptions{})
if err != nil {
return err
}
deploymentGeneration := helpers.NewGenerationStatus(appsv1.SchemeGroupVersion.WithResource("deployments"), registrationDeployment)
actualGeneration := helpers.FindGenerationStatus(actual.Status.Generations, deploymentGeneration)
if deploymentGeneration.LastGeneration != actualGeneration.LastGeneration {
return fmt.Errorf("expected LastGeneration shoud be %d, but get %d", actualGeneration.LastGeneration, deploymentGeneration.LastGeneration)
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
})
})
ginkgo.Context("Cluster manager statuses", func() {
ginkgo.It("should have correct degraded conditions", func() {
gomega.Eventually(func() error {
if _, err := detachedKubeClient.AppsV1().Deployments(hubNamespaceDetached).Get(context.Background(), hubRegistrationDeployment, metav1.GetOptions{}); err != nil {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
// The cluster manager should be unavailable at first
util.AssertClusterManagerCondition(clusterManagerName, detachedOperatorClient, "HubRegistrationDegraded", "UnavailableRegistrationPod", metav1.ConditionTrue)
// Update replica of deployment
registrationDeployment, err := detachedKubeClient.AppsV1().Deployments(hubNamespaceDetached).Get(context.Background(), hubRegistrationDeployment, metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
registrationDeployment.Status.AvailableReplicas = 3
registrationDeployment.Status.Replicas = 3
registrationDeployment.Status.ReadyReplicas = 3
_, err = detachedKubeClient.AppsV1().Deployments(hubNamespaceDetached).UpdateStatus(context.Background(), registrationDeployment, metav1.UpdateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
// The cluster manager should eventually become functional
util.AssertClusterManagerCondition(clusterManagerName, detachedOperatorClient, "HubRegistrationDegraded", "RegistrationFunctional", metav1.ConditionFalse)
})
})
ginkgo.Context("Serving cert rotation", func() {
ginkgo.It("should rotate both serving cert and signing cert before they become expired", func() {
secretNames := []string{"signer-secret", "registration-webhook-serving-cert", "work-webhook-serving-cert"}
// wait until all secrets and configmap are in place
gomega.Eventually(func() error {
for _, name := range secretNames {
if _, err := detachedKubeClient.CoreV1().Secrets(hubNamespaceDetached).Get(context.Background(), name, metav1.GetOptions{}); err != nil {
return err
}
}
if _, err := detachedKubeClient.CoreV1().ConfigMaps(hubNamespaceDetached).Get(context.Background(), "ca-bundle-configmap", metav1.GetOptions{}); err != nil {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
// both the serving certs and the signing cert should always be valid
gomega.Consistently(func() error {
configmap, err := detachedKubeClient.CoreV1().ConfigMaps(hubNamespaceDetached).Get(context.Background(), "ca-bundle-configmap", metav1.GetOptions{})
if err != nil {
return err
}
for _, name := range []string{"signer-secret", "registration-webhook-serving-cert", "work-webhook-serving-cert"} {
secret, err := detachedKubeClient.CoreV1().Secrets(hubNamespaceDetached).Get(context.Background(), name, metav1.GetOptions{})
if err != nil {
return err
}
certificates, err := cert.ParseCertsPEM(secret.Data["tls.crt"])
if err != nil {
return err
}
if len(certificates) == 0 {
return fmt.Errorf("certificates length equals to 0")
}
now := time.Now()
certificate := certificates[0]
if now.After(certificate.NotAfter) {
return fmt.Errorf("certificate after NotAfter")
}
if now.Before(certificate.NotBefore) {
return fmt.Errorf("certificate before NotBefore")
}
if name == "signer-secret" {
continue
}
// ensure the signing cert of the serving certs is present in the ca bundle configmap
caCerts, err := cert.ParseCertsPEM([]byte(configmap.Data["ca-bundle.crt"]))
if err != nil {
return err
}
found := false
for _, caCert := range caCerts {
if certificate.Issuer.CommonName != caCert.Subject.CommonName {
continue
}
if now.After(caCert.NotAfter) {
return fmt.Errorf("certificate after NotAfter")
}
if now.Before(caCert.NotBefore) {
return fmt.Errorf("certificate before NotBefore")
}
found = true
break
}
if !found {
return fmt.Errorf("not found")
}
}
return nil
}, eventuallyTimeout*3, eventuallyInterval*3).Should(gomega.BeNil())
})
})
})
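`util.AssertClusterManagerCondition`, used throughout these specs, presumably polls the ClusterManager until a condition with the expected type, reason and status appears. A sketch under that assumption (timeouts and exact matching semantics are illustrative):

```go
package util

import (
	"context"
	"fmt"

	"github.com/onsi/gomega"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	operatorclient "open-cluster-management.io/api/client/operator/clientset/versioned"
)

// AssertClusterManagerCondition polls until the named condition matches.
func AssertClusterManagerCondition(name string, client operatorclient.Interface, condType, reason string, status metav1.ConditionStatus) {
	gomega.Eventually(func() error {
		cm, err := client.OperatorV1().ClusterManagers().Get(context.Background(), name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		cond := meta.FindStatusCondition(cm.Status.Conditions, condType)
		if cond == nil {
			return fmt.Errorf("condition %q not found", condType)
		}
		if cond.Reason != reason || cond.Status != status {
			return fmt.Errorf("condition %q has reason %q and status %q", condType, cond.Reason, cond.Status)
		}
		return nil
	}, 90, 1).Should(gomega.BeNil())
}
```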


@@ -483,443 +483,3 @@ var _ = ginkgo.Describe("ClusterManager Default Mode", func() {
})
})
})
var _ = ginkgo.Describe("ClusterManager Detached Mode", func() {
var cancel context.CancelFunc
var hubRegistrationDeployment = fmt.Sprintf("%s-registration-controller", clusterManagerName)
ginkgo.BeforeEach(func() {
var ctx context.Context
ctx, cancel = context.WithCancel(context.Background())
go startHubOperator(ctx, v1.InstallModeDetached)
})
ginkgo.AfterEach(func() {
if cancel != nil {
cancel()
}
})
ginkgo.Context("Deploy and clean hub component", func() {
ginkgo.It("should have expected resource created successfully", func() {
// Check namespace
gomega.Eventually(func() error {
if _, err := detachedKubeClient.CoreV1().Namespaces().Get(context.Background(), hubNamespaceDetached, metav1.GetOptions{}); err != nil {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
// Check clusterrole/clusterrolebinding
hubRegistrationClusterRole := fmt.Sprintf("open-cluster-management:%s-registration:controller", clusterManagerName)
hubRegistrationWebhookClusterRole := fmt.Sprintf("open-cluster-management:%s-registration:webhook", clusterManagerName)
hubWorkWebhookClusterRole := fmt.Sprintf("open-cluster-management:%s-work:webhook", clusterManagerName)
gomega.Eventually(func() error {
if _, err := detachedKubeClient.RbacV1().ClusterRoles().Get(context.Background(), hubRegistrationClusterRole, metav1.GetOptions{}); err != nil {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
gomega.Eventually(func() error {
if _, err := detachedKubeClient.RbacV1().ClusterRoles().Get(context.Background(), hubRegistrationWebhookClusterRole, metav1.GetOptions{}); err != nil {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
gomega.Eventually(func() error {
if _, err := detachedKubeClient.RbacV1().ClusterRoles().Get(context.Background(), hubWorkWebhookClusterRole, metav1.GetOptions{}); err != nil {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
gomega.Eventually(func() error {
if _, err := detachedKubeClient.RbacV1().ClusterRoleBindings().Get(context.Background(), hubRegistrationClusterRole, metav1.GetOptions{}); err != nil {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
gomega.Eventually(func() error {
if _, err := detachedKubeClient.RbacV1().ClusterRoleBindings().Get(context.Background(), hubRegistrationWebhookClusterRole, metav1.GetOptions{}); err != nil {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
gomega.Eventually(func() error {
if _, err := detachedKubeClient.RbacV1().ClusterRoleBindings().Get(context.Background(), hubWorkWebhookClusterRole, metav1.GetOptions{}); err != nil {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
// Check service account
hubRegistrationSA := fmt.Sprintf("%s-registration-controller-sa", clusterManagerName)
hubRegistrationWebhookSA := fmt.Sprintf("%s-registration-webhook-sa", clusterManagerName)
hubWorkWebhookSA := fmt.Sprintf("%s-work-webhook-sa", clusterManagerName)
gomega.Eventually(func() error {
if _, err := detachedKubeClient.CoreV1().ServiceAccounts(hubNamespaceDetached).Get(context.Background(), hubRegistrationSA, metav1.GetOptions{}); err != nil {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
gomega.Eventually(func() error {
if _, err := detachedKubeClient.CoreV1().ServiceAccounts(hubNamespaceDetached).Get(context.Background(), hubRegistrationWebhookSA, metav1.GetOptions{}); err != nil {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
gomega.Eventually(func() error {
if _, err := detachedKubeClient.CoreV1().ServiceAccounts(hubNamespaceDetached).Get(context.Background(), hubWorkWebhookSA, metav1.GetOptions{}); err != nil {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
// Check deployment
gomega.Eventually(func() error {
if _, err := detachedKubeClient.AppsV1().Deployments(hubNamespaceDetached).Get(context.Background(), hubRegistrationDeployment, metav1.GetOptions{}); err != nil {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
hubRegistrationWebhookDeployment := fmt.Sprintf("%s-registration-webhook", clusterManagerName)
gomega.Eventually(func() error {
if _, err := detachedKubeClient.AppsV1().Deployments(hubNamespaceDetached).Get(context.Background(), hubRegistrationWebhookDeployment, metav1.GetOptions{}); err != nil {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
hubWorkWebhookDeployment := fmt.Sprintf("%s-work-webhook", clusterManagerName)
gomega.Eventually(func() error {
if _, err := detachedKubeClient.AppsV1().Deployments(hubNamespaceDetached).Get(context.Background(), hubWorkWebhookDeployment, metav1.GetOptions{}); err != nil {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
// Check service
gomega.Eventually(func() error {
if _, err := detachedKubeClient.CoreV1().Services(hubNamespaceDetached).Get(context.Background(), "cluster-manager-registration-webhook", metav1.GetOptions{}); err != nil {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
gomega.Eventually(func() error {
if _, err := detachedKubeClient.CoreV1().Services(hubNamespaceDetached).Get(context.Background(), "cluster-manager-work-webhook", metav1.GetOptions{}); err != nil {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
// Check webhook secret
registrationWebhookSecret := "registration-webhook-serving-cert"
gomega.Eventually(func() error {
s, err := detachedKubeClient.CoreV1().Secrets(hubNamespaceDetached).Get(context.Background(), registrationWebhookSecret, metav1.GetOptions{})
if err != nil {
return err
}
if s.Data == nil {
return fmt.Errorf("s.Data is nil")
} else if s.Data["tls.crt"] == nil {
return fmt.Errorf("s.Data doesn't contain key 'tls.crt'")
} else if s.Data["tls.key"] == nil {
return fmt.Errorf("s.Data doesn't contain key 'tls.key'")
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
workWebhookSecret := "work-webhook-serving-cert"
gomega.Eventually(func() error {
s, err := detachedKubeClient.CoreV1().Secrets(hubNamespaceDetached).Get(context.Background(), workWebhookSecret, metav1.GetOptions{})
if err != nil {
return err
}
if s.Data == nil {
return fmt.Errorf("s.Data is nil")
} else if s.Data["tls.crt"] == nil {
return fmt.Errorf("s.Data doesn't contain key 'tls.crt'")
} else if s.Data["tls.key"] == nil {
return fmt.Errorf("s.Data doesn't contain key 'tls.key'")
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
// Check validating webhook
registrationValidatingWebhook := "managedclustervalidators.admission.cluster.open-cluster-management.io"
gomega.Eventually(func() error {
if _, err := detachedKubeClient.AdmissionregistrationV1().ValidatingWebhookConfigurations().Get(context.Background(), registrationValidatingWebhook, metav1.GetOptions{}); err != nil {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
workValidatingWebhook := "manifestworkvalidators.admission.work.open-cluster-management.io"
gomega.Eventually(func() error {
if _, err := detachedKubeClient.AdmissionregistrationV1().ValidatingWebhookConfigurations().Get(context.Background(), workValidatingWebhook, metav1.GetOptions{}); err != nil {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
util.AssertClusterManagerCondition(clusterManagerName, detachedOperatorClient, "Applied", "ClusterManagerApplied", metav1.ConditionTrue)
})
ginkgo.It("Deployment should be updated when clustermanager is changed", func() {
gomega.Eventually(func() error {
if _, err := detachedKubeClient.AppsV1().Deployments(hubNamespaceDetached).Get(context.Background(), hubRegistrationDeployment, metav1.GetOptions{}); err != nil {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
// Check if generations are correct
gomega.Eventually(func() error {
actual, err := detachedOperatorClient.OperatorV1().ClusterManagers().Get(context.Background(), clusterManagerName, metav1.GetOptions{})
if err != nil {
return err
}
if actual.Generation != actual.Status.ObservedGeneration {
return fmt.Errorf("except generation to be %d, but got %d", actual.Status.ObservedGeneration, actual.Generation)
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
clusterManager, err := detachedOperatorClient.OperatorV1().ClusterManagers().Get(context.Background(), clusterManagerName, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
clusterManager.Spec.RegistrationImagePullSpec = "testimage:latest"
_, err = detachedOperatorClient.OperatorV1().ClusterManagers().Update(context.Background(), clusterManager, metav1.UpdateOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Eventually(func() error {
actual, err := detachedKubeClient.AppsV1().Deployments(hubNamespaceDetached).Get(context.Background(), hubRegistrationDeployment, metav1.GetOptions{})
if err != nil {
return err
}
gomega.Expect(len(actual.Spec.Template.Spec.Containers)).Should(gomega.Equal(1))
if actual.Spec.Template.Spec.Containers[0].Image != "testimage:latest" {
return fmt.Errorf("expected image to be testimage:latest but get %s", actual.Spec.Template.Spec.Containers[0].Image)
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
// Check if generations are correct
gomega.Eventually(func() error {
actual, err := detachedOperatorClient.OperatorV1().ClusterManagers().Get(context.Background(), clusterManagerName, metav1.GetOptions{})
if err != nil {
return err
}
if actual.Generation != actual.Status.ObservedGeneration {
return fmt.Errorf("except generation to be %d, but got %d", actual.Status.ObservedGeneration, actual.Generation)
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
// Check if relatedResources are correct
gomega.Eventually(func() error {
actual, err := detachedOperatorClient.OperatorV1().ClusterManagers().Get(context.Background(), clusterManagerName, metav1.GetOptions{})
if err != nil {
return err
}
if len(actual.Status.RelatedResources) != 34 {
return fmt.Errorf("should get 34 relatedResources, actual got %v", len(actual.Status.RelatedResources))
}
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
})
ginkgo.It("Deployment should be added nodeSelector and toleration when add nodePlacement into clustermanager", func() {
clusterManager, err := detachedOperatorClient.OperatorV1().ClusterManagers().Get(context.Background(), clusterManagerName, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
clusterManager.Spec.NodePlacement = v1.NodePlacement{
NodeSelector: map[string]string{"node-role.kubernetes.io/infra": ""},
Tolerations: []corev1.Toleration{
{
Key: "node-role.kubernetes.io/infra",
Operator: corev1.TolerationOpExists,
Effect: corev1.TaintEffectNoSchedule,
},
},
}
_, err = detachedOperatorClient.OperatorV1().ClusterManagers().Update(context.Background(), clusterManager, metav1.UpdateOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Eventually(func() error {
actual, err := detachedKubeClient.AppsV1().Deployments(hubNamespaceDetached).Get(context.Background(), hubRegistrationDeployment, metav1.GetOptions{})
if err != nil {
return err
}
gomega.Expect(len(actual.Spec.Template.Spec.Containers)).Should(gomega.Equal(1))
if len(actual.Spec.Template.Spec.NodeSelector) == 0 {
return fmt.Errorf("length of node selector should not equals to 0")
}
if _, ok := actual.Spec.Template.Spec.NodeSelector["node-role.kubernetes.io/infra"]; !ok {
return fmt.Errorf("node-role.kubernetes.io/infra not exist")
}
if len(actual.Spec.Template.Spec.Tolerations) == 0 {
return fmt.Errorf("length of node selecor should not equals to 0")
}
for _, toleration := range actual.Spec.Template.Spec.Tolerations {
if toleration.Key == "node-role.kubernetes.io/infra" {
return nil
}
}
return fmt.Errorf("no key equals to node-role.kubernetes.io/infra")
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
})
ginkgo.It("Deployment should be reconciled when manually updated", func() {
gomega.Eventually(func() error {
if _, err := detachedKubeClient.AppsV1().Deployments(hubNamespaceDetached).Get(context.Background(), hubRegistrationDeployment, metav1.GetOptions{}); err != nil {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
registrationDeployment, err := detachedKubeClient.AppsV1().Deployments(hubNamespaceDetached).Get(context.Background(), hubRegistrationDeployment, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
registrationDeployment.Spec.Template.Spec.Containers[0].Image = "testimage2:latest"
_, err = detachedKubeClient.AppsV1().Deployments(hubNamespaceDetached).Update(context.Background(), registrationDeployment, metav1.UpdateOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Eventually(func() error {
registrationDeployment, err := detachedKubeClient.AppsV1().Deployments(hubNamespaceDetached).Get(context.Background(), hubRegistrationDeployment, metav1.GetOptions{})
if err != nil {
return err
}
if registrationDeployment.Spec.Template.Spec.Containers[0].Image != "testimage:latest" {
return fmt.Errorf("image should be testimage:latest, but got %s", registrationDeployment.Spec.Template.Spec.Containers[0].Image)
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
// Check if generations are correct
gomega.Eventually(func() error {
actual, err := detachedOperatorClient.OperatorV1().ClusterManagers().Get(context.Background(), clusterManagerName, metav1.GetOptions{})
if err != nil {
return err
}
registrationDeployment, err := detachedKubeClient.AppsV1().Deployments(hubNamespaceDetached).Get(context.Background(), hubRegistrationDeployment, metav1.GetOptions{})
if err != nil {
return err
}
deploymentGeneration := helpers.NewGenerationStatus(appsv1.SchemeGroupVersion.WithResource("deployments"), registrationDeployment)
actualGeneration := helpers.FindGenerationStatus(actual.Status.Generations, deploymentGeneration)
if deploymentGeneration.LastGeneration != actualGeneration.LastGeneration {
return fmt.Errorf("expected LastGeneration shoud be %d, but get %d", actualGeneration.LastGeneration, deploymentGeneration.LastGeneration)
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
})
})
ginkgo.Context("Cluster manager statuses", func() {
ginkgo.It("should have correct degraded conditions", func() {
gomega.Eventually(func() error {
if _, err := detachedKubeClient.AppsV1().Deployments(hubNamespaceDetached).Get(context.Background(), hubRegistrationDeployment, metav1.GetOptions{}); err != nil {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
// The cluster manager should be unavailable at first
util.AssertClusterManagerCondition(clusterManagerName, detachedOperatorClient, "HubRegistrationDegraded", "UnavailableRegistrationPod", metav1.ConditionTrue)
// Update replica of deployment
registrationDeployment, err := detachedKubeClient.AppsV1().Deployments(hubNamespaceDetached).Get(context.Background(), hubRegistrationDeployment, metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
registrationDeployment.Status.AvailableReplicas = 3
registrationDeployment.Status.Replicas = 3
registrationDeployment.Status.ReadyReplicas = 3
_, err = detachedKubeClient.AppsV1().Deployments(hubNamespaceDetached).UpdateStatus(context.Background(), registrationDeployment, metav1.UpdateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
// The cluster manager should eventually become functional
util.AssertClusterManagerCondition(clusterManagerName, detachedOperatorClient, "HubRegistrationDegraded", "RegistrationFunctional", metav1.ConditionFalse)
})
})
ginkgo.Context("Serving cert rotation", func() {
ginkgo.It("should rotate both serving cert and signing cert before they become expired", func() {
secretNames := []string{"signer-secret", "registration-webhook-serving-cert", "work-webhook-serving-cert"}
// wait until all secrets and configmap are in place
gomega.Eventually(func() error {
for _, name := range secretNames {
if _, err := detachedKubeClient.CoreV1().Secrets(hubNamespaceDetached).Get(context.Background(), name, metav1.GetOptions{}); err != nil {
return err
}
}
if _, err := detachedKubeClient.CoreV1().ConfigMaps(hubNamespaceDetached).Get(context.Background(), "ca-bundle-configmap", metav1.GetOptions{}); err != nil {
return err
}
return nil
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
// both the serving certs and the signing cert should always be valid
gomega.Consistently(func() error {
configmap, err := detachedKubeClient.CoreV1().ConfigMaps(hubNamespaceDetached).Get(context.Background(), "ca-bundle-configmap", metav1.GetOptions{})
if err != nil {
return err
}
for _, name := range []string{"signer-secret", "registration-webhook-serving-cert", "work-webhook-serving-cert"} {
secret, err := detachedKubeClient.CoreV1().Secrets(hubNamespaceDetached).Get(context.Background(), name, metav1.GetOptions{})
if err != nil {
return err
}
certificates, err := cert.ParseCertsPEM(secret.Data["tls.crt"])
if err != nil {
return err
}
if len(certificates) == 0 {
return fmt.Errorf("certificates length equals to 0")
}
now := time.Now()
certificate := certificates[0]
if now.After(certificate.NotAfter) {
return fmt.Errorf("certificate after NotAfter")
}
if now.Before(certificate.NotBefore) {
return fmt.Errorf("certificate before NotBefore")
}
if name == "signer-secret" {
continue
}
// ensure the signing cert of the serving certs is present in the ca bundle configmap
caCerts, err := cert.ParseCertsPEM([]byte(configmap.Data["ca-bundle.crt"]))
if err != nil {
return err
}
found := false
for _, caCert := range caCerts {
if certificate.Issuer.CommonName != caCert.Subject.CommonName {
continue
}
if now.After(caCert.NotAfter) {
return fmt.Errorf("certificate after NotAfter")
}
if now.Before(caCert.NotBefore) {
return fmt.Errorf("certificate before NotBefore")
}
found = true
break
}
if !found {
return fmt.Errorf("not found")
}
}
return nil
}, eventuallyTimeout*3, eventuallyInterval*3).Should(gomega.BeNil())
})
})
})


@@ -150,6 +150,16 @@ var _ = ginkgo.BeforeSuite(func(done ginkgo.Done) {
PlacementImagePullSpec: "quay.io/open-cluster-management/placement",
DeployOption: operatorapiv1.ClusterManagerDeployOption{
Mode: operatorapiv1.InstallModeDetached,
Detached: &operatorapiv1.DetachedClusterManagerConfiguration{
RegistrationWebhookConfiguration: operatorapiv1.WebhookConfiguration{
Address: "localhost",
Port: 443,
},
WorkWebhookConfiguration: operatorapiv1.WebhookConfiguration{
Address: "localhost",
Port: 443,
},
},
},
},
}, metav1.CreateOptions{})
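The `Address`/`Port` pairs tell the operator where the registration and work webhooks are reachable, since in Detached mode the hub's API server cannot resolve an in-cluster webhook Service. A small illustration of how such a pair could be rendered, and why it matters whether the address is a literal IP (compare `isIPFormat` in the controller tests); this is not the operator's actual wiring:

```go
package helpers

import (
	"fmt"
	"net"
)

// renderWebhookHost joins a Detached-mode webhook address and port, and reports
// whether the address is a literal IP. Hostnames and IPs typically need
// different plumbing (for example, a Service of type ExternalName can point at
// a hostname but not at an IP).
func renderWebhookHost(address string, port int32) (host string, isIP bool) {
	return fmt.Sprintf("%s:%d", address, port), net.ParseIP(address) != nil
}
```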
@@ -161,7 +171,8 @@ var _ = ginkgo.BeforeSuite(func(done ginkgo.Done) {
ctx, c := context.WithCancel(context.TODO())
cancel = c
go ServiceAccountCtl(ctx)
go ServiceAccountCtl(ctx, kubeClient)
go ServiceAccountCtl(ctx, detachedKubeClient)
close(done)
}, 60)
@@ -181,8 +192,8 @@ var _ = ginkgo.AfterSuite(func() {
})
// ServiceAccountCtl watches service accounts and creates a corresponding secret for each.
func ServiceAccountCtl(ctx context.Context) {
w, err := detachedKubeClient.CoreV1().ServiceAccounts("").Watch(ctx, metav1.ListOptions{})
func ServiceAccountCtl(ctx context.Context, kubeClient kubernetes.Interface) {
w, err := kubeClient.CoreV1().ServiceAccounts("").Watch(ctx, metav1.ListOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
klog.Infof("service account controller start")
@@ -219,14 +230,14 @@ func ServiceAccountCtl(ctx context.Context) {
Type: corev1.SecretTypeServiceAccountToken,
}
_, err = detachedKubeClient.CoreV1().Secrets(sa.Namespace).Create(ctx, secret, metav1.CreateOptions{})
_, err = kubeClient.CoreV1().Secrets(sa.Namespace).Create(ctx, secret, metav1.CreateOptions{})
if errors.IsAlreadyExists(err) {
klog.Infof("secret %s/%s already exist", secret.Namespace, secret.Name)
} else {
gomega.Expect(err).ToNot(gomega.HaveOccurred())
}
_, err = detachedKubeClient.CoreV1().Secrets(sa.Namespace).Get(ctx, secret.Name, metav1.GetOptions{})
_, err = kubeClient.CoreV1().Secrets(sa.Namespace).Get(ctx, secret.Name, metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
err := retry.OnError(retry.DefaultBackoff,
@@ -234,7 +245,7 @@ func ServiceAccountCtl(ctx context.Context) {
return true
},
func() error {
serviceAccount, err := detachedKubeClient.CoreV1().ServiceAccounts(sa.Namespace).Get(ctx, sa.Name, metav1.GetOptions{})
serviceAccount, err := kubeClient.CoreV1().ServiceAccounts(sa.Namespace).Get(ctx, sa.Name, metav1.GetOptions{})
if err != nil {
return err
}
@@ -244,7 +255,7 @@ func ServiceAccountCtl(ctx context.Context) {
Name: secret.Name,
},
}
_, err = detachedKubeClient.CoreV1().ServiceAccounts(serviceAccount.Namespace).Update(ctx, serviceAccount, metav1.UpdateOptions{})
_, err = kubeClient.CoreV1().ServiceAccounts(serviceAccount.Namespace).Update(ctx, serviceAccount, metav1.UpdateOptions{})
return err
})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
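The hunk elides the secret's metadata. For a `SecretTypeServiceAccountToken` secret to bind to its service account, it must carry the `kubernetes.io/service-account.name` annotation, so the elided construction presumably looks roughly like this (the name suffix and token payload are illustrative):

```go
package integration

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// newSATokenSecret builds the token secret the fake controller creates for a
// service account, much as a real token controller would.
func newSATokenSecret(sa *corev1.ServiceAccount) *corev1.Secret {
	return &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      sa.Name + "-token",
			Namespace: sa.Namespace,
			Annotations: map[string]string{
				corev1.ServiceAccountNameKey: sa.Name, // binds the secret to the SA
			},
		},
		Type: corev1.SecretTypeServiceAccountToken,
		Data: map[string][]byte{
			"token": []byte("fake-token"), // the tests only need the key to exist
		},
	}
}
```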

vendor/modules.txt

@@ -235,7 +235,7 @@ github.com/openshift/build-machinery-go/make/targets/golang
github.com/openshift/build-machinery-go/make/targets/openshift
github.com/openshift/build-machinery-go/make/targets/openshift/operator
github.com/openshift/build-machinery-go/scripts
# github.com/openshift/library-go v0.0.0-20210407110438-80b76d711afb
# github.com/openshift/library-go v0.0.0-20210407140145-f831e911c638
## explicit; go 1.15
github.com/openshift/library-go/pkg/assets
github.com/openshift/library-go/pkg/authorization/hardcodedauthorizer