mirror of
https://github.com/open-cluster-management-io/ocm.git
synced 2026-05-17 06:37:48 +00:00
Add spoke operator
This commit is contained in:
@@ -9,10 +9,10 @@ import (
|
||||
"github.com/open-cluster-management/nucleus/pkg/version"
|
||||
)
|
||||
|
||||
// NewOperator generatee a command to start workload agent
|
||||
// NewOperatorCmd generatee a command to start workload agent
|
||||
func NewOperatorCmd() *cobra.Command {
|
||||
cmd := controllercmd.
|
||||
NewControllerCommandConfig("operator", version.Get(), operators.RunNucleusOperator).
|
||||
NewControllerCommandConfig("nucleus-operator", version.Get(), operators.RunNucleusOperator).
|
||||
NewCommand()
|
||||
cmd.Use = "operator"
|
||||
cmd.Short = "Start the nucleus operator"
|
||||
|
||||
@@ -96,3 +96,53 @@ func UpdateNucleusHubConditionFn(conds ...nucleusapiv1.StatusCondition) UpdateNu
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
type UpdateNucleusSpokeStatusFunc func(status *nucleusapiv1.SpokeCoreStatus) error
|
||||
|
||||
func UpdateNucleusSpokeStatus(
|
||||
ctx context.Context,
|
||||
client nucleusv1client.SpokeCoreInterface,
|
||||
nucleusSpokeCoreName string,
|
||||
updateFuncs ...UpdateNucleusSpokeStatusFunc) (*nucleusapiv1.SpokeCoreStatus, bool, error) {
|
||||
updated := false
|
||||
var updatedSpokeClusterStatus *nucleusapiv1.SpokeCoreStatus
|
||||
err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
|
||||
spokeCore, err := client.Get(ctx, nucleusSpokeCoreName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
oldStatus := &spokeCore.Status
|
||||
|
||||
newStatus := oldStatus.DeepCopy()
|
||||
for _, update := range updateFuncs {
|
||||
if err := update(newStatus); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if equality.Semantic.DeepEqual(oldStatus, newStatus) {
|
||||
// We return the newStatus which is a deep copy of oldStatus but with all update funcs applied.
|
||||
updatedSpokeClusterStatus = newStatus
|
||||
return nil
|
||||
}
|
||||
|
||||
spokeCore.Status = *newStatus
|
||||
updatedSpokeCluster, err := client.UpdateStatus(ctx, spokeCore, metav1.UpdateOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
updatedSpokeClusterStatus = &updatedSpokeCluster.Status
|
||||
updated = err == nil
|
||||
return err
|
||||
})
|
||||
|
||||
return updatedSpokeClusterStatus, updated, err
|
||||
}
|
||||
|
||||
func UpdateNucleusSpokeConditionFn(conds ...nucleusapiv1.StatusCondition) UpdateNucleusSpokeStatusFunc {
|
||||
return func(oldStatus *nucleusapiv1.SpokeCoreStatus) error {
|
||||
for _, cond := range conds {
|
||||
SetNucleusCondition(&oldStatus.Conditions, cond)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
@@ -73,14 +73,22 @@ func TestUpdateStatusCondition(t *testing.T) {
|
||||
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
fakeClusterClient := nucleusfake.NewSimpleClientset(&nucleusapiv1.HubCore{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "testspokecluster"},
|
||||
Status: nucleusapiv1.HubCoreStatus{
|
||||
Conditions: c.startingConditions,
|
||||
fakeClusterClient := nucleusfake.NewSimpleClientset(
|
||||
&nucleusapiv1.HubCore{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "testspokecluster"},
|
||||
Status: nucleusapiv1.HubCoreStatus{
|
||||
Conditions: c.startingConditions,
|
||||
},
|
||||
},
|
||||
})
|
||||
&nucleusapiv1.SpokeCore{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "testspokecluster"},
|
||||
Status: nucleusapiv1.SpokeCoreStatus{
|
||||
Conditions: c.startingConditions,
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
status, updated, err := UpdateNucleusHubStatus(
|
||||
hubstatus, updated, err := UpdateNucleusHubStatus(
|
||||
context.TODO(),
|
||||
fakeClusterClient.NucleusV1().HubCores(),
|
||||
"testspokecluster",
|
||||
@@ -92,14 +100,36 @@ func TestUpdateStatusCondition(t *testing.T) {
|
||||
if updated != c.expextedUpdated {
|
||||
t.Errorf("expected %t, but %t", c.expextedUpdated, updated)
|
||||
}
|
||||
|
||||
spokestatus, updated, err := UpdateNucleusSpokeStatus(
|
||||
context.TODO(),
|
||||
fakeClusterClient.NucleusV1().SpokeCores(),
|
||||
"testspokecluster",
|
||||
UpdateNucleusSpokeConditionFn(c.newCondition),
|
||||
)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected err: %v", err)
|
||||
}
|
||||
if updated != c.expextedUpdated {
|
||||
t.Errorf("expected %t, but %t", c.expextedUpdated, updated)
|
||||
}
|
||||
|
||||
for i := range c.expectedConditions {
|
||||
expected := c.expectedConditions[i]
|
||||
actual := status.Conditions[i]
|
||||
hubactual := hubstatus.Conditions[i]
|
||||
if expected.LastTransitionTime == (metav1.Time{}) {
|
||||
actual.LastTransitionTime = metav1.Time{}
|
||||
hubactual.LastTransitionTime = metav1.Time{}
|
||||
}
|
||||
if !equality.Semantic.DeepEqual(expected, actual) {
|
||||
t.Errorf(diff.ObjectDiff(expected, actual))
|
||||
if !equality.Semantic.DeepEqual(expected, hubactual) {
|
||||
t.Errorf(diff.ObjectDiff(expected, hubactual))
|
||||
}
|
||||
|
||||
spokeactual := spokestatus.Conditions[i]
|
||||
if expected.LastTransitionTime == (metav1.Time{}) {
|
||||
spokeactual.LastTransitionTime = metav1.Time{}
|
||||
}
|
||||
if !equality.Semantic.DeepEqual(expected, spokeactual) {
|
||||
t.Errorf(diff.ObjectDiff(expected, spokeactual))
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
// Code generated by go-bindata.
|
||||
// sources:
|
||||
// manifests/hub/0000_00_clusters.open-cluster-management.io_spokeclusters.crd.yaml
|
||||
// manifests/hub/0000_00_work.open-cluster-management.io_workloads.crd.yaml
|
||||
// manifests/hub/0000_00_work.open-cluster-management.io_manifestworks.crd.yaml
|
||||
// manifests/hub/hub-clusterrole.yaml
|
||||
// manifests/hub/hub-clusterrolebinding.yaml
|
||||
// manifests/hub/hub-deployment.yaml
|
||||
@@ -201,7 +201,7 @@ func manifestsHub0000_00_clustersOpenClusterManagementIo_spokeclustersCrdYaml()
|
||||
return a, nil
|
||||
}
|
||||
|
||||
var _manifestsHub0000_00_workOpenClusterManagementIo_workloadsCrdYaml = []byte(`apiVersion: apiextensions.k8s.io/v1beta1
|
||||
var _manifestsHub0000_00_workOpenClusterManagementIo_manifestworksCrdYaml = []byte(`apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
@@ -253,7 +253,11 @@ spec:
|
||||
to be deployed on the spoke cluster.
|
||||
type: array
|
||||
items:
|
||||
description: Manifest represents a resource to be deployed on
|
||||
spoke cluster
|
||||
type: object
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
x-kubernetes-embedded-resource: true
|
||||
status:
|
||||
description: Status represents the current status of work
|
||||
type: object
|
||||
@@ -385,17 +389,17 @@ status:
|
||||
storedVersions: []
|
||||
`)
|
||||
|
||||
func manifestsHub0000_00_workOpenClusterManagementIo_workloadsCrdYamlBytes() ([]byte, error) {
|
||||
return _manifestsHub0000_00_workOpenClusterManagementIo_workloadsCrdYaml, nil
|
||||
func manifestsHub0000_00_workOpenClusterManagementIo_manifestworksCrdYamlBytes() ([]byte, error) {
|
||||
return _manifestsHub0000_00_workOpenClusterManagementIo_manifestworksCrdYaml, nil
|
||||
}
|
||||
|
||||
func manifestsHub0000_00_workOpenClusterManagementIo_workloadsCrdYaml() (*asset, error) {
|
||||
bytes, err := manifestsHub0000_00_workOpenClusterManagementIo_workloadsCrdYamlBytes()
|
||||
func manifestsHub0000_00_workOpenClusterManagementIo_manifestworksCrdYaml() (*asset, error) {
|
||||
bytes, err := manifestsHub0000_00_workOpenClusterManagementIo_manifestworksCrdYamlBytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := bindataFileInfo{name: "manifests/hub/0000_00_work.open-cluster-management.io_workloads.crd.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
|
||||
info := bindataFileInfo{name: "manifests/hub/0000_00_work.open-cluster-management.io_manifestworks.crd.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
|
||||
a := &asset{bytes: bytes, info: info}
|
||||
return a, nil
|
||||
}
|
||||
@@ -627,7 +631,7 @@ func AssetNames() []string {
|
||||
// _bindata is a table, holding each asset generator, mapped to its name.
|
||||
var _bindata = map[string]func() (*asset, error){
|
||||
"manifests/hub/0000_00_clusters.open-cluster-management.io_spokeclusters.crd.yaml": manifestsHub0000_00_clustersOpenClusterManagementIo_spokeclustersCrdYaml,
|
||||
"manifests/hub/0000_00_work.open-cluster-management.io_workloads.crd.yaml": manifestsHub0000_00_workOpenClusterManagementIo_workloadsCrdYaml,
|
||||
"manifests/hub/0000_00_work.open-cluster-management.io_manifestworks.crd.yaml": manifestsHub0000_00_workOpenClusterManagementIo_manifestworksCrdYaml,
|
||||
"manifests/hub/hub-clusterrole.yaml": manifestsHubHubClusterroleYaml,
|
||||
"manifests/hub/hub-clusterrolebinding.yaml": manifestsHubHubClusterrolebindingYaml,
|
||||
"manifests/hub/hub-deployment.yaml": manifestsHubHubDeploymentYaml,
|
||||
@@ -679,7 +683,7 @@ var _bintree = &bintree{nil, map[string]*bintree{
|
||||
"manifests": {nil, map[string]*bintree{
|
||||
"hub": {nil, map[string]*bintree{
|
||||
"0000_00_clusters.open-cluster-management.io_spokeclusters.crd.yaml": {manifestsHub0000_00_clustersOpenClusterManagementIo_spokeclustersCrdYaml, map[string]*bintree{}},
|
||||
"0000_00_work.open-cluster-management.io_workloads.crd.yaml": {manifestsHub0000_00_workOpenClusterManagementIo_workloadsCrdYaml, map[string]*bintree{}},
|
||||
"0000_00_work.open-cluster-management.io_manifestworks.crd.yaml": {manifestsHub0000_00_workOpenClusterManagementIo_manifestworksCrdYaml, map[string]*bintree{}},
|
||||
"hub-clusterrole.yaml": {manifestsHubHubClusterroleYaml, map[string]*bintree{}},
|
||||
"hub-clusterrolebinding.yaml": {manifestsHubHubClusterrolebindingYaml, map[string]*bintree{}},
|
||||
"hub-deployment.yaml": {manifestsHubHubDeploymentYaml, map[string]*bintree{}},
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
"k8s.io/apimachinery/pkg/runtime/serializer"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
@@ -77,7 +78,10 @@ func NewNucleusHubController(
|
||||
|
||||
return factory.New().WithSync(controller.sync).
|
||||
ResyncEvery(3*time.Minute).
|
||||
WithInformers(nucleusInformer.Informer()).
|
||||
WithInformersQueueKeyFunc(func(obj runtime.Object) string {
|
||||
accessor, _ := meta.Accessor(obj)
|
||||
return accessor.GetName()
|
||||
}, nucleusInformer.Informer()).
|
||||
ToController("NucleusHubController", recorder)
|
||||
}
|
||||
|
||||
@@ -93,6 +97,10 @@ func (n *nucleusHubController) sync(ctx context.Context, controllerContext facto
|
||||
klog.V(4).Infof("Reconciling HubCore %q", hubCoreName)
|
||||
|
||||
hubCore, err := n.nucleusLister.Get(hubCoreName)
|
||||
if errors.IsNotFound(err) {
|
||||
// HubCore not found, could have been deleted, do nothing.
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -137,7 +145,7 @@ func (n *nucleusHubController) sync(ctx context.Context, controllerContext facto
|
||||
return assets.MustCreateAssetFromTemplate(name, bindata.MustAsset(filepath.Join("", name)), config).Data, nil
|
||||
},
|
||||
"manifests/hub/0000_00_clusters.open-cluster-management.io_spokeclusters.crd.yaml",
|
||||
"manifests/hub/0000_00_work.open-cluster-management.io_workloads.crd.yaml",
|
||||
"manifests/hub/0000_00_work.open-cluster-management.io_manifestworks.crd.yaml",
|
||||
"manifests/hub/hub-clusterrole.yaml",
|
||||
"manifests/hub/hub-clusterrolebinding.yaml",
|
||||
"manifests/hub/hub-namespace.yaml",
|
||||
|
||||
@@ -40,11 +40,15 @@ func RunNucleusOperator(ctx context.Context, controllerContext *controllercmd.Co
|
||||
nucleusClient.NucleusV1().HubCores(),
|
||||
nucleusInformer.Nucleus().V1().HubCores(),
|
||||
controllerContext.EventRecorder)
|
||||
agentController := spoke.NewNucleusAgentController(kubeClient, controllerContext.EventRecorder)
|
||||
spokeController := spoke.NewNucleusSpokeController(
|
||||
kubeClient,
|
||||
nucleusClient.NucleusV1().SpokeCores(),
|
||||
nucleusInformer.Nucleus().V1().SpokeCores(),
|
||||
controllerContext.EventRecorder)
|
||||
|
||||
go nucleusInformer.Start(ctx.Done())
|
||||
go hubcontroller.Run(ctx, 1)
|
||||
go agentController.Run(ctx, 1)
|
||||
go spokeController.Run(ctx, 1)
|
||||
<-ctx.Done()
|
||||
return nil
|
||||
}
|
||||
|
||||
393
pkg/operators/spoke/bindata/bindata.go
Normal file
393
pkg/operators/spoke/bindata/bindata.go
Normal file
@@ -0,0 +1,393 @@
|
||||
// Code generated by go-bindata.
|
||||
// sources:
|
||||
// manifests/spoke/spoke-clusterrolebinding.yaml
|
||||
// manifests/spoke/spoke-registration-deployment.yaml
|
||||
// manifests/spoke/spoke-serviceaccount.yaml
|
||||
// manifests/spoke/spoke-work-deployment.yaml
|
||||
// DO NOT EDIT!
|
||||
|
||||
package bindata
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type asset struct {
|
||||
bytes []byte
|
||||
info os.FileInfo
|
||||
}
|
||||
|
||||
type bindataFileInfo struct {
|
||||
name string
|
||||
size int64
|
||||
mode os.FileMode
|
||||
modTime time.Time
|
||||
}
|
||||
|
||||
func (fi bindataFileInfo) Name() string {
|
||||
return fi.name
|
||||
}
|
||||
func (fi bindataFileInfo) Size() int64 {
|
||||
return fi.size
|
||||
}
|
||||
func (fi bindataFileInfo) Mode() os.FileMode {
|
||||
return fi.mode
|
||||
}
|
||||
func (fi bindataFileInfo) ModTime() time.Time {
|
||||
return fi.modTime
|
||||
}
|
||||
func (fi bindataFileInfo) IsDir() bool {
|
||||
return false
|
||||
}
|
||||
func (fi bindataFileInfo) Sys() interface{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
var _manifestsSpokeSpokeClusterrolebindingYaml = []byte(`apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: system:open-cluster-management:{{ .SpokeCoreName }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: cluster-admin
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ .SpokeCoreName }}-sa
|
||||
namespace: {{ .SpokeCoreNamespace }}
|
||||
`)
|
||||
|
||||
func manifestsSpokeSpokeClusterrolebindingYamlBytes() ([]byte, error) {
|
||||
return _manifestsSpokeSpokeClusterrolebindingYaml, nil
|
||||
}
|
||||
|
||||
func manifestsSpokeSpokeClusterrolebindingYaml() (*asset, error) {
|
||||
bytes, err := manifestsSpokeSpokeClusterrolebindingYamlBytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := bindataFileInfo{name: "manifests/spoke/spoke-clusterrolebinding.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
|
||||
a := &asset{bytes: bytes, info: info}
|
||||
return a, nil
|
||||
}
|
||||
|
||||
var _manifestsSpokeSpokeRegistrationDeploymentYaml = []byte(`kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: {{ .SpokeCoreName }}-registration-agent
|
||||
namespace: {{ .SpokeCoreNamespace }}
|
||||
labels:
|
||||
app: spoke-registration-agent
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: spoke-registration-agent
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: spoke-registration-agent
|
||||
spec:
|
||||
serviceAccountName: {{ .SpokeCoreName }}-sa
|
||||
containers:
|
||||
- name: spoke-agent
|
||||
image: {{ .RegistrationImage }}
|
||||
imagePullPolicy: IfNotPresent
|
||||
args:
|
||||
- "/registration"
|
||||
- "agent"
|
||||
- "--cluster-name={{ .ClusterName }}"
|
||||
- "--bootstrap-kubeconfig=/spoke/bootstrap/kubeconfig"
|
||||
- "--spoke-external-server-url={{ .ExternalServerURL }}"
|
||||
volumeMounts:
|
||||
- name: bootstrap-secret
|
||||
mountPath: "/spoke/bootstrap"
|
||||
readOnly: true
|
||||
- name: hub-kubeconfig-secret
|
||||
mountPath: "/spoke/hub-kubeconfig"
|
||||
readOnly: true
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
scheme: HTTPS
|
||||
port: 443
|
||||
initialDelaySeconds: 2
|
||||
periodSeconds: 10
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
scheme: HTTPS
|
||||
port: 443
|
||||
initialDelaySeconds: 2
|
||||
volumes:
|
||||
- name: bootstrap-secret
|
||||
secret:
|
||||
secretName: {{ .BootStrapKubeConfigSecret }}
|
||||
- name: hub-kubeconfig-secret
|
||||
secret:
|
||||
secretName: {{ .HubKubeConfigSecret }}
|
||||
`)
|
||||
|
||||
func manifestsSpokeSpokeRegistrationDeploymentYamlBytes() ([]byte, error) {
|
||||
return _manifestsSpokeSpokeRegistrationDeploymentYaml, nil
|
||||
}
|
||||
|
||||
func manifestsSpokeSpokeRegistrationDeploymentYaml() (*asset, error) {
|
||||
bytes, err := manifestsSpokeSpokeRegistrationDeploymentYamlBytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := bindataFileInfo{name: "manifests/spoke/spoke-registration-deployment.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
|
||||
a := &asset{bytes: bytes, info: info}
|
||||
return a, nil
|
||||
}
|
||||
|
||||
var _manifestsSpokeSpokeServiceaccountYaml = []byte(`apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ .SpokeCoreName }}-sa
|
||||
namespace: {{ .SpokeCoreNamespace }}
|
||||
`)
|
||||
|
||||
func manifestsSpokeSpokeServiceaccountYamlBytes() ([]byte, error) {
|
||||
return _manifestsSpokeSpokeServiceaccountYaml, nil
|
||||
}
|
||||
|
||||
func manifestsSpokeSpokeServiceaccountYaml() (*asset, error) {
|
||||
bytes, err := manifestsSpokeSpokeServiceaccountYamlBytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := bindataFileInfo{name: "manifests/spoke/spoke-serviceaccount.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
|
||||
a := &asset{bytes: bytes, info: info}
|
||||
return a, nil
|
||||
}
|
||||
|
||||
var _manifestsSpokeSpokeWorkDeploymentYaml = []byte(`kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: {{ .SpokeCoreName }}-work-agent
|
||||
namespace: {{ .SpokeCoreNamespace }}
|
||||
labels:
|
||||
app: spoke-work-agent
|
||||
spec:
|
||||
replicas: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: spoke-work-agent
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: spoke-work-agent
|
||||
spec:
|
||||
serviceAccountName: {{ .SpokeCoreName }}-sa
|
||||
containers:
|
||||
- name: spoke-agent
|
||||
image: {{ .WorkImage }}
|
||||
imagePullPolicy: IfNotPresent
|
||||
args:
|
||||
- "/work"
|
||||
- "agent"
|
||||
- "--cluster-name={{ .ClusterName }}"
|
||||
- "--kubeconfig=/spoke/hub-kubeconfig"
|
||||
volumeMounts:
|
||||
- name: hub-kubeconfig-secret
|
||||
mountPath: "/spoke/hub-kubeconfig"
|
||||
readOnly: true
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
scheme: HTTPS
|
||||
port: 443
|
||||
initialDelaySeconds: 2
|
||||
periodSeconds: 10
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
scheme: HTTPS
|
||||
port: 443
|
||||
initialDelaySeconds: 2
|
||||
volumes:
|
||||
- name: hub-kubeconfig-secret
|
||||
secret:
|
||||
secretName: {{ .HubKubeConfigSecret }}
|
||||
`)
|
||||
|
||||
func manifestsSpokeSpokeWorkDeploymentYamlBytes() ([]byte, error) {
|
||||
return _manifestsSpokeSpokeWorkDeploymentYaml, nil
|
||||
}
|
||||
|
||||
func manifestsSpokeSpokeWorkDeploymentYaml() (*asset, error) {
|
||||
bytes, err := manifestsSpokeSpokeWorkDeploymentYamlBytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := bindataFileInfo{name: "manifests/spoke/spoke-work-deployment.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
|
||||
a := &asset{bytes: bytes, info: info}
|
||||
return a, nil
|
||||
}
|
||||
|
||||
// Asset loads and returns the asset for the given name.
|
||||
// It returns an error if the asset could not be found or
|
||||
// could not be loaded.
|
||||
func Asset(name string) ([]byte, error) {
|
||||
cannonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
if f, ok := _bindata[cannonicalName]; ok {
|
||||
a, err := f()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
|
||||
}
|
||||
return a.bytes, nil
|
||||
}
|
||||
return nil, fmt.Errorf("Asset %s not found", name)
|
||||
}
|
||||
|
||||
// MustAsset is like Asset but panics when Asset would return an error.
|
||||
// It simplifies safe initialization of global variables.
|
||||
func MustAsset(name string) []byte {
|
||||
a, err := Asset(name)
|
||||
if err != nil {
|
||||
panic("asset: Asset(" + name + "): " + err.Error())
|
||||
}
|
||||
|
||||
return a
|
||||
}
|
||||
|
||||
// AssetInfo loads and returns the asset info for the given name.
|
||||
// It returns an error if the asset could not be found or
|
||||
// could not be loaded.
|
||||
func AssetInfo(name string) (os.FileInfo, error) {
|
||||
cannonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
if f, ok := _bindata[cannonicalName]; ok {
|
||||
a, err := f()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
|
||||
}
|
||||
return a.info, nil
|
||||
}
|
||||
return nil, fmt.Errorf("AssetInfo %s not found", name)
|
||||
}
|
||||
|
||||
// AssetNames returns the names of the assets.
|
||||
func AssetNames() []string {
|
||||
names := make([]string, 0, len(_bindata))
|
||||
for name := range _bindata {
|
||||
names = append(names, name)
|
||||
}
|
||||
return names
|
||||
}
|
||||
|
||||
// _bindata is a table, holding each asset generator, mapped to its name.
|
||||
var _bindata = map[string]func() (*asset, error){
|
||||
"manifests/spoke/spoke-clusterrolebinding.yaml": manifestsSpokeSpokeClusterrolebindingYaml,
|
||||
"manifests/spoke/spoke-registration-deployment.yaml": manifestsSpokeSpokeRegistrationDeploymentYaml,
|
||||
"manifests/spoke/spoke-serviceaccount.yaml": manifestsSpokeSpokeServiceaccountYaml,
|
||||
"manifests/spoke/spoke-work-deployment.yaml": manifestsSpokeSpokeWorkDeploymentYaml,
|
||||
}
|
||||
|
||||
// AssetDir returns the file names below a certain
|
||||
// directory embedded in the file by go-bindata.
|
||||
// For example if you run go-bindata on data/... and data contains the
|
||||
// following hierarchy:
|
||||
// data/
|
||||
// foo.txt
|
||||
// img/
|
||||
// a.png
|
||||
// b.png
|
||||
// then AssetDir("data") would return []string{"foo.txt", "img"}
|
||||
// AssetDir("data/img") would return []string{"a.png", "b.png"}
|
||||
// AssetDir("foo.txt") and AssetDir("notexist") would return an error
|
||||
// AssetDir("") will return []string{"data"}.
|
||||
func AssetDir(name string) ([]string, error) {
|
||||
node := _bintree
|
||||
if len(name) != 0 {
|
||||
cannonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
pathList := strings.Split(cannonicalName, "/")
|
||||
for _, p := range pathList {
|
||||
node = node.Children[p]
|
||||
if node == nil {
|
||||
return nil, fmt.Errorf("Asset %s not found", name)
|
||||
}
|
||||
}
|
||||
}
|
||||
if node.Func != nil {
|
||||
return nil, fmt.Errorf("Asset %s not found", name)
|
||||
}
|
||||
rv := make([]string, 0, len(node.Children))
|
||||
for childName := range node.Children {
|
||||
rv = append(rv, childName)
|
||||
}
|
||||
return rv, nil
|
||||
}
|
||||
|
||||
type bintree struct {
|
||||
Func func() (*asset, error)
|
||||
Children map[string]*bintree
|
||||
}
|
||||
|
||||
var _bintree = &bintree{nil, map[string]*bintree{
|
||||
"manifests": {nil, map[string]*bintree{
|
||||
"spoke": {nil, map[string]*bintree{
|
||||
"spoke-clusterrolebinding.yaml": {manifestsSpokeSpokeClusterrolebindingYaml, map[string]*bintree{}},
|
||||
"spoke-registration-deployment.yaml": {manifestsSpokeSpokeRegistrationDeploymentYaml, map[string]*bintree{}},
|
||||
"spoke-serviceaccount.yaml": {manifestsSpokeSpokeServiceaccountYaml, map[string]*bintree{}},
|
||||
"spoke-work-deployment.yaml": {manifestsSpokeSpokeWorkDeploymentYaml, map[string]*bintree{}},
|
||||
}},
|
||||
}},
|
||||
}}
|
||||
|
||||
// RestoreAsset restores an asset under the given directory
|
||||
func RestoreAsset(dir, name string) error {
|
||||
data, err := Asset(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
info, err := AssetInfo(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// RestoreAssets restores an asset under the given directory recursively
|
||||
func RestoreAssets(dir, name string) error {
|
||||
children, err := AssetDir(name)
|
||||
// File
|
||||
if err != nil {
|
||||
return RestoreAsset(dir, name)
|
||||
}
|
||||
// Dir
|
||||
for _, child := range children {
|
||||
err = RestoreAssets(dir, filepath.Join(name, child))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func _filePath(dir, name string) string {
|
||||
cannonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...)
|
||||
}
|
||||
@@ -2,28 +2,395 @@ package spoke
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/klog"
|
||||
|
||||
"github.com/openshift/api"
|
||||
"github.com/openshift/library-go/pkg/controller/factory"
|
||||
"github.com/openshift/library-go/pkg/operator/events"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/serializer"
|
||||
|
||||
"github.com/openshift/library-go/pkg/assets"
|
||||
"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
|
||||
operatorhelpers "github.com/openshift/library-go/pkg/operator/v1helpers"
|
||||
|
||||
nucleusv1client "github.com/open-cluster-management/api/client/nucleus/clientset/versioned/typed/nucleus/v1"
|
||||
nucleusinformer "github.com/open-cluster-management/api/client/nucleus/informers/externalversions/nucleus/v1"
|
||||
nucleuslister "github.com/open-cluster-management/api/client/nucleus/listers/nucleus/v1"
|
||||
nucleusapiv1 "github.com/open-cluster-management/api/nucleus/v1"
|
||||
"github.com/open-cluster-management/nucleus/pkg/helpers"
|
||||
"github.com/open-cluster-management/nucleus/pkg/operators/spoke/bindata"
|
||||
)
|
||||
|
||||
type nucleusAgentController struct {
|
||||
kubeClient kubernetes.Interface
|
||||
const (
|
||||
nucleusSpokeFinalizer = "nucleus.open-cluster-management.io/spoke-core-cleanup"
|
||||
bootstrapHubKubeConfigSecret = "bootstrap-hub-kubeconfig"
|
||||
hubKubeConfigSecret = "hub-kubeconfig"
|
||||
nucluesSpokeCoreNamespace = "open-cluster-management"
|
||||
spokeCoreApplied = "Applied"
|
||||
)
|
||||
|
||||
var (
|
||||
genericScheme = runtime.NewScheme()
|
||||
genericCodecs = serializer.NewCodecFactory(genericScheme)
|
||||
genericCodec = genericCodecs.UniversalDeserializer()
|
||||
)
|
||||
|
||||
type nucleusSpokeController struct {
|
||||
nucleusClient nucleusv1client.SpokeCoreInterface
|
||||
nucleusLister nucleuslister.SpokeCoreLister
|
||||
kubeClient kubernetes.Interface
|
||||
registrationGeneration int64
|
||||
workGeneration int64
|
||||
}
|
||||
|
||||
// NewNucleusAgentController construct nucleus agent controller
|
||||
func NewNucleusAgentController(
|
||||
func init() {
|
||||
utilruntime.Must(api.InstallKube(genericScheme))
|
||||
}
|
||||
|
||||
// NewNucleusSpokeController construct nucleus spoke controller
|
||||
func NewNucleusSpokeController(
|
||||
kubeClient kubernetes.Interface,
|
||||
nucleusClient nucleusv1client.SpokeCoreInterface,
|
||||
nucleusInformer nucleusinformer.SpokeCoreInformer,
|
||||
recorder events.Recorder) factory.Controller {
|
||||
controller := &nucleusAgentController{
|
||||
kubeClient: kubeClient,
|
||||
controller := &nucleusSpokeController{
|
||||
kubeClient: kubeClient,
|
||||
nucleusClient: nucleusClient,
|
||||
nucleusLister: nucleusInformer.Lister(),
|
||||
}
|
||||
|
||||
return factory.New().WithSync(controller.sync).ToController("NucleusHubController", recorder)
|
||||
return factory.New().WithSync(controller.sync).
|
||||
WithInformersQueueKeyFunc(func(obj runtime.Object) string {
|
||||
accessor, _ := meta.Accessor(obj)
|
||||
return accessor.GetName()
|
||||
}, nucleusInformer.Informer()).
|
||||
ToController("NucleusSpokeController", recorder)
|
||||
}
|
||||
|
||||
func (m *nucleusAgentController) sync(ctx context.Context, controllerContext factory.SyncContext) error {
|
||||
// spokeConfig is used to render the template of hub manifests
|
||||
type spokeConfig struct {
|
||||
SpokeCoreName string
|
||||
SpokeCoreNamespace string
|
||||
RegistrationImage string
|
||||
WorkImage string
|
||||
ClusterName string
|
||||
ExternalServerURL string
|
||||
HubKubeConfigSecret string
|
||||
BootStrapKubeConfigSecret string
|
||||
}
|
||||
|
||||
func (n *nucleusSpokeController) sync(ctx context.Context, controllerContext factory.SyncContext) error {
|
||||
spokeCoreName := controllerContext.QueueKey()
|
||||
klog.V(4).Infof("Reconciling SpokeCore %q", spokeCoreName)
|
||||
spokeCore, err := n.nucleusLister.Get(spokeCoreName)
|
||||
if errors.IsNotFound(err) {
|
||||
// AgentCore not found, could have been deleted, do nothing.
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
spokeCore = spokeCore.DeepCopy()
|
||||
|
||||
config := spokeConfig{
|
||||
SpokeCoreName: spokeCore.Name,
|
||||
SpokeCoreNamespace: spokeCore.Spec.Namespace,
|
||||
RegistrationImage: spokeCore.Spec.RegistrationImagePullSpec,
|
||||
WorkImage: spokeCore.Spec.WorkImagePullSpec,
|
||||
ClusterName: spokeCore.Spec.ClusterName,
|
||||
BootStrapKubeConfigSecret: bootstrapHubKubeConfigSecret,
|
||||
HubKubeConfigSecret: hubKubeConfigSecret,
|
||||
}
|
||||
// If namespace is not set, use the default namespace
|
||||
if config.SpokeCoreNamespace == "" {
|
||||
config.SpokeCoreNamespace = nucluesSpokeCoreNamespace
|
||||
}
|
||||
|
||||
// Update finalizer at first
|
||||
if spokeCore.DeletionTimestamp.IsZero() {
|
||||
hasFinalizer := false
|
||||
for i := range spokeCore.Finalizers {
|
||||
if spokeCore.Finalizers[i] == nucleusSpokeFinalizer {
|
||||
hasFinalizer = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !hasFinalizer {
|
||||
spokeCore.Finalizers = append(spokeCore.Finalizers, nucleusSpokeFinalizer)
|
||||
_, err := n.nucleusClient.Update(ctx, spokeCore, metav1.UpdateOptions{})
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// SpokeCore is deleting, we remove its related resources on spoke
|
||||
if !spokeCore.DeletionTimestamp.IsZero() {
|
||||
if err := n.cleanUp(ctx, controllerContext, config); err != nil {
|
||||
return err
|
||||
}
|
||||
return n.removeWorkFinalizer(ctx, spokeCore)
|
||||
}
|
||||
|
||||
// Start deploy spoke core components
|
||||
// Check if namespace exists
|
||||
_, err = n.kubeClient.CoreV1().Namespaces().Get(ctx, config.SpokeCoreNamespace, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
helpers.SetNucleusCondition(&spokeCore.Status.Conditions, nucleusapiv1.StatusCondition{
|
||||
Type: spokeCoreApplied,
|
||||
Status: metav1.ConditionFalse,
|
||||
Reason: "SpokeCoreApplyFailed",
|
||||
Message: fmt.Sprintf("Failed to get namespace %q", config.SpokeCoreNamespace),
|
||||
})
|
||||
helpers.UpdateNucleusSpokeStatus(
|
||||
ctx, n.nucleusClient, spokeCoreName, helpers.UpdateNucleusSpokeConditionFn(spokeCore.Status.Conditions...))
|
||||
return err
|
||||
}
|
||||
|
||||
// Check if bootstrap secret exists
|
||||
_, err = n.kubeClient.CoreV1().Secrets(config.SpokeCoreNamespace).Get(
|
||||
ctx, config.BootStrapKubeConfigSecret, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
helpers.SetNucleusCondition(&spokeCore.Status.Conditions, nucleusapiv1.StatusCondition{
|
||||
Type: spokeCoreApplied,
|
||||
Status: metav1.ConditionFalse,
|
||||
Reason: "SpokeCoreApplyFailed",
|
||||
Message: fmt.Sprintf("Failed to get bootstracp secret %s/%s", config.SpokeCoreNamespace, config.BootStrapKubeConfigSecret),
|
||||
})
|
||||
helpers.UpdateNucleusSpokeStatus(
|
||||
ctx, n.nucleusClient, spokeCoreName, helpers.UpdateNucleusSpokeConditionFn(spokeCore.Status.Conditions...))
|
||||
return err
|
||||
}
|
||||
|
||||
// Deploy the static resources
|
||||
// Apply static files
|
||||
resourceResults := resourceapply.ApplyDirectly(
|
||||
resourceapply.NewKubeClientHolder(n.kubeClient),
|
||||
controllerContext.Recorder(),
|
||||
func(name string) ([]byte, error) {
|
||||
return assets.MustCreateAssetFromTemplate(name, bindata.MustAsset(filepath.Join("", name)), config).Data, nil
|
||||
},
|
||||
"manifests/spoke/spoke-clusterrolebinding.yaml",
|
||||
"manifests/spoke/spoke-serviceaccount.yaml",
|
||||
)
|
||||
errs := []error{}
|
||||
for _, result := range resourceResults {
|
||||
if result.Error != nil {
|
||||
errs = append(errs, fmt.Errorf("%q (%T): %v", result.File, result.Type, result.Error))
|
||||
}
|
||||
}
|
||||
|
||||
if len(errs) > 0 {
|
||||
appleErros := operatorhelpers.NewMultiLineAggregate(errs)
|
||||
helpers.SetNucleusCondition(&spokeCore.Status.Conditions, nucleusapiv1.StatusCondition{
|
||||
Type: spokeCoreApplied,
|
||||
Status: metav1.ConditionFalse,
|
||||
Reason: "SpokeCoreApplyFailed",
|
||||
Message: appleErros.Error(),
|
||||
})
|
||||
helpers.UpdateNucleusSpokeStatus(
|
||||
ctx, n.nucleusClient, spokeCoreName, helpers.UpdateNucleusSpokeConditionFn(spokeCore.Status.Conditions...))
|
||||
return appleErros
|
||||
}
|
||||
|
||||
// Create hub config secret
|
||||
hubSecret, err := n.kubeClient.CoreV1().Secrets(config.SpokeCoreNamespace).Get(
|
||||
ctx, hubKubeConfigSecret, metav1.GetOptions{})
|
||||
if errors.IsNotFound(err) {
|
||||
// Craete an empty secret with placeholder
|
||||
hubSecret = &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: hubKubeConfigSecret,
|
||||
Namespace: config.SpokeCoreNamespace,
|
||||
},
|
||||
Data: map[string][]byte{"placeholder": []byte("placeholder")},
|
||||
}
|
||||
hubSecret, err = n.kubeClient.CoreV1().Secrets(config.SpokeCoreNamespace).Create(ctx, hubSecret, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
helpers.SetNucleusCondition(&spokeCore.Status.Conditions, nucleusapiv1.StatusCondition{
|
||||
Type: spokeCoreApplied,
|
||||
Status: metav1.ConditionFalse,
|
||||
Reason: "SpokeCoreApplyFailed",
|
||||
Message: fmt.Sprintf("Failed to get hub kubeconfig secret with error %v", err),
|
||||
})
|
||||
helpers.UpdateNucleusSpokeStatus(
|
||||
ctx, n.nucleusClient, spokeCoreName, helpers.UpdateNucleusSpokeConditionFn(spokeCore.Status.Conditions...))
|
||||
return err
|
||||
}
|
||||
|
||||
// Deploy registration agent
|
||||
generation, err := n.applyDeployment(
|
||||
config, "manifests/spoke/spoke-registration-deployment.yaml", n.registrationGeneration, controllerContext)
|
||||
if err != nil {
|
||||
helpers.SetNucleusCondition(&spokeCore.Status.Conditions, nucleusapiv1.StatusCondition{
|
||||
Type: spokeCoreApplied,
|
||||
Status: metav1.ConditionFalse,
|
||||
Reason: "SpokeCoreApplyFailed",
|
||||
Message: fmt.Sprintf("Failed to deploy registration deployment with error %v", err),
|
||||
})
|
||||
helpers.UpdateNucleusSpokeStatus(
|
||||
ctx, n.nucleusClient, spokeCoreName, helpers.UpdateNucleusSpokeConditionFn(spokeCore.Status.Conditions...))
|
||||
return err
|
||||
}
|
||||
n.registrationGeneration = generation
|
||||
|
||||
// If cluster name is empty, read cluster name from hub config secret
|
||||
if config.ClusterName == "" {
|
||||
clusterName := hubSecret.Data["cluster-name"]
|
||||
if clusterName == nil {
|
||||
helpers.SetNucleusCondition(&spokeCore.Status.Conditions, nucleusapiv1.StatusCondition{
|
||||
Type: spokeCoreApplied,
|
||||
Status: metav1.ConditionFalse,
|
||||
Reason: "SpokeCoreApplyFailed",
|
||||
Message: fmt.Sprintf("Failed to get cluster name"),
|
||||
})
|
||||
helpers.UpdateNucleusSpokeStatus(
|
||||
ctx, n.nucleusClient, spokeCoreName, helpers.UpdateNucleusSpokeConditionFn(spokeCore.Status.Conditions...))
|
||||
return fmt.Errorf("Failed to get cluster name")
|
||||
}
|
||||
config.ClusterName = string(clusterName)
|
||||
}
|
||||
|
||||
// If hub kubeconfig does not exist, return err.
|
||||
if hubSecret.Data["kubeconfig"] == nil {
|
||||
klog.Infof("data is %#v", hubSecret.Data)
|
||||
return fmt.Errorf("Failed to get kubeconfig from hub kubeconfig secret")
|
||||
}
|
||||
|
||||
// Deploy work agent
|
||||
generation, err = n.applyDeployment(
|
||||
config, "manifests/spoke/spoke-work-deployment.yaml", n.workGeneration, controllerContext)
|
||||
if err != nil {
|
||||
helpers.SetNucleusCondition(&spokeCore.Status.Conditions, nucleusapiv1.StatusCondition{
|
||||
Type: spokeCoreApplied,
|
||||
Status: metav1.ConditionFalse,
|
||||
Reason: "SpokeCoreApplyFailed",
|
||||
Message: fmt.Sprintf("Failed to deploy work deployment with error %v", err),
|
||||
})
|
||||
helpers.UpdateNucleusSpokeStatus(
|
||||
ctx, n.nucleusClient, spokeCoreName, helpers.UpdateNucleusSpokeConditionFn(spokeCore.Status.Conditions...))
|
||||
return err
|
||||
}
|
||||
n.workGeneration = generation
|
||||
|
||||
// Update status
|
||||
helpers.SetNucleusCondition(&spokeCore.Status.Conditions, nucleusapiv1.StatusCondition{
|
||||
Type: spokeCoreApplied,
|
||||
Status: metav1.ConditionTrue,
|
||||
Reason: "SpokeCoreApplied",
|
||||
Message: "Spoke Core Component Applied",
|
||||
})
|
||||
helpers.UpdateNucleusSpokeStatus(
|
||||
ctx, n.nucleusClient, spokeCoreName, helpers.UpdateNucleusSpokeConditionFn(spokeCore.Status.Conditions...))
|
||||
return err
|
||||
}
|
||||
|
||||
func (n *nucleusSpokeController) applyDeployment(
|
||||
config spokeConfig, file string, generation int64, controllerContext factory.SyncContext) (int64, error) {
|
||||
deploymentRaw := assets.MustCreateAssetFromTemplate(
|
||||
file,
|
||||
bindata.MustAsset(filepath.Join("", file)), config).Data
|
||||
deployment, _, err := genericCodec.Decode(deploymentRaw, nil, nil)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("%q: %v", file, err)
|
||||
}
|
||||
updatedDeployment, updated, err := resourceapply.ApplyDeployment(
|
||||
n.kubeClient.AppsV1(),
|
||||
controllerContext.Recorder(),
|
||||
deployment.(*appsv1.Deployment), generation, false)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to apply hub deployment manifest: %v", err)
|
||||
return 0, fmt.Errorf("%q (%T): %v", file, deployment, err)
|
||||
}
|
||||
|
||||
// Record the generation, so the deployment is only updated when generation is changed.
|
||||
if updated {
|
||||
generation = updatedDeployment.ObjectMeta.Generation
|
||||
}
|
||||
|
||||
return generation, nil
|
||||
}
|
||||
|
||||
func (n *nucleusSpokeController) cleanUp(ctx context.Context, controllerContext factory.SyncContext, config spokeConfig) error {
|
||||
// Remove deployment
|
||||
registrationDeployment := fmt.Sprintf("%s-registration-agent", config.SpokeCoreName)
|
||||
err := n.kubeClient.AppsV1().Deployments(config.SpokeCoreNamespace).Delete(ctx, registrationDeployment, metav1.DeleteOptions{})
|
||||
if err != nil && !errors.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
controllerContext.Recorder().Eventf("DeploymentDeleted", "deployment %s is deleted", registrationDeployment)
|
||||
workDeployment := fmt.Sprintf("%s-work-agent", config.SpokeCoreName)
|
||||
err = n.kubeClient.AppsV1().Deployments(config.SpokeCoreNamespace).Delete(ctx, workDeployment, metav1.DeleteOptions{})
|
||||
if err != nil && !errors.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
// Remove secret
|
||||
err = n.kubeClient.CoreV1().Secrets(config.SpokeCoreNamespace).Delete(ctx, config.HubKubeConfigSecret, metav1.DeleteOptions{})
|
||||
if err != nil && !errors.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
controllerContext.Recorder().Eventf("SecretDeleted", "secret %s is deleted", config.HubKubeConfigSecret)
|
||||
// Remove service account
|
||||
serviceAccountName := fmt.Sprintf("%s-sa", config.SpokeCoreName)
|
||||
err = n.kubeClient.CoreV1().ServiceAccounts(config.SpokeCoreNamespace).Delete(ctx, serviceAccountName, metav1.DeleteOptions{})
|
||||
if err != nil && !errors.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
controllerContext.Recorder().Eventf("ServiceAccountDeleted", "serviceaccoount %s is deleted", serviceAccountName)
|
||||
// Remove clusterrolebinding
|
||||
clusterRoleBindingName := fmt.Sprintf("system:open-cluster-management:%s", config.SpokeCoreName)
|
||||
err = n.kubeClient.RbacV1().ClusterRoleBindings().Delete(ctx, clusterRoleBindingName, metav1.DeleteOptions{})
|
||||
if err != nil && !errors.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
controllerContext.Recorder().Eventf("ClusterRoleBindingDeleted", "clusterrole %s is deleted", clusterRoleBindingName)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *nucleusSpokeController) removeWorkFinalizer(ctx context.Context, deploy *nucleusapiv1.SpokeCore) error {
|
||||
copiedFinalizers := []string{}
|
||||
for i := range deploy.Finalizers {
|
||||
if deploy.Finalizers[i] == nucleusSpokeFinalizer {
|
||||
continue
|
||||
}
|
||||
copiedFinalizers = append(copiedFinalizers, deploy.Finalizers[i])
|
||||
}
|
||||
|
||||
if len(deploy.Finalizers) != len(copiedFinalizers) {
|
||||
deploy.Finalizers = copiedFinalizers
|
||||
_, err := n.nucleusClient.Update(ctx, deploy, metav1.UpdateOptions{})
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func readClusterNameFromSecret(secret *corev1.Secret) (string, error) {
|
||||
if secret.Data["cluster-name"] == nil {
|
||||
return "", fmt.Errorf("Unable to find cluster name in secret")
|
||||
}
|
||||
|
||||
return string(secret.Data["cluster-name"]), nil
|
||||
}
|
||||
|
||||
func readKubuConfigFromSecret(secret *corev1.Secret, config spokeConfig) (string, error) {
|
||||
if secret.Data["cluster-name"] == nil {
|
||||
return "", fmt.Errorf("Unable to find cluster name in secret")
|
||||
}
|
||||
|
||||
return string(secret.Data["cluster-name"]), nil
|
||||
}
|
||||
|
||||
279
pkg/operators/spoke/controller_test.go
Normal file
279
pkg/operators/spoke/controller_test.go
Normal file
@@ -0,0 +1,279 @@
|
||||
package spoke
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
fakenucleusclient "github.com/open-cluster-management/api/client/nucleus/clientset/versioned/fake"
|
||||
nucleusinformers "github.com/open-cluster-management/api/client/nucleus/informers/externalversions"
|
||||
nucleusapiv1 "github.com/open-cluster-management/api/nucleus/v1"
|
||||
"github.com/openshift/library-go/pkg/operator/events"
|
||||
"github.com/openshift/library-go/pkg/operator/events/eventstesting"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
fakekube "k8s.io/client-go/kubernetes/fake"
|
||||
clienttesting "k8s.io/client-go/testing"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
)
|
||||
|
||||
// testController bundles a nucleusSpokeController with the fake clients it is
// wired to, so tests can inspect the actions recorded by the fakes.
type testController struct {
	controller    *nucleusSpokeController
	kubeClient    *fakekube.Clientset
	nucleusClient *fakenucleusclient.Clientset
}
|
||||
|
||||
// fakeSyncContext is a minimal SyncContext implementation used to drive the
// controller sync loop in tests.
type fakeSyncContext struct {
	key      string // queue key handed to sync
	queue    workqueue.RateLimitingInterface
	recorder events.Recorder
}
|
||||
|
||||
// Queue, QueueKey and Recorder expose the stored fields, satisfying the sync
// context interface the controller expects.
func (f fakeSyncContext) Queue() workqueue.RateLimitingInterface { return f.queue }
func (f fakeSyncContext) QueueKey() string                       { return f.key }
func (f fakeSyncContext) Recorder() events.Recorder              { return f.recorder }
|
||||
|
||||
func newFakeSyncContext(t *testing.T, key string) *fakeSyncContext {
|
||||
return &fakeSyncContext{
|
||||
key: key,
|
||||
queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
|
||||
recorder: eventstesting.NewTestingEventRecorder(t),
|
||||
}
|
||||
}
|
||||
|
||||
func newSpokeCore(name, namespace, clustername string) *nucleusapiv1.SpokeCore {
|
||||
return &nucleusapiv1.SpokeCore{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Finalizers: []string{nucleusSpokeFinalizer},
|
||||
},
|
||||
Spec: nucleusapiv1.SpokeCoreSpec{
|
||||
RegistrationImagePullSpec: "testregistration",
|
||||
WorkImagePullSpec: "testwork",
|
||||
ClusterName: clustername,
|
||||
Namespace: namespace,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func newSecret(name, namespace string) *corev1.Secret {
|
||||
return &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Data: map[string][]byte{},
|
||||
}
|
||||
}
|
||||
|
||||
func newNamespace(name string) *corev1.Namespace {
|
||||
return &corev1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func newTestController(spokecore *nucleusapiv1.SpokeCore, objects ...runtime.Object) *testController {
|
||||
fakeKubeClient := fakekube.NewSimpleClientset(objects...)
|
||||
fakeNucleusClient := fakenucleusclient.NewSimpleClientset(spokecore)
|
||||
nucleusInformers := nucleusinformers.NewSharedInformerFactory(fakeNucleusClient, 5*time.Minute)
|
||||
|
||||
hubController := &nucleusSpokeController{
|
||||
nucleusClient: fakeNucleusClient.NucleusV1().SpokeCores(),
|
||||
kubeClient: fakeKubeClient,
|
||||
nucleusLister: nucleusInformers.Nucleus().V1().SpokeCores().Lister(),
|
||||
}
|
||||
|
||||
store := nucleusInformers.Nucleus().V1().SpokeCores().Informer().GetStore()
|
||||
store.Add(spokecore)
|
||||
|
||||
return &testController{
|
||||
controller: hubController,
|
||||
kubeClient: fakeKubeClient,
|
||||
nucleusClient: fakeNucleusClient,
|
||||
}
|
||||
}
|
||||
|
||||
func assertAction(t *testing.T, actual clienttesting.Action, expected string) {
|
||||
if actual.GetVerb() != expected {
|
||||
t.Errorf("expected %s action but got: %#v", expected, actual)
|
||||
}
|
||||
}
|
||||
|
||||
func assertCondition(t *testing.T, actual runtime.Object, expectedCondition string, expectedStatus metav1.ConditionStatus) {
|
||||
spokeCore := actual.(*nucleusapiv1.SpokeCore)
|
||||
conditions := spokeCore.Status.Conditions
|
||||
if len(conditions) != 1 {
|
||||
t.Errorf("expected 1 condition but got: %#v", conditions)
|
||||
}
|
||||
condition := conditions[0]
|
||||
if condition.Type != expectedCondition {
|
||||
t.Errorf("expected %s but got: %s", expectedCondition, condition.Type)
|
||||
}
|
||||
if condition.Status != expectedStatus {
|
||||
t.Errorf("expected %s but got: %s", expectedStatus, condition.Status)
|
||||
}
|
||||
}
|
||||
|
||||
// ensureNameNamespace checks that the observed name/namespace pair matches the
// expected one, reporting each mismatch independently.
func ensureNameNamespace(t *testing.T, actualName, actualNamespace, name, namespace string) {
	if name != actualName {
		t.Errorf("Name of the object does not match, expected %s, actual %s", name, actualName)
	}
	if namespace != actualNamespace {
		t.Errorf("Namespace of the object does not match, expected %s, actual %s", namespace, actualNamespace)
	}
}
|
||||
|
||||
// ensureObject verifies that an object created during sync carries the
// name/namespace derived from the SpokeCore, and for agent deployments that
// the first container image matches the configured image pull spec.
// Objects of other types (e.g. the clusterrolebinding) fall through the
// switch unchecked.
func ensureObject(t *testing.T, object runtime.Object, spokeCore *nucleusapiv1.SpokeCore) {
	access, err := meta.Accessor(object)
	if err != nil {
		t.Errorf("Unable to access objectmeta: %v", err)
	}

	switch o := object.(type) {
	case *corev1.ServiceAccount:
		ensureNameNamespace(t, access.GetName(), access.GetNamespace(), fmt.Sprintf("%s-sa", spokeCore.Name), spokeCore.Spec.Namespace)
	case *appsv1.Deployment:
		// The deployment name distinguishes the registration agent from the
		// work agent; any other deployment is unexpected.
		if strings.Contains(access.GetName(), "registration") {
			ensureNameNamespace(
				t, access.GetName(), access.GetNamespace(),
				fmt.Sprintf("%s-registration-agent", spokeCore.Name), spokeCore.Spec.Namespace)
			if spokeCore.Spec.RegistrationImagePullSpec != o.Spec.Template.Spec.Containers[0].Image {
				t.Errorf("Image does not match to the expected.")
			}
		} else if strings.Contains(access.GetName(), "work") {
			ensureNameNamespace(
				t, access.GetName(), access.GetNamespace(),
				fmt.Sprintf("%s-work-agent", spokeCore.Name), spokeCore.Spec.Namespace)
			if spokeCore.Spec.WorkImagePullSpec != o.Spec.Template.Spec.Containers[0].Image {
				t.Errorf("Image does not match to the expected.")
			}
		} else {
			t.Errorf("Unexpected deployment")
		}
	}
}
|
||||
|
||||
// TestSyncDeploy test deployment of spoke components
// With namespace, bootstrap secret and hub kubeconfig secret all present, a
// single sync is expected to succeed and create the spoke resources.
func TestSyncDeploy(t *testing.T) {
	spokeCore := newSpokeCore("testspoke", "testns", "cluster1")
	bootStrapSecret := newSecret(bootstrapHubKubeConfigSecret, "testns")
	// NOTE(review): this local shadows the package-level hubKubeConfigSecret
	// name used as the secret name on the line below.
	hubKubeConfigSecret := newSecret(hubKubeConfigSecret, "testns")
	hubKubeConfigSecret.Data["kubeconfig"] = []byte("dummuykubeconnfig")
	namespace := newNamespace("testns")
	controller := newTestController(spokeCore, bootStrapSecret, hubKubeConfigSecret, namespace)
	syncContext := newFakeSyncContext(t, "testspoke")

	err := controller.controller.sync(nil, syncContext)
	if err != nil {
		t.Errorf("Expected non error when sync, %v", err)
	}

	// Collect every object created against the fake kube client during sync.
	createObjects := []runtime.Object{}
	kubeActions := controller.kubeClient.Actions()
	for _, action := range kubeActions {
		if action.GetVerb() == "create" {
			object := action.(clienttesting.CreateActionImpl).Object
			createObjects = append(createObjects, object)
		}
	}

	// Check if resources are created as expected
	// (presumably clusterrolebinding, serviceaccount and the two agent
	// deployments — TODO confirm against the applied manifests).
	if len(createObjects) != 4 {
		t.Errorf("Expect 4 objects created in the sync loop, actual %d", len(createObjects))
	}
	for _, object := range createObjects {
		ensureObject(t, object, spokeCore)
	}

	nucleusAction := controller.nucleusClient.Actions()
	if len(nucleusAction) != 2 {
		t.Errorf("Expect 2 actions in the sync loop, actual %#v", nucleusAction)
	}

	// The last nucleus action is the status update with the applied condition.
	assertAction(t, nucleusAction[1], "update")
	assertCondition(t, nucleusAction[1].(clienttesting.UpdateActionImpl).Object, spokeCoreApplied, metav1.ConditionTrue)
}
|
||||
|
||||
// TestSyncWithNoSecret test the scenario that bootstrap secret and hub config secret does not exist
// It syncs three times: first with no secrets (expect failure), then with only
// the bootstrap secret (expect failure on the missing cluster name), and
// finally with a populated hub secret (expect success).
func TestSyncWithNoSecret(t *testing.T) {
	spokeCore := newSpokeCore("testspoke", "testns", "")
	bootStrapSecret := newSecret(bootstrapHubKubeConfigSecret, "testns")
	hubSecret := newSecret(hubKubeConfigSecret, "testns")
	namespace := newNamespace("testns")
	// Neither secret is registered with the fake kube client here; they are
	// surfaced later through prepended reactors.
	controller := newTestController(spokeCore, namespace)
	syncContext := newFakeSyncContext(t, "testspoke")

	// Return err since bootstrap secret does not exist
	err := controller.controller.sync(nil, syncContext)
	if err == nil {
		t.Errorf("Expected error when sync")
	}
	nucleusAction := controller.nucleusClient.Actions()
	if len(nucleusAction) != 2 {
		t.Errorf("Expect 2 actions in the sync loop, actual %#v", nucleusAction)
	}

	assertAction(t, nucleusAction[1], "update")
	assertCondition(t, nucleusAction[1].(clienttesting.UpdateActionImpl).Object, spokeCoreApplied, metav1.ConditionFalse)

	// Add bootstrap secret and sync again
	controller.kubeClient.PrependReactor("get", "secrets", func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) {
		if action.GetVerb() != "get" {
			return false, nil, nil
		}

		// Serve only the bootstrap secret; other secret lookups fall through
		// (handled=false) so the hub kubeconfig secret still appears absent.
		getAction := action.(clienttesting.GetActionImpl)
		if getAction.Name != bootstrapHubKubeConfigSecret {
			return false, nil, errors.NewNotFound(
				corev1.Resource("secrets"), bootstrapHubKubeConfigSecret)
		}
		return true, bootStrapSecret, nil
	})
	// Return err since cluster-name cannot be found in hubkubeconfig secret
	err = controller.controller.sync(nil, syncContext)
	if err == nil {
		t.Errorf("Expected error when sync")
	}
	nucleusAction = controller.nucleusClient.Actions()
	if len(nucleusAction) != 4 {
		t.Errorf("Expect 4 actions in the sync loop, actual %#v", nucleusAction)
	}

	assertAction(t, nucleusAction[3], "update")
	assertCondition(t, nucleusAction[3].(clienttesting.UpdateActionImpl).Object, spokeCoreApplied, metav1.ConditionFalse)

	// Add hub config secret and sync again
	hubSecret.Data["kubeconfig"] = []byte("dummykubeconfig")
	hubSecret.Data["cluster-name"] = []byte("cluster1")
	// This reactor is prepended ahead of the previous one, so the hub secret
	// is served first and other names fall through to the earlier reactor.
	controller.kubeClient.PrependReactor("get", "secrets", func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) {
		if action.GetVerb() != "get" {
			return false, nil, nil
		}

		getAction := action.(clienttesting.GetActionImpl)
		if getAction.Name != hubKubeConfigSecret {
			return false, nil, errors.NewNotFound(
				corev1.Resource("secrets"), hubKubeConfigSecret)
		}
		return true, hubSecret, nil
	})
	err = controller.controller.sync(nil, syncContext)
	if err != nil {
		t.Errorf("Expected no error when sync: %v", err)
	}
	nucleusAction = controller.nucleusClient.Actions()
	if len(nucleusAction) != 6 {
		t.Errorf("Expect 6 actions in the sync loop, actual %#v", nucleusAction)
	}

	assertAction(t, nucleusAction[5], "update")
	assertCondition(t, nucleusAction[5].(clienttesting.UpdateActionImpl).Object, spokeCoreApplied, metav1.ConditionTrue)
}
|
||||
Reference in New Issue
Block a user