mirror of
https://github.com/open-cluster-management-io/ocm.git
synced 2026-02-14 18:09:57 +00:00
Refactor to contextual logging (#1283)
Some checks failed
Scorecard supply-chain security / Scorecard analysis (push) Failing after 1m25s
Post / coverage (push) Failing after 36m59s
Post / images (amd64, addon-manager) (push) Failing after 7m34s
Post / images (amd64, placement) (push) Failing after 7m4s
Post / images (amd64, registration) (push) Failing after 7m8s
Post / images (amd64, registration-operator) (push) Failing after 7m3s
Post / images (amd64, work) (push) Failing after 6m59s
Post / images (arm64, addon-manager) (push) Failing after 7m0s
Post / images (arm64, placement) (push) Failing after 6m54s
Post / images (arm64, registration) (push) Failing after 6m55s
Post / images (arm64, registration-operator) (push) Failing after 6m55s
Post / images (arm64, work) (push) Failing after 7m16s
Post / image manifest (addon-manager) (push) Has been skipped
Post / image manifest (placement) (push) Has been skipped
Post / image manifest (registration) (push) Has been skipped
Post / image manifest (registration-operator) (push) Has been skipped
Post / image manifest (work) (push) Has been skipped
Post / trigger clusteradm e2e (push) Has been skipped
Signed-off-by: Jian Qiu <jqiu@redhat.com>
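The change applies the same contextual-logging bootstrap to every entry point it touches (addon manager, cluster-manager and klusterlet operators, placement and registration hubs, spoke agent): build a klogr logger, tag it with the pod name, and attach it to the context so downstream code can recover it with klog.FromContext. A minimal sketch of that pattern, assuming an illustrative runOperator entry point rather than a function from this repository:

package main

import (
	"context"
	"os"

	"k8s.io/klog/v2"
)

// runOperator stands in for the Run* entry points changed in this commit.
func runOperator(ctx context.Context) error {
	// setting up contextual logger
	logger := klog.NewKlogr()
	if podName := os.Getenv("POD_NAME"); podName != "" {
		// every record emitted through this logger now carries the pod name
		logger = logger.WithValues("podName", podName)
	}
	// attach the logger to the context; callees recover it with klog.FromContext(ctx)
	ctx = klog.NewContext(ctx, logger)

	klog.FromContext(ctx).Info("operator starting")
	return nil
}

func main() {
	if err := runOperator(context.Background()); err != nil {
		os.Exit(1)
	}
}
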
@@ -2,6 +2,7 @@ package addon

import (
"context"
+ "os"
"time"

"github.com/openshift/library-go/pkg/controller/controllercmd"
@@ -10,6 +11,7 @@ import (
"k8s.io/client-go/dynamic/dynamicinformer"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
+ "k8s.io/klog/v2"

"open-cluster-management.io/addon-framework/pkg/index"
"open-cluster-management.io/addon-framework/pkg/utils"
@@ -31,6 +33,14 @@ import (
)

func RunManager(ctx context.Context, controllerContext *controllercmd.ControllerContext) error {
+ // setting up contextual logger
+ logger := klog.NewKlogr()
+ podName := os.Getenv("POD_NAME")
+ if podName != "" {
+ logger = logger.WithValues("podName", podName)
+ }
+ ctx = klog.NewContext(ctx, logger)
+
kubeConfig := controllerContext.KubeConfig
hubKubeClient, err := kubernetes.NewForConfig(kubeConfig)
if err != nil {

@@ -1,6 +1,7 @@
package chart

import (
+ "context"
"embed"
"fmt"
"io/fs"
@@ -43,28 +44,29 @@ func NewDefaultKlusterletChartConfig() *KlusterletChartConfig {

// RenderClusterManagerChart renders the ClusterManager objects to be created on the hub.
// It returns three values: CRD objects(which usually should be created before other objects), other Kubernetes objects, error.
- func RenderClusterManagerChart(config *ClusterManagerChartConfig, namespace string) ([][]byte, [][]byte, error) {
+ func RenderClusterManagerChart(ctx context.Context, config *ClusterManagerChartConfig, namespace string) ([][]byte, [][]byte, error) {
if namespace == "" {
return nil, nil, fmt.Errorf("cluster manager chart namespace is required")
}
- return renderChart(config, namespace, config.CreateNamespace,
+ return renderChart(ctx, config, namespace, config.CreateNamespace,
clustermanagerchart.ChartName, clustermanagerchart.ChartFiles)
}

// RenderKlusterletChart renders the Klusterlet objects to be created on the managed cluster.
// It returns three values: CRD objects(which usually should be created before other objects), other Kubernetes objects, error.
- func RenderKlusterletChart(config *KlusterletChartConfig, namespace string) ([][]byte, [][]byte, error) {
+ func RenderKlusterletChart(ctx context.Context, config *KlusterletChartConfig, namespace string) ([][]byte, [][]byte, error) {
if namespace == "" {
return nil, nil, fmt.Errorf("klusterlet chart namespace is required")
}
- return renderChart(config, namespace, config.CreateNamespace,
+ return renderChart(ctx, config, namespace, config.CreateNamespace,
klusterletchart.ChartName, klusterletchart.ChartFiles)
}

- func renderChart[T *ClusterManagerChartConfig | *KlusterletChartConfig](config T,
+ func renderChart[T *ClusterManagerChartConfig | *KlusterletChartConfig](ctx context.Context, config T,
namespace string, createNamespace bool, chartName string, fs embed.FS) ([][]byte, [][]byte, error) {
+ logger := klog.FromContext(ctx)
// chartName is the prefix of chart path here
- operatorChart, err := LoadChart(fs, chartName)
+ operatorChart, err := LoadChart(fs, chartName, logger)
if err != nil {
return nil, nil, fmt.Errorf("failed to load %s chart: %w", chartName, err)
}
@@ -82,11 +84,11 @@ func renderChart[T *ClusterManagerChartConfig | *KlusterletChartConfig](config T
values, err := chartutil.ToRenderValues(operatorChart, configValues,
releaseOptions, &chartutil.Capabilities{})
if err != nil {
- klog.Errorf("failed to render helm chart with values %v. err:%v", values, err)
+ logger.Error(err, "failed to render helm chart", "values", values)
return nil, nil, err
}

- crdObjects, rawObjects, err := renderManifests(operatorChart, values)
+ crdObjects, rawObjects, err := renderManifests(operatorChart, values, logger)
if err != nil {
return nil, nil, fmt.Errorf("error rendering cluster manager chart: %v", err)
}
@@ -127,7 +129,7 @@ func stripPrefix(chartPrefix, path string) string {
return strings.Join(pathValues[chartPrefixLen:], string(filepath.Separator))
}

- func LoadChart(chartFS embed.FS, chartPrefix string) (*chart.Chart, error) {
+ func LoadChart(chartFS embed.FS, chartPrefix string, logger klog.Logger) (*chart.Chart, error) {
files, err := getFiles(chartFS)
if err != nil {
return nil, err
@@ -137,7 +139,7 @@ func LoadChart(chartFS embed.FS, chartPrefix string) (*chart.Chart, error) {
for _, fileName := range files {
b, err := fs.ReadFile(chartFS, fileName)
if err != nil {
- klog.Errorf("failed to read file %v. err:%v", fileName, err)
+ logger.Error(err, "failed to read file", "fileName", fileName)
return nil, err
}
if !strings.HasPrefix(fileName, chartPrefix) {
@@ -152,7 +154,7 @@ func LoadChart(chartFS embed.FS, chartPrefix string) (*chart.Chart, error) {

userChart, err := loader.LoadFiles(bfs)
if err != nil {
- klog.Errorf("failed to load chart. err:%v", err)
+ logger.Error(err, "failed to load chart")
return nil, err
}
return userChart, nil
@@ -172,13 +174,13 @@ func JsonStructToValues(a interface{}) (chartutil.Values, error) {
return vals, nil
}

- func renderManifests(chart *chart.Chart, values chartutil.Values) ([][]byte, [][]byte, error) {
+ func renderManifests(chart *chart.Chart, values chartutil.Values, logger klog.Logger) ([][]byte, [][]byte, error) {
var rawCRDObjects, rawObjects [][]byte

// make sure the CRDs are at the top.
crds := chart.CRDObjects()
for _, crd := range crds {
- klog.V(4).Infof("%v/n", crd.File.Data)
+ logger.V(4).Info("crd data", "crdName", crd.Name, "crdData", string(crd.File.Data))
rawCRDObjects = append(rawCRDObjects, crd.File.Data)
}

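Two complementary moves show up in the chart rendering hunks above: exported render functions gain a ctx parameter and derive the logger once with klog.FromContext, while small helpers such as LoadChart and renderManifests take the logger as an explicit argument. A hedged sketch of that shape, with renderExample and loadFiles as invented stand-ins rather than the real helpers:

package main

import (
	"context"
	"fmt"

	"k8s.io/klog/v2"
)

// renderExample derives the contextual logger once and hands it to a helper
// that has no other need for a context.
func renderExample(ctx context.Context, chartName string) error {
	logger := klog.FromContext(ctx)
	if err := loadFiles(chartName, logger); err != nil {
		// structured error logging replaces the old klog.Errorf calls
		logger.Error(err, "failed to load chart", "chartName", chartName)
		return fmt.Errorf("failed to load %s chart: %w", chartName, err)
	}
	return nil
}

// loadFiles mirrors helpers like LoadChart that now accept a klog.Logger
// parameter instead of calling the global klog functions.
func loadFiles(chartName string, logger klog.Logger) error {
	logger.V(4).Info("loading chart files", "chartName", chartName)
	return nil
}

func main() {
	ctx := klog.NewContext(context.Background(), klog.NewKlogr())
	if err := renderExample(ctx, "cluster-manager"); err != nil {
		fmt.Println(err)
	}
}
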
@@ -1,6 +1,7 @@
package chart

import (
+ "context"
"encoding/base64"
"fmt"
"os"
@@ -166,7 +167,7 @@ func TestClusterManagerConfig(t *testing.T) {
version = config.Images.Tag
}

- crdObjs, rawObjs, err := RenderClusterManagerChart(config, c.namespace)
+ crdObjs, rawObjs, err := RenderClusterManagerChart(context.Background(), config, c.namespace)
if err != nil {
t.Errorf("error rendering chart: %v", err)
}
@@ -477,7 +478,7 @@ func TestKlusterletConfig(t *testing.T) {
version = config.Images.Tag
}

- crdObjs, rawObjs, err := RenderKlusterletChart(config, c.namespace)
+ crdObjs, rawObjs, err := RenderKlusterletChart(context.Background(), config, c.namespace)
if err != nil {
t.Errorf("error rendering chart: %v", err)
}

@@ -36,7 +36,6 @@ import (
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/component-base/featuregate"
- "k8s.io/klog/v2"
apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
apiregistrationclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1"

@@ -436,7 +435,7 @@ func LoadClientConfigFromSecret(secret *corev1.Secret) (*rest.Config, error) {
// - kube version: if the kube version is less than v1.14 reutn 1
// - node: list master nodes in the cluster and return 1 if the
// number of master nodes is equal or less than 1. Return 3 otherwise.
- func DetermineReplica(ctx context.Context, kubeClient kubernetes.Interface, mode operatorapiv1.InstallMode, kubeVersion *version.Version,
+ func DetermineReplica(ctx context.Context, kubeClient kubernetes.Interface, mode operatorapiv1.InstallMode,
controlPlaneNodeLabelSelector string) int32 {
// For hosted mode, there may be many cluster-manager/klusterlet running on the management cluster,
// set the replica to 1 to reduce the footprint of the management cluster.
@@ -444,18 +443,6 @@ func DetermineReplica(ctx context.Context, kubeClient kubernetes.Interface, mode
return singleReplica
}

- if kubeVersion != nil {
- // If the cluster does not support lease.coordination.k8s.io/v1, set the replica to 1.
- // And then the leader election of agent running on this cluster should be disabled, because
- // it leverages the lease API. Kubernetes starts support lease/v1 from v1.14.
- if cnt, err := kubeVersion.Compare("v1.14.0"); err != nil {
- klog.Warningf("set replica to %d because it's failed to check whether the cluster supports lease/v1 or not: %v", singleReplica, err)
- return singleReplica
- } else if cnt == -1 {
- return singleReplica
- }
- }
-
return DetermineReplicaByNodes(ctx, kubeClient, controlPlaneNodeLabelSelector)
}

@@ -585,20 +572,22 @@ func RemoveRelatedResourcesStatus(
}

func SetRelatedResourcesStatusesWithObj(
- relatedResourcesStatuses *[]operatorapiv1.RelatedResourceMeta, objData []byte) {
+ ctx context.Context, relatedResourcesStatuses *[]operatorapiv1.RelatedResourceMeta, objData []byte) {
res, err := GenerateRelatedResource(objData)
if err != nil {
- klog.Errorf("failed to generate relatedResource %v, and skip to set into status. %v", objData, err)
+ utilruntime.HandleErrorWithContext(ctx, err,
+ "failed to generate relatedResource and skip to set into status", "object", string(objData))
return
}
SetRelatedResourcesStatuses(relatedResourcesStatuses, res)
}

func RemoveRelatedResourcesStatusesWithObj(
- relatedResourcesStatuses *[]operatorapiv1.RelatedResourceMeta, objData []byte) {
+ ctx context.Context, relatedResourcesStatuses *[]operatorapiv1.RelatedResourceMeta, objData []byte) {
res, err := GenerateRelatedResource(objData)
if err != nil {
- klog.Errorf("failed to generate relatedResource %v, and skip to set into status. %v", objData, err)
+ utilruntime.HandleErrorWithContext(ctx, err,
+ "failed to generate relatedResource and skip to set into status", "object", string(objData))
return
}
RemoveRelatedResourcesStatuses(relatedResourcesStatuses, res)
@@ -634,28 +623,29 @@ func ResourceType(resourceRequirementAcquirer operatorapiv1.ResourceRequirementA
}

// ResourceRequirements get resource requirements overridden by user for ResourceQosClassResourceRequirement type
- func ResourceRequirements(resourceRequirementAcquirer operatorapiv1.ResourceRequirementAcquirer) ([]byte, error) {
+ func ResourceRequirements(ctx context.Context, resourceRequirementAcquirer operatorapiv1.ResourceRequirementAcquirer) ([]byte, error) {
r := resourceRequirementAcquirer.GetResourceRequirement()
if r == nil || r.Type == operatorapiv1.ResourceQosClassBestEffort {
return nil, nil
}
marshal, err := yaml.Marshal(r.ResourceRequirements)
if err != nil {
- klog.Errorf("failed to marshal resource requirement: %v", err)
+ utilruntime.HandleErrorWithContext(ctx, err, "failed to marshal resource requirement")
return nil, err
}
return marshal, nil
}

// AgentPriorityClassName return the name of the PriorityClass that should be used for the klusterlet agents
- func AgentPriorityClassName(klusterlet *operatorapiv1.Klusterlet, kubeVersion *version.Version) string {
+ func AgentPriorityClassName(ctx context.Context, klusterlet *operatorapiv1.Klusterlet, kubeVersion *version.Version) string {
if kubeVersion == nil || klusterlet == nil {
return ""
}

// priorityclass.scheduling.k8s.io/v1 is supported since v1.14.
if cnt, err := kubeVersion.Compare("v1.14.0"); err != nil {
- klog.Warningf("Ignore PriorityClass because it's failed to check whether the cluster supports PriorityClass/v1 or not: %v", err)
+ utilruntime.HandleErrorWithContext(ctx, err,
+ "ignore PriorityClass because it's failed to check whether the cluster supports PriorityClass/v1")
return ""
} else if cnt == -1 {
return ""

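In the helper changes above, best-effort error paths move from klog.Errorf to utilruntime.HandleErrorWithContext, which reports the error through the registered error handlers using the logger carried in the context. A minimal sketch of that call, assuming an invented marshalRequirements helper shaped like helpers.ResourceRequirements:

package main

import (
	"context"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/klog/v2"
	"sigs.k8s.io/yaml"
)

// marshalRequirements reports a marshalling failure through the context-aware
// error handler instead of klog.Errorf, then returns the error to the caller.
func marshalRequirements(ctx context.Context, requirements interface{}) ([]byte, error) {
	out, err := yaml.Marshal(requirements)
	if err != nil {
		utilruntime.HandleErrorWithContext(ctx, err, "failed to marshal resource requirement")
		return nil, err
	}
	return out, nil
}

func main() {
	ctx := klog.NewContext(context.Background(), klog.NewKlogr())
	if _, err := marshalRequirements(ctx, map[string]string{"cpu": "100m"}); err != nil {
		klog.FromContext(ctx).Error(err, "marshal failed")
	}
}
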
@@ -479,15 +479,10 @@ func newKubeConfigSecret(namespace, name string, kubeConfigData, certData, keyDa
}

func TestDeterminReplica(t *testing.T) {
- kubeVersionV113, _ := version.ParseGeneric("v1.13.0")
- kubeVersionV114, _ := version.ParseGeneric("v1.14.0")
- kubeVersionV122, _ := version.ParseGeneric("v1.22.5+5c84e52")
-
cases := []struct {
name string
mode operatorapiv1.InstallMode
existingNodes []runtime.Object
- kubeVersion *version.Version
expectedReplica int32
}{
{
@@ -512,30 +507,17 @@ func TestDeterminReplica(t *testing.T) {
existingNodes: []runtime.Object{newNode("node1"), newNode("node2"), newNode("node3")},
expectedReplica: singleReplica,
},
- {
- name: "kube v1.13",
- existingNodes: []runtime.Object{newNode("node1"), newNode("node2"), newNode("node3")},
- kubeVersion: kubeVersionV113,
- expectedReplica: singleReplica,
- },
- {
- name: "kube v1.14",
- existingNodes: []runtime.Object{newNode("node1"), newNode("node2"), newNode("node3")},
- kubeVersion: kubeVersionV114,
- expectedReplica: defaultReplica,
- },
{
name: "kube v1.22.5+5c84e52",
existingNodes: []runtime.Object{newNode("node1"), newNode("node2"), newNode("node3")},
- kubeVersion: kubeVersionV122,
expectedReplica: defaultReplica,
},
}

for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
- fakeKubeClient := fakekube.NewSimpleClientset(c.existingNodes...)
- replica := DetermineReplica(context.Background(), fakeKubeClient, c.mode, c.kubeVersion, "node-role.kubernetes.io/master=")
+ fakeKubeClient := fakekube.NewClientset(c.existingNodes...)
+ replica := DetermineReplica(context.Background(), fakeKubeClient, c.mode, "node-role.kubernetes.io/master=")
if replica != c.expectedReplica {
t.Errorf("Unexpected replica, actual: %d, expected: %d", replica, c.expectedReplica)
}
@@ -604,7 +586,7 @@ func TestAgentPriorityClassName(t *testing.T) {

for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
- priorityClassName := AgentPriorityClassName(c.klusterlet, c.kubeVersion)
+ priorityClassName := AgentPriorityClassName(context.Background(), c.klusterlet, c.kubeVersion)
if priorityClassName != c.expectedPriorityClassName {
t.Errorf("Unexpected priorityClassName, actual: %s, expected: %s", priorityClassName, c.expectedPriorityClassName)
}
@@ -1076,7 +1058,7 @@ func TestSetRelatedResourcesStatusesWithObj(t *testing.T) {
objData := assets.MustCreateAssetFromTemplate(c.manifestFile, template, c.config).Data

relatedResources := c.relatedResources
- SetRelatedResourcesStatusesWithObj(&relatedResources, objData)
+ SetRelatedResourcesStatusesWithObj(context.Background(), &relatedResources, objData)
c.relatedResources = relatedResources
if !reflect.DeepEqual(c.relatedResources, c.expectedRelatedResource) {
t.Errorf("Expect to get %v, but got %v", c.expectedRelatedResource, c.relatedResources)
@@ -1183,7 +1165,7 @@ func TestRemoveRelatedResourcesStatusesWithObj(t *testing.T) {
objData := assets.MustCreateAssetFromTemplate(c.manifestFile, template, c.config).Data

relatedResources := c.relatedResources
- RemoveRelatedResourcesStatusesWithObj(&relatedResources, objData)
+ RemoveRelatedResourcesStatusesWithObj(context.Background(), &relatedResources, objData)
c.relatedResources = relatedResources
if !reflect.DeepEqual(c.relatedResources, c.expectedRelatedResource) {
t.Errorf("Expect to get %v, but got %v", c.expectedRelatedResource, c.relatedResources)

@@ -94,7 +94,7 @@ func SyncKubeConfigSecret(ctx context.Context, secretName, secretNamespace, kube
return err
}

- if tokenValid(secret, tokenGetter) && clusterInfoNotChanged(secret, templateKubeconfig) {
+ if tokenValid(secret, tokenGetter) && clusterInfoNotChanged(ctx, secret, templateKubeconfig) {
return nil
}

@@ -138,11 +138,13 @@ func tokenValid(secret *corev1.Secret, tokenGetter TokenGetterFunc) bool {
return true
}

- func clusterInfoNotChanged(secret *corev1.Secret, templateKubeconfig *rest.Config) bool {
+ func clusterInfoNotChanged(ctx context.Context, secret *corev1.Secret, templateKubeconfig *rest.Config) bool {
+ logger := klog.FromContext(ctx)
+
// check if the templateKubeconfig is changed
templateCluster, err := assembleClusterConfig(templateKubeconfig)
if err != nil {
- klog.Infof("Assemble template cluster config error: %s", err)
+ logger.Error(err, "Assemble template cluster config error")
return false
}

@@ -152,26 +154,27 @@ func clusterInfoNotChanged(secret *corev1.Secret, templateKubeconfig *rest.Confi
}
kubeconfig, err := clientcmd.Load(saKubeconfig)
if err != nil {
- klog.Infof("Load kubeconfig error: %s", err)
+ logger.Error(err, "Load kubeconfig error")
return false
}
cluster, ok := kubeconfig.Clusters["cluster"]
if !ok {
- klog.Infof("Cluster not found")
+ logger.Info("Cluster not found")
return false
}

if cluster.Server != templateCluster.Server {
- klog.Infof("Cluster host changed from %s to %s", cluster.Server, templateCluster.Server)
+ logger.Info("Cluster host changed from",
+ "before", cluster.Server, "after", templateCluster.Server)
return false
}
if !bytes.Equal(cluster.CertificateAuthorityData, templateCluster.CertificateAuthorityData) {
- klog.Infof("Cluster certificate authority data changed")
+ logger.Info("Cluster certificate authority data changed")
return false
}
if cluster.InsecureSkipTLSVerify != templateCluster.InsecureSkipTLSVerify {
- klog.Infof("Cluster insecureSkipTLSVerify changed from %v to %v",
- cluster.InsecureSkipTLSVerify, templateCluster.InsecureSkipTLSVerify)
+ logger.Info("Cluster insecureSkipTLSVerify changed",
+ "before", cluster.InsecureSkipTLSVerify, "after", templateCluster.InsecureSkipTLSVerify)
return false
}

@@ -76,7 +76,6 @@ func TestTokenGetter(t *testing.T) {
})
tokenGetter := SATokenGetter(context.TODO(), saName, saNamespace, client)
token, _, additionalData, err := tokenGetter()
- fmt.Printf("client action is %v\n", client.Actions())
if err != nil && !tt.wantErr {
t.Error(err)
}

@@ -121,7 +121,7 @@ func NewClusterManagerController(
}

func (n *clusterManagerController) sync(ctx context.Context, controllerContext factory.SyncContext, clusterManagerName string) error {
- logger := klog.FromContext(ctx).WithValues("clusterManagmer", clusterManagerName)
+ logger := klog.FromContext(ctx).WithValues("clusterManager", clusterManagerName)
logger.V(4).Info("Reconciling ClusterManager")

originalClusterManager, err := n.clusterManagerLister.Get(clusterManagerName)
@@ -136,7 +136,7 @@ func (n *clusterManagerController) sync(ctx context.Context, controllerContext f
clusterManagerMode := clusterManager.Spec.DeployOption.Mode
clusterManagerNamespace := helpers.ClusterManagerNamespace(clusterManagerName, clusterManagerMode)

- resourceRequirements, err := helpers.ResourceRequirements(clusterManager)
+ resourceRequirements, err := helpers.ResourceRequirements(ctx, clusterManager)
if err != nil {
logger.Error(err, "failed to parse resource requirements for cluster manager")
return err
@@ -150,7 +150,7 @@ func (n *clusterManagerController) sync(ctx context.Context, controllerContext f

replica := n.deploymentReplicas
if replica <= 0 {
- replica = helpers.DetermineReplica(ctx, n.operatorKubeClient, clusterManager.Spec.DeployOption.Mode, nil, n.controlPlaneNodeLabelSelector)
+ replica = helpers.DetermineReplica(ctx, n.operatorKubeClient, clusterManager.Spec.DeployOption.Mode, n.controlPlaneNodeLabelSelector)
}

// This config is used to render template of manifests.
@@ -413,7 +413,7 @@ func cleanResources(ctx context.Context, kubeClient kubernetes.Interface, cm *op
return nil, err
}
objData := assets.MustCreateAssetFromTemplate(name, template, config).Data
- helpers.RemoveRelatedResourcesStatusesWithObj(&cm.Status.RelatedResources, objData)
+ helpers.RemoveRelatedResourcesStatusesWithObj(ctx, &cm.Status.RelatedResources, objData)
return objData, nil
},
file,

@@ -773,7 +773,7 @@ func newFakeHubConfigWithResourceRequirement(t *testing.T, r *operatorapiv1.Reso
},
}

- resourceRequirements, err := helpers.ResourceRequirements(clusterManager)
+ resourceRequirements, err := helpers.ResourceRequirements(context.Background(), clusterManager)
if err != nil {
t.Errorf("Failed to parse resource requirements: %v", err)
}

@@ -96,7 +96,7 @@ func (c *crdReconcile) reconcile(ctx context.Context, cm *operatorapiv1.ClusterM
return nil, fmt.Errorf("failed to add labels to template %s: %w", name, err)
}

- helpers.SetRelatedResourcesStatusesWithObj(&cm.Status.RelatedResources, objData)
+ helpers.SetRelatedResourcesStatusesWithObj(ctx, &cm.Status.RelatedResources, objData)
return objData, nil
},
hubDeployCRDResources...); err != nil {
@@ -139,7 +139,7 @@ func (c *crdReconcile) clean(ctx context.Context, cm *operatorapiv1.ClusterManag
return nil, err
}
objData := assets.MustCreateAssetFromTemplate(name, template, config).Data
- helpers.SetRelatedResourcesStatusesWithObj(&cm.Status.RelatedResources, objData)
+ helpers.SetRelatedResourcesStatusesWithObj(ctx, &cm.Status.RelatedResources, objData)
return objData, nil
},
hubCRDResourceFiles...); err != nil {

@@ -131,7 +131,7 @@ func (c *hubReconcile) reconcile(ctx context.Context, cm *operatorapiv1.ClusterM
return nil, err
}
objData := assets.MustCreateAssetFromTemplate(name, template, config).Data
- helpers.SetRelatedResourcesStatusesWithObj(&cm.Status.RelatedResources, objData)
+ helpers.SetRelatedResourcesStatusesWithObj(ctx, &cm.Status.RelatedResources, objData)
return objData, nil
},
hubResources...,

@@ -152,7 +152,7 @@ func (c *runtimeReconcile) reconcile(ctx context.Context, cm *operatorapiv1.Clus
return nil, err
}
objData := assets.MustCreateAssetFromTemplate(name, template, config).Data
- helpers.SetRelatedResourcesStatusesWithObj(&cm.Status.RelatedResources, objData)
+ helpers.SetRelatedResourcesStatusesWithObj(ctx, &cm.Status.RelatedResources, objData)
return objData, nil
},
managementResources...,
@@ -186,7 +186,7 @@ func (c *runtimeReconcile) reconcile(ctx context.Context, cm *operatorapiv1.Clus
return nil, err
}
objData := assets.MustCreateAssetFromTemplate(name, template, config).Data
- helpers.SetRelatedResourcesStatusesWithObj(&cm.Status.RelatedResources, objData)
+ helpers.SetRelatedResourcesStatusesWithObj(ctx, &cm.Status.RelatedResources, objData)
return objData, nil
},
c.recorder,

@@ -67,7 +67,7 @@ func (c *webhookReconcile) reconcile(ctx context.Context, cm *operatorapiv1.Clus
return nil, err
}
objData := assets.MustCreateAssetFromTemplate(name, template, config).Data
- helpers.SetRelatedResourcesStatusesWithObj(&cm.Status.RelatedResources, objData)
+ helpers.SetRelatedResourcesStatusesWithObj(ctx, &cm.Status.RelatedResources, objData)
return objData, nil
},
webhookResources...,

@@ -2,6 +2,7 @@ package clustermanager

import (
"context"
+ "os"
"time"

"github.com/openshift/library-go/pkg/controller/controllercmd"
@@ -10,6 +11,7 @@ import (
"k8s.io/client-go/informers"
corev1informers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
+ "k8s.io/klog/v2"

operatorclient "open-cluster-management.io/api/client/operator/clientset/versioned"
operatorinformer "open-cluster-management.io/api/client/operator/informers/externalversions"
@@ -31,6 +33,14 @@ type Options struct {

// RunClusterManagerOperator starts a new cluster manager operator
func (o *Options) RunClusterManagerOperator(ctx context.Context, controllerContext *controllercmd.ControllerContext) error {
+ // setting up contextual logger
+ logger := klog.NewKlogr()
+ podName := os.Getenv("POD_NAME")
+ if podName != "" {
+ logger = logger.WithValues("podName", podName)
+ }
+ ctx = klog.NewContext(ctx, logger)
+
// Build kubclient client and informer for managed cluster
kubeClient, err := kubernetes.NewForConfig(controllerContext.KubeConfig)
if err != nil {

@@ -199,6 +199,8 @@ func (m *Manager[T]) Apply(ctx context.Context, manifests resourceapply.AssetFun
}

func (m *Manager[T]) applyOne(ctx context.Context, required T) error {
+ logger := klog.FromContext(ctx)
+
accessor, err := meta.Accessor(required)
if err != nil {
return err
@@ -206,7 +208,7 @@ func (m *Manager[T]) applyOne(ctx context.Context, required T) error {
existing, err := m.client.Get(ctx, accessor.GetName(), metav1.GetOptions{})
if apierrors.IsNotFound(err) {
_, err := m.client.Create(ctx, required, metav1.CreateOptions{})
- klog.Infof("crd %s is created", accessor.GetName())
+ logger.Info("crd is created", "crdName", accessor.GetName())
return err
}
if err != nil {
@@ -240,7 +242,7 @@ func (m *Manager[T]) applyOne(ctx context.Context, required T) error {
return err
}

- klog.Infof("crd %s is updated to version %s", accessor.GetName(), m.version.String())
+ logger.Info("crd is updated", "crdName", accessor.GetName(), "version", m.version.String())

return nil
}

|
||||
func (n *klusterletCleanupController) sync(ctx context.Context, controllerContext factory.SyncContext, klusterletName string) error {
|
||||
logger := klog.FromContext(ctx).WithValues("klusterlet", klusterletName)
|
||||
logger.V(4).Info("Reconciling Klusterlet")
|
||||
ctx = klog.NewContext(ctx, logger)
|
||||
originalKlusterlet, err := n.klusterletLister.Get(klusterletName)
|
||||
if errors.IsNotFound(err) {
|
||||
// Klusterlet not found, could have been deleted, do nothing.
|
||||
@@ -103,7 +104,7 @@ func (n *klusterletCleanupController) sync(ctx context.Context, controllerContex
|
||||
}
|
||||
replica := n.deploymentReplicas
|
||||
if replica <= 0 {
|
||||
replica = helpers.DetermineReplica(ctx, n.kubeClient, klusterlet.Spec.DeployOption.Mode, n.kubeVersion, n.controlPlaneNodeLabelSelector)
|
||||
replica = helpers.DetermineReplica(ctx, n.kubeClient, klusterlet.Spec.DeployOption.Mode, n.controlPlaneNodeLabelSelector)
|
||||
}
|
||||
// Klusterlet is deleting, we remove its related resources on managed and management cluster
|
||||
config := klusterletConfig{
|
||||
@@ -214,20 +215,21 @@ func (n *klusterletCleanupController) sync(ctx context.Context, controllerContex
|
||||
func (n *klusterletCleanupController) checkConnectivity(ctx context.Context,
|
||||
amwClient workv1client.AppliedManifestWorkInterface,
|
||||
klusterlet *operatorapiv1.Klusterlet) (cleanupManagedClusterResources bool, err error) {
|
||||
logger := klog.FromContext(ctx).WithValues("klusterlet", klusterlet.Name)
|
||||
_, err = amwClient.List(ctx, metav1.ListOptions{})
|
||||
if err == nil {
|
||||
return true, nil
|
||||
}
|
||||
if errors.IsNotFound(err) {
|
||||
klog.Infof("AppliedManifestWork not found, klusterlet %s", klusterlet.Name)
|
||||
logger.Info("AppliedManifestWork not found")
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// if the managed cluster is destroyed, the returned err is TCP timeout or TCP no such host,
|
||||
// the k8s.io/apimachinery/pkg/api/errors.IsTimeout,IsServerTimeout can not match this error
|
||||
if isTCPTimeOutError(err) || isTCPNoSuchHostError(err) || isTCPConnectionRefusedError(err) {
|
||||
klog.V(4).Infof("Check the connectivity for klusterlet %s, annotation: %s, err: %v",
|
||||
klusterlet.Name, klusterlet.Annotations, err)
|
||||
logger.V(4).Info("check the connectivity for klusterlet",
|
||||
"annotations", klusterlet.Annotations, "err", err)
|
||||
if klusterlet.Annotations == nil {
|
||||
klusterlet.Annotations = make(map[string]string, 0)
|
||||
}
|
||||
@@ -239,14 +241,13 @@ func (n *klusterletCleanupController) checkConnectivity(ctx context.Context,
|
||||
}
|
||||
evictionTime, perr := time.Parse(time.RFC3339, evictionTimeStr)
|
||||
if perr != nil {
|
||||
klog.Infof("Parse eviction time %v for klusterlet %s error %s", evictionTimeStr, klusterlet.Name, perr)
|
||||
logger.Error(perr, "Parse eviction time for klusterlet", "evictionTime", evictionTimeStr)
|
||||
klusterlet.Annotations[managedResourcesEvictionTimestampAnno] = time.Now().Format(time.RFC3339)
|
||||
return true, err
|
||||
}
|
||||
|
||||
if evictionTime.Add(5 * time.Minute).Before(time.Now()) {
|
||||
klog.Infof("Try to connect managed cluster timed out for 5 minutes, klusterlet %s, ignore the resources",
|
||||
klusterlet.Name)
|
||||
logger.Info("Try to connect managed cluster timed out for 5 minutes, ignore the resources")
|
||||
// ignore the resources on the managed cluster, return false here
|
||||
return false, nil
|
||||
}
|
||||
|
||||
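The cleanup controller above also illustrates the per-reconcile pattern used by the operator controllers: the sync handler enriches the contextual logger with the object it is reconciling and re-attaches it with klog.NewContext, so helpers further down the call chain log with the same key/value pairs. A condensed sketch with illustrative names (syncExample, cleanupExample):

package main

import (
	"context"

	"k8s.io/klog/v2"
)

func syncExample(ctx context.Context, name string) error {
	// enrich the contextual logger with the object being reconciled
	logger := klog.FromContext(ctx).WithValues("klusterlet", name)
	logger.V(4).Info("Reconciling Klusterlet")

	// re-attach the enriched logger so callees see the same values
	ctx = klog.NewContext(ctx, logger)
	return cleanupExample(ctx)
}

func cleanupExample(ctx context.Context) error {
	// helpers recover the enriched logger from the context
	klog.FromContext(ctx).Info("cleaning up managed cluster resources")
	return nil
}

func main() {
	ctx := klog.NewContext(context.Background(), klog.NewKlogr())
	_ = syncExample(ctx, "example-klusterlet")
}
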
@@ -10,7 +10,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
clienttesting "k8s.io/client-go/testing"
- "k8s.io/klog/v2"

operatorapiv1 "open-cluster-management.io/api/operator/v1"
workv1 "open-cluster-management.io/api/work/v1"
@@ -47,7 +46,6 @@ func TestSyncDelete(t *testing.T) {
for _, action := range kubeActions {
if action.GetVerb() == deleteVerb {
deleteAction := action.(clienttesting.DeleteActionImpl)
- klog.Infof("kube delete name: %v\t resource:%v \t namespace:%v", deleteAction.Name, deleteAction.GetResource(), deleteAction.GetNamespace())
deleteActions = append(deleteActions, deleteAction)
}
}
@@ -106,7 +104,6 @@ func TestSyncDeleteHosted(t *testing.T) {
for _, action := range kubeActions {
if action.GetVerb() == deleteVerb {
deleteAction := action.(clienttesting.DeleteActionImpl)
- klog.Infof("management kube delete name: %v\t resource:%v \t namespace:%v", deleteAction.Name, deleteAction.GetResource(), deleteAction.GetNamespace())
deleteActionsManagement = append(deleteActionsManagement, deleteAction)
}
}
@@ -121,7 +118,6 @@
for _, action := range controller.managedKubeClient.Actions() {
if action.GetVerb() == deleteVerb {
deleteAction := action.(clienttesting.DeleteActionImpl)
- klog.Infof("managed kube delete name: %v\t resource:%v \t namespace:%v", deleteAction.Name, deleteAction.GetResource(), deleteAction.GetNamespace())
deleteActionsManaged = append(deleteActionsManaged, deleteAction)
}
}

@@ -237,6 +237,7 @@ func (config *klusterletConfig) populateBootstrap(klusterlet *operatorapiv1.Klus

func (n *klusterletController) sync(ctx context.Context, controllerContext factory.SyncContext, klusterletName string) error {
logger := klog.FromContext(ctx).WithValues("klusterlet", klusterletName)
+ ctx = klog.NewContext(ctx, logger)
logger.V(4).Info("Reconciling Klusterlet")
originalKlusterlet, err := n.klusterletLister.Get(klusterletName)
if errors.IsNotFound(err) {
@@ -248,7 +249,7 @@ func (n *klusterletController) sync(ctx context.Context, controllerContext facto
}
klusterlet := originalKlusterlet.DeepCopy()

- resourceRequirements, err := helpers.ResourceRequirements(klusterlet)
+ resourceRequirements, err := helpers.ResourceRequirements(ctx, klusterlet)
if err != nil {
logger.Error(err, "Failed to parse resource requirements for klusterlet")
return err
@@ -256,7 +257,7 @@ func (n *klusterletController) sync(ctx context.Context, controllerContext facto

replica := n.deploymentReplicas
if replica <= 0 {
- replica = helpers.DetermineReplica(ctx, n.kubeClient, klusterlet.Spec.DeployOption.Mode, n.kubeVersion, n.controlPlaneNodeLabelSelector)
+ replica = helpers.DetermineReplica(ctx, n.kubeClient, klusterlet.Spec.DeployOption.Mode, n.controlPlaneNodeLabelSelector)
}

config := klusterletConfig{
@@ -272,7 +273,7 @@ func (n *klusterletController) sync(ctx context.Context, controllerContext facto
ExternalServerURL: getServersFromKlusterlet(klusterlet),
OperatorNamespace: n.operatorNamespace,
Replica: replica,
- PriorityClassName: helpers.AgentPriorityClassName(klusterlet, n.kubeVersion),
+ PriorityClassName: helpers.AgentPriorityClassName(ctx, klusterlet, n.kubeVersion),

ExternalManagedKubeConfigSecret: helpers.ExternalManagedKubeConfig,
ExternalManagedKubeConfigRegistrationSecret: helpers.ExternalManagedKubeConfigRegistration,

@@ -255,7 +255,6 @@ func newTestControllerHosted(
getRegistrationServiceAccountCount++
if getRegistrationServiceAccountCount > 1 {
sa := newServiceAccount(name, klusterletNamespace, saRegistrationSecret.Name)
- klog.Infof("return service account %s/%s, secret: %v", klusterletNamespace, name, sa.Secrets)
return true, sa, nil
}
}
@@ -264,7 +263,6 @@ func newTestControllerHosted(
getWorkServiceAccountCount++
if getWorkServiceAccountCount > 1 {
sa := newServiceAccount(name, klusterletNamespace, saWorkSecret.Name)
- klog.Infof("return service account %s/%s, secret: %v", klusterletNamespace, name, sa.Secrets)
return true, sa, nil
}
}
@@ -779,7 +777,6 @@ func TestSyncDeployHosted(t *testing.T) {
for _, action := range kubeActions {
if action.GetVerb() == createVerb {
object := action.(clienttesting.CreateActionImpl).Object
- klog.Infof("management kube create: %v\t resource:%v \t namespace:%v", object.GetObjectKind(), action.GetResource(), action.GetNamespace())
createObjectsManagement = append(createObjectsManagement, object)
}
}
@@ -798,7 +795,6 @@ func TestSyncDeployHosted(t *testing.T) {
if action.GetVerb() == createVerb {

object := action.(clienttesting.CreateActionImpl).Object
- klog.Infof("managed kube create: %v\t resource:%v \t namespace:%v", object.GetObjectKind().GroupVersionKind(), action.GetResource(), action.GetNamespace())
createObjectsManaged = append(createObjectsManaged, object)
}
}
@@ -1553,7 +1549,7 @@ func newFakeKlusterletConfigWithResourceRequirement(t *testing.T, r *operatorapi
},
}

- requirements, err := helpers.ResourceRequirements(klusterlet)
+ requirements, err := helpers.ResourceRequirements(context.Background(), klusterlet)
if err != nil {
t.Errorf("Failed to parse resource requirements: %v", err)
}

@@ -57,7 +57,7 @@ func (r *crdReconcile) reconcile(ctx context.Context, klusterlet *operatorapiv1.
return nil, err
}
objData := assets.MustCreateAssetFromTemplate(name, template, config).Data
- helpers.SetRelatedResourcesStatusesWithObj(&klusterlet.Status.RelatedResources, objData)
+ helpers.SetRelatedResourcesStatusesWithObj(ctx, &klusterlet.Status.RelatedResources, objData)
return objData, nil
},
crdFiles...,
@@ -97,7 +97,7 @@ func (r *crdReconcile) clean(ctx context.Context, klusterlet *operatorapiv1.Klus
return nil, err
}
objData := assets.MustCreateAssetFromTemplate(name, template, config).Data
- helpers.SetRelatedResourcesStatusesWithObj(&klusterlet.Status.RelatedResources, objData)
+ helpers.SetRelatedResourcesStatusesWithObj(ctx, &klusterlet.Status.RelatedResources, objData)
return objData, nil
},
crdFiles...,

@@ -100,7 +100,7 @@ func (r *managedReconcile) reconcile(ctx context.Context, klusterlet *operatorap
return nil, err
}
objData := assets.MustCreateAssetFromTemplate(name, template, config).Data
- helpers.SetRelatedResourcesStatusesWithObj(&klusterlet.Status.RelatedResources, objData)
+ helpers.SetRelatedResourcesStatusesWithObj(ctx, &klusterlet.Status.RelatedResources, objData)
return objData, nil
},
managedStaticResourceFiles...,

@@ -75,7 +75,7 @@ func (r *managementReconcile) reconcile(ctx context.Context, klusterlet *operato
return nil, err
}
objData := assets.MustCreateAssetFromTemplate(name, template, config).Data
- helpers.SetRelatedResourcesStatusesWithObj(&klusterlet.Status.RelatedResources, objData)
+ helpers.SetRelatedResourcesStatusesWithObj(ctx, &klusterlet.Status.RelatedResources, objData)
return objData, nil
},
managementStaticResourceFiles...,

@@ -68,7 +68,7 @@ func (r *runtimeReconcile) installAgent(ctx context.Context, klusterlet *operato
return nil, err
}
objData := assets.MustCreateAssetFromTemplate(name, template, runtimeConfig).Data
- helpers.SetRelatedResourcesStatusesWithObj(&klusterlet.Status.RelatedResources, objData)
+ helpers.SetRelatedResourcesStatusesWithObj(ctx, &klusterlet.Status.RelatedResources, objData)
return objData, nil
},
r.recorder,
@@ -117,7 +117,7 @@ func (r *runtimeReconcile) installAgent(ctx context.Context, klusterlet *operato
return nil, err
}
objData := assets.MustCreateAssetFromTemplate(name, template, workConfig).Data
- helpers.SetRelatedResourcesStatusesWithObj(&klusterlet.Status.RelatedResources, objData)
+ helpers.SetRelatedResourcesStatusesWithObj(ctx, &klusterlet.Status.RelatedResources, objData)
return objData, nil
},
r.recorder,
@@ -167,7 +167,7 @@ func (r *runtimeReconcile) installSingletonAgent(ctx context.Context, klusterlet
return nil, err
}
objData := assets.MustCreateAssetFromTemplate(name, template, config).Data
- helpers.SetRelatedResourcesStatusesWithObj(&klusterlet.Status.RelatedResources, objData)
+ helpers.SetRelatedResourcesStatusesWithObj(ctx, &klusterlet.Status.RelatedResources, objData)
return objData, nil
},
r.recorder,

@@ -2,6 +2,7 @@ package klusterlet

import (
"context"
+ "os"
"time"

"github.com/openshift/library-go/pkg/controller/controllercmd"
@@ -12,6 +13,7 @@ import (
"k8s.io/client-go/informers"
corev1informers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
+ "k8s.io/klog/v2"

operatorclient "open-cluster-management.io/api/client/operator/clientset/versioned"
operatorinformer "open-cluster-management.io/api/client/operator/informers/externalversions"
@@ -34,6 +36,14 @@ type Options struct {

// RunKlusterletOperator starts a new klusterlet operator
func (o *Options) RunKlusterletOperator(ctx context.Context, controllerContext *controllercmd.ControllerContext) error {
+ // setting up contextual logger
+ logger := klog.NewKlogr()
+ podName := os.Getenv("POD_NAME")
+ if podName != "" {
+ logger = logger.WithValues("podName", podName)
+ }
+ ctx = klog.NewContext(ctx, logger)
+
// Build kube client and informer
kubeClient, err := kubernetes.NewForConfig(controllerContext.KubeConfig)
if err != nil {

@@ -3,11 +3,13 @@ package hub
import (
"context"
"net/http"
+ "os"
"time"

"github.com/openshift/library-go/pkg/controller/controllercmd"
"k8s.io/apiserver/pkg/server/mux"
"k8s.io/client-go/kubernetes"
+ "k8s.io/klog/v2"
"k8s.io/utils/clock"

clusterclient "open-cluster-management.io/api/client/cluster/clientset/versioned"
@@ -22,6 +24,14 @@ import (

// RunControllerManager starts the controllers on hub to make placement decisions.
func RunControllerManager(ctx context.Context, controllerContext *controllercmd.ControllerContext) error {
+ // setting up contextual logger
+ logger := klog.NewKlogr()
+ podName := os.Getenv("POD_NAME")
+ if podName != "" {
+ logger = logger.WithValues("podName", podName)
+ }
+ ctx = klog.NewContext(ctx, logger)
+
clusterClient, err := clusterclient.NewForConfig(controllerContext.KubeConfig)
if err != nil {
return err

@@ -207,7 +207,7 @@ func (i *Importer) reconcile(
return cluster, err
}
}
- crdObjs, rawObjs, err := chart.RenderKlusterletChart(klusterletChartConfig, klusterletNamespace)
+ crdObjs, rawObjs, err := chart.RenderKlusterletChart(ctx, klusterletChartConfig, klusterletNamespace)
if err != nil {
return cluster, err
}

@@ -2,6 +2,7 @@ package hub

import (
"context"
+ "os"
"time"

"github.com/openshift/library-go/pkg/controller/controllercmd"
@@ -12,6 +13,7 @@ import (
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/metadata"
"k8s.io/client-go/rest"
+ "k8s.io/klog/v2"
generate "k8s.io/kubectl/pkg/generate"
cpclientset "sigs.k8s.io/cluster-inventory-api/client/clientset/versioned"
cpinformerv1alpha1 "sigs.k8s.io/cluster-inventory-api/client/informers/externalversions"
@@ -111,6 +113,14 @@ func (m *HubManagerOptions) AddFlags(fs *pflag.FlagSet) {

// RunControllerManager starts the controllers on hub to manage spoke cluster registration.
func (m *HubManagerOptions) RunControllerManager(ctx context.Context, controllerContext *controllercmd.ControllerContext) error {
+ // setting up contextual logger
+ logger := klog.NewKlogr()
+ podName := os.Getenv("POD_NAME")
+ if podName != "" {
+ logger = logger.WithValues("podName", podName)
+ }
+ ctx = klog.NewContext(ctx, logger)
+
kubeClient, err := kubernetes.NewForConfig(controllerContext.KubeConfig)
if err != nil {
return err

@@ -115,6 +115,14 @@ func (o *SpokeAgentConfig) HealthCheckers() []healthz.HealthChecker {
// - the client certificate referenced by the hub kubeconfig become expired (Return failure when
// checking the health of the agent);
func (o *SpokeAgentConfig) RunSpokeAgent(ctx context.Context, controllerContext *controllercmd.ControllerContext) error {
+ // setting up contextual logger
+ logger := klog.NewKlogr()
+ podName := os.Getenv("POD_NAME")
+ if podName != "" {
+ logger = logger.WithValues("podName", podName, "clusterName", o.agentOptions.SpokeClusterName)
+ }
+ ctx = klog.NewContext(ctx, logger)
+
kubeConfig := controllerContext.KubeConfig

// load spoke client config and create spoke clients,

@@ -59,7 +59,7 @@ var _ = ginkgo.Describe("Cluster Auto Importer", func() {
clusterManagerConfig := chart.NewDefaultClusterManagerChartConfig()
clusterManagerConfig.CreateBootstrapSA = true
clusterManagerConfig.CreateNamespace = true
- crdObjs, rawObjs, err := chart.RenderClusterManagerChart(clusterManagerConfig, "open-cluster-management")
+ crdObjs, rawObjs, err := chart.RenderClusterManagerChart(context.Background(), clusterManagerConfig, "open-cluster-management")
manifests := append(crdObjs, rawObjs...)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
recorder := commonrecorder.NewEventsRecorderWrapper(context.TODO(),