🌱 Refactor code to fix lint warning (#218)

* Refactor code to fix lint warning

Signed-off-by: Jian Qiu <jqiu@redhat.com>

* Enable lint for test files

Signed-off-by: Jian Qiu <jqiu@redhat.com>

---------

Signed-off-by: Jian Qiu <jqiu@redhat.com>
Author: Jian Qiu
Date: 2023-07-25 13:12:34 +08:00
Committed by: GitHub
Parent: e22faa4545
Commit: e810520961
145 changed files with 1218 additions and 935 deletions
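Most of the diff below is mechanical lint cleanup: long call sites wrapped, empty-slice literals replaced with nil var declarations, repeated string literals lifted into constants, error strings lowercased, and unchecked errors handled. The single most frequent change is the slice one; a minimal standalone sketch of that pattern (hypothetical names, not code from this repository):

package main

import "fmt"

func collectEvens(nums []int) []int {
	// Preferred form: declare a nil slice instead of evens := []int{}.
	// append behaves identically on a nil slice, so the empty literal is
	// redundant and commonly flagged by Go linters.
	var evens []int
	for _, n := range nums {
		if n%2 == 0 {
			evens = append(evens, n)
		}
	}
	return evens
}

func main() {
	fmt.Println(collectEvens([]int{1, 2, 3, 4})) // [2 4]
}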


@@ -23,7 +23,6 @@ run:
skip-files:
- ".*\\.pb\\.go"
- ".*\\.gen\\.go"
- ".*_test\\.go"
linters:
# please, do not use `enable-all`: it's deprecated and will be removed soon.
@@ -220,6 +219,7 @@ issues:
linters:
- errcheck
- maligned
- goconst
# Independently from option `exclude` we use default exclude patterns,
# it can be disabled by this option. To list all
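The config hunk above removes the skip-files entry for ".*_test\\.go", so golangci-lint now checks test files, and adds goconst to an existing exclude rule so repeated literals remain tolerated where that rule applies (presumably in tests). Elsewhere in the commit the same goconst concern is fixed directly by introducing named constants such as createVerb; a minimal sketch of that fix, with hypothetical surrounding code:

package main

import "fmt"

// createVerb replaces the "create" literal that would otherwise be repeated
// in several comparisons, which is exactly what goconst reports.
const createVerb = "create"

func countCreates(verbs []string) int {
	n := 0
	for _, v := range verbs {
		if v == createVerb {
			n++
		}
	}
	return n
}

func main() {
	fmt.Println(countCreates([]string{"create", "delete", "create"})) // 2
}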


@@ -609,7 +609,10 @@ func (a byPatchName) Less(i, j int) bool {
return patchi.Namespace < patchj.Namespace
}
func newManagedClusterAddon(name, namespace string, configs []addonv1alpha1.AddOnConfig, configStatus []addonv1alpha1.ConfigReference) *addonv1alpha1.ManagedClusterAddOn {
func newManagedClusterAddon(
name, namespace string,
configs []addonv1alpha1.AddOnConfig,
configStatus []addonv1alpha1.ConfigReference) *addonv1alpha1.ManagedClusterAddOn {
mca := addontesting.NewAddon(name, namespace)
mca.Spec.Configs = configs
mca.Status.ConfigReferences = configStatus


@@ -240,10 +240,14 @@ func TestMgmtAddonProgressingReconcile(t *testing.T) {
if len(cma.Status.DefaultConfigReferences) != 0 {
t.Errorf("DefaultConfigReferences object is not correct: %v", cma.Status.DefaultConfigReferences)
}
if !apiequality.Semantic.DeepEqual(cma.Status.InstallProgressions[0].ConfigReferences[0].LastAppliedConfig, cma.Status.InstallProgressions[0].ConfigReferences[0].DesiredConfig) {
if !apiequality.Semantic.DeepEqual(
cma.Status.InstallProgressions[0].ConfigReferences[0].LastAppliedConfig,
cma.Status.InstallProgressions[0].ConfigReferences[0].DesiredConfig) {
t.Errorf("InstallProgressions LastAppliedConfig is not correct: %v", cma.Status.InstallProgressions[0].ConfigReferences[0])
}
if !apiequality.Semantic.DeepEqual(cma.Status.InstallProgressions[0].ConfigReferences[0].LastKnownGoodConfig, cma.Status.InstallProgressions[0].ConfigReferences[0].DesiredConfig) {
if !apiequality.Semantic.DeepEqual(
cma.Status.InstallProgressions[0].ConfigReferences[0].LastKnownGoodConfig,
cma.Status.InstallProgressions[0].ConfigReferences[0].DesiredConfig) {
t.Errorf("InstallProgressions LastKnownGoodConfig is not correct: %v", cma.Status.InstallProgressions[0].ConfigReferences[0])
}
if cma.Status.InstallProgressions[0].Conditions[0].Reason != addonv1alpha1.ProgressingReasonInstallSucceed {
@@ -389,10 +393,14 @@ func TestMgmtAddonProgressingReconcile(t *testing.T) {
if len(cma.Status.DefaultConfigReferences) != 0 {
t.Errorf("DefaultConfigReferences object is not correct: %v", cma.Status.DefaultConfigReferences)
}
if !apiequality.Semantic.DeepEqual(cma.Status.InstallProgressions[0].ConfigReferences[0].LastAppliedConfig, cma.Status.InstallProgressions[0].ConfigReferences[0].DesiredConfig) {
if !apiequality.Semantic.DeepEqual(
cma.Status.InstallProgressions[0].ConfigReferences[0].LastAppliedConfig,
cma.Status.InstallProgressions[0].ConfigReferences[0].DesiredConfig) {
t.Errorf("InstallProgressions LastAppliedConfig is not correct: %v", cma.Status.InstallProgressions[0].ConfigReferences[0])
}
if !apiequality.Semantic.DeepEqual(cma.Status.InstallProgressions[0].ConfigReferences[0].LastKnownGoodConfig, cma.Status.InstallProgressions[0].ConfigReferences[0].DesiredConfig) {
if !apiequality.Semantic.DeepEqual(
cma.Status.InstallProgressions[0].ConfigReferences[0].LastKnownGoodConfig,
cma.Status.InstallProgressions[0].ConfigReferences[0].DesiredConfig) {
t.Errorf("InstallProgressions LastKnownGoodConfig is not correct: %v", cma.Status.InstallProgressions[0].ConfigReferences[0])
}
if cma.Status.InstallProgressions[0].Conditions[0].Reason != addonv1alpha1.ProgressingReasonUpgradeSucceed {


@@ -25,11 +25,6 @@ import (
testingcommon "open-cluster-management.io/ocm/pkg/common/testing"
)
func newClusterManagementOwner(name string) metav1.OwnerReference {
clusterManagementAddon := addontesting.NewClusterManagementAddon(name, "testcrd", "testcr").Build()
return *metav1.NewControllerRef(clusterManagementAddon, addonapiv1alpha1.GroupVersion.WithKind("ClusterManagementAddOn"))
}
func TestReconcile(t *testing.T) {
cases := []struct {
name string


@@ -9,6 +9,7 @@ import (
"time"
certificatesv1 "k8s.io/api/certificates/v1"
certificates "k8s.io/api/certificates/v1beta1"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -66,7 +67,7 @@ func TestTemplateCSRConfigurationsFunc(t *testing.T) {
addon: NewFakeTemplateManagedClusterAddon("addon1", "cluster1", "template1", "fakehash"),
expectedConfigs: []addonapiv1alpha1.RegistrationConfig{
{
SignerName: "kubernetes.io/kube-apiserver-client",
SignerName: certificates.KubeAPIServerClientSignerName,
Subject: addonapiv1alpha1.Subject{
User: "system:open-cluster-management:cluster:cluster1:addon:addon1:agent:agent1",
@@ -188,7 +189,7 @@ func TestTemplateCSRApproveCheckFunc(t *testing.T) {
Name: "csr1",
},
Spec: certificatesv1.CertificateSigningRequestSpec{
SignerName: "kubernetes.io/kube-apiserver-client",
SignerName: certificates.KubeAPIServerClientSignerName,
},
},
expectedApprove: false, // fake csr data
@@ -288,7 +289,7 @@ func TestTemplateCSRSignFunc(t *testing.T) {
Name: "csr1",
},
Spec: certificatesv1.CertificateSigningRequestSpec{
SignerName: "kubernetes.io/kube-apiserver-client",
SignerName: certificates.KubeAPIServerClientSignerName,
Username: "system:open-cluster-management:cluster1:adcde",
},
},
@@ -356,7 +357,7 @@ func NewFakeManagedCluster(name string) *clusterv1.ManagedCluster {
return &clusterv1.ManagedCluster{
TypeMeta: metav1.TypeMeta{
Kind: "ManagedCluster",
APIVersion: clusterv1.SchemeGroupVersion.String(),
APIVersion: clusterv1.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
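The hunks in this file replace the hard-coded "kubernetes.io/kube-apiserver-client" string with the exported signer-name constant from the Kubernetes certificates API, and the older clusterv1.SchemeGroupVersion alias with clusterv1.GroupVersion. A tiny standalone sketch of the constant (using the certificates/v1 package, where the same constant is also defined; not the test code above):

package main

import (
	"fmt"

	certificatesv1 "k8s.io/api/certificates/v1"
)

func main() {
	// The named constant replaces the raw signer string, so a typo in the
	// signer name becomes a compile-time error instead of a silent mismatch.
	spec := certificatesv1.CertificateSigningRequestSpec{
		SignerName: certificatesv1.KubeAPIServerClientSignerName,
	}
	fmt.Println(spec.SignerName) // kubernetes.io/kube-apiserver-client
}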


@@ -43,7 +43,7 @@ func (m *PermissionApplier) Apply(
recorder events.Recorder,
manifests resourceapply.AssetFunc,
files ...string) []resourceapply.ApplyResult {
ret := []resourceapply.ApplyResult{}
var ret []resourceapply.ApplyResult
for _, file := range files {
result := resourceapply.ApplyResult{File: file}
objBytes, err := manifests(file)
@@ -73,7 +73,7 @@ func (m *PermissionApplier) Apply(
result.Result, result.Changed, result.Error = Apply[*rbacv1.RoleBinding](
ctx, m.roleBindingLister.RoleBindings(t.Namespace), m.client.RbacV1().RoleBindings(t.Namespace), compareRoleBinding, t, recorder)
default:
result.Error = fmt.Errorf("object type is not correct.")
result.Error = fmt.Errorf("object type is not correct")
}
}
return ret
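Besides the nil-slice declaration, the hunk drops the capital letter and trailing period from the error string, the usual stylecheck complaint (ST1005): error strings get wrapped into longer messages, so they should read as a mid-sentence fragment. A small illustration with hypothetical names:

package main

import "fmt"

func parseObject(kind string) error {
	// Lowercase, no trailing period, so the text composes cleanly when
	// a caller wraps it with fmt.Errorf("...: %w", err).
	return fmt.Errorf("object type %q is not correct", kind)
}

func main() {
	if err := parseObject("Gadget"); err != nil {
		fmt.Println(fmt.Errorf("apply failed: %w", err)) // apply failed: object type "Gadget" is not correct
	}
}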


@@ -65,7 +65,7 @@ rules:
},
},
{
name: "comapre and no update clusterrole",
name: "compare and no update clusterrole",
existingManifest: `
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
@@ -220,7 +220,7 @@ rules:
},
},
{
name: "comapre and no update clusterrole",
name: "compare and no update clusterrole",
existingManifest: `
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
@@ -351,13 +351,16 @@ subjects:
informerFactory = informers.NewSharedInformerFactory(kubeClient, 3*time.Minute)
switch t := o.(type) {
case *rbacv1.ClusterRole:
informerFactory.Rbac().V1().ClusterRoles().Informer().GetStore().Add(t)
err = informerFactory.Rbac().V1().ClusterRoles().Informer().GetStore().Add(t)
case *rbacv1.ClusterRoleBinding:
informerFactory.Rbac().V1().ClusterRoleBindings().Informer().GetStore().Add(t)
err = informerFactory.Rbac().V1().ClusterRoleBindings().Informer().GetStore().Add(t)
case *rbacv1.Role:
informerFactory.Rbac().V1().Roles().Informer().GetStore().Add(t)
err = informerFactory.Rbac().V1().Roles().Informer().GetStore().Add(t)
case *rbacv1.RoleBinding:
informerFactory.Rbac().V1().RoleBindings().Informer().GetStore().Add(t)
err = informerFactory.Rbac().V1().RoleBindings().Informer().GetStore().Add(t)
}
if err != nil {
t.Fatal(err)
}
} else {
kubeClient = kubefake.NewSimpleClientset()
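The switch above now assigns the error returned by Informer().GetStore().Add and fails the test on it, which is what errcheck asks for. A minimal sketch of the same idea against a plain client-go store (hypothetical object, not the test fixture above):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// cache.Store.Add returns an error; errcheck requires callers to handle
	// it rather than silently discard it.
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "ns1", Name: "p1"}}
	if err := store.Add(pod); err != nil {
		panic(err)
	}
	fmt.Println(store.ListKeys()) // [ns1/p1]
}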


@@ -65,7 +65,7 @@ func (o *AgentOptions) AddFlags(flags *pflag.FlagSet) {
flags.StringVar(&o.AgentID, "agent-id", o.AgentID, "ID of the agent")
}
// spokeKubeConfig builds kubeconfig for the spoke/managed cluster
// SpokeKubeConfig builds kubeconfig for the spoke/managed cluster
func (o *AgentOptions) SpokeKubeConfig(managedRestConfig *rest.Config) (*rest.Config, error) {
if o.SpokeKubeconfigFile == "" {
managedRestConfig.QPS = o.CommoOpts.QPS


@@ -14,7 +14,7 @@ type Options struct {
QPS float32
}
// NewAgentOptions returns the flags with default value set
// NewOptions returns the flags with default value set
func NewOptions() *Options {
opts := &Options{
QPS: 50,


@@ -58,35 +58,42 @@ func TestComplete(t *testing.T) {
{
name: "override cluster name in cert with specified value",
clusterName: "cluster1",
secret: testinghelpers.NewHubKubeconfigSecret(componentNamespace, "hub-kubeconfig-secret", "", testinghelpers.NewTestCert("system:open-cluster-management:cluster2:agent2", 60*time.Second), map[string][]byte{
"kubeconfig": testinghelpers.NewKubeconfig(nil, nil),
"cluster-name": []byte("cluster3"),
"agent-name": []byte("agent3"),
}),
secret: testinghelpers.NewHubKubeconfigSecret(
componentNamespace, "hub-kubeconfig-secret", "",
testinghelpers.NewTestCert("system:open-cluster-management:cluster2:agent2", 60*time.Second), map[string][]byte{
"kubeconfig": testinghelpers.NewKubeconfig(nil, nil),
"cluster-name": []byte("cluster3"),
"agent-name": []byte("agent3"),
}),
expectedClusterName: "cluster1",
expectedAgentName: "agent2",
},
{
name: "take cluster/agent name from secret",
secret: testinghelpers.NewHubKubeconfigSecret(componentNamespace, "hub-kubeconfig-secret", "", nil, map[string][]byte{
"cluster-name": []byte("cluster1"),
"agent-name": []byte("agent1"),
}),
secret: testinghelpers.NewHubKubeconfigSecret(
componentNamespace, "hub-kubeconfig-secret", "", nil, map[string][]byte{
"cluster-name": []byte("cluster1"),
"agent-name": []byte("agent1"),
}),
expectedClusterName: "cluster1",
expectedAgentName: "agent1",
},
{
name: "take cluster/agent name from cert",
secret: testinghelpers.NewHubKubeconfigSecret(componentNamespace, "hub-kubeconfig-secret", "", testinghelpers.NewTestCert("system:open-cluster-management:cluster1:agent1", 60*time.Second), map[string][]byte{}),
name: "take cluster/agent name from cert",
secret: testinghelpers.NewHubKubeconfigSecret(
componentNamespace, "hub-kubeconfig-secret", "",
testinghelpers.NewTestCert("system:open-cluster-management:cluster1:agent1", 60*time.Second), map[string][]byte{}),
expectedClusterName: "cluster1",
expectedAgentName: "agent1",
},
{
name: "override cluster name in secret with value from cert",
secret: testinghelpers.NewHubKubeconfigSecret(componentNamespace, "hub-kubeconfig-secret", "", testinghelpers.NewTestCert("system:open-cluster-management:cluster1:agent1", 60*time.Second), map[string][]byte{
"cluster-name": []byte("cluster2"),
"agent-name": []byte("agent2"),
}),
secret: testinghelpers.NewHubKubeconfigSecret(
componentNamespace, "hub-kubeconfig-secret", "",
testinghelpers.NewTestCert("system:open-cluster-management:cluster1:agent1", 60*time.Second), map[string][]byte{
"cluster-name": []byte("cluster2"),
"agent-name": []byte("agent2"),
}),
expectedClusterName: "cluster1",
expectedAgentName: "agent1",
},
@@ -115,6 +122,9 @@ func TestComplete(t *testing.T) {
err = registration.DumpSecret(
kubeClient.CoreV1(), componentNamespace, "hub-kubeconfig-secret",
options.HubKubeconfigDir, context.TODO(), eventstesting.NewTestingEventRecorder(t))
if err != nil {
t.Error(err)
}
if err := options.Complete(); err != nil {
t.Errorf("unexpected error: %v", err)


@@ -16,7 +16,7 @@ import (
"k8s.io/klog/v2"
)
// Patcher is just the Patch API with a generic to keep use sites type safe.
// PatchClient is just the Patch API with a generic to keep use sites type safe.
// This is inspired by the commiter code in https://github.com/kcp-dev/kcp/blob/main/pkg/reconciler/committer/committer.go
type PatchClient[R runtime.Object] interface {
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (R, error)
@@ -28,6 +28,7 @@ type Patcher[R runtime.Object, Sp any, St any] interface {
PatchStatus(context.Context, R, St, St) (bool, error)
PatchSpec(context.Context, R, Sp, Sp) (bool, error)
PatchLabelAnnotations(context.Context, R, metav1.ObjectMeta, metav1.ObjectMeta) (bool, error)
WithOptions(options PatchOptions) Patcher[R, Sp, St]
}
type PatchOptions struct {
@@ -47,14 +48,14 @@ type patcher[R runtime.Object, Sp any, St any] struct {
opts PatchOptions
}
func NewPatcher[R runtime.Object, Sp any, St any](client PatchClient[R]) *patcher[R, Sp, St] {
func NewPatcher[R runtime.Object, Sp any, St any](client PatchClient[R]) Patcher[R, Sp, St] {
p := &patcher[R, Sp, St]{
client: client,
}
return p
}
func (p *patcher[R, Sp, St]) WithOptions(options PatchOptions) *patcher[R, Sp, St] {
func (p *patcher[R, Sp, St]) WithOptions(options PatchOptions) Patcher[R, Sp, St] {
p.opts = options
return p
}
@@ -66,7 +67,7 @@ func (p *patcher[R, Sp, St]) AddFinalizer(ctx context.Context, object R, finaliz
}
existingFinalizers := accessor.GetFinalizers()
finalizersToAdd := []string{}
var finalizersToAdd []string
for _, finalizer := range finalizers {
hasFinalizer := false
for i := range existingFinalizers {
@@ -120,7 +121,7 @@ func (p *patcher[R, Sp, St]) RemoveFinalizer(ctx context.Context, object R, fina
return err
}
copiedFinalizers := []string{}
var copiedFinalizers []string
existingFinalizers := accessor.GetFinalizers()
for i := range existingFinalizers {
matchFinalizer := false
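NewPatcher previously returned the unexported *patcher type from an exported constructor, which linters such as revive flag (unexported-return); returning the Patcher interface fixes that, and adding WithOptions to the interface keeps the chained NewPatcher(...).WithOptions(...) call sites compiling. A self-contained sketch of the same constructor-returns-interface pattern with hypothetical names (not the actual patcher package):

package main

import "fmt"

// Greeter is the exported surface; the implementing struct stays unexported.
type Greeter interface {
	Greet(name string) string
	WithPrefix(prefix string) Greeter
}

type greeter struct {
	prefix string
}

// NewGreeter returns the interface rather than *greeter, so the exported
// constructor no longer leaks an unexported type.
func NewGreeter() Greeter {
	return &greeter{prefix: "hello"}
}

// WithPrefix also returns the interface, keeping chained construction working.
func (g *greeter) WithPrefix(prefix string) Greeter {
	g.prefix = prefix
	return g
}

func (g *greeter) Greet(name string) string {
	return fmt.Sprintf("%s, %s", g.prefix, name)
}

func main() {
	fmt.Println(NewGreeter().WithPrefix("hi").Greet("cluster1")) // hi, cluster1
}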


@@ -328,7 +328,9 @@ func TestPatchLabelAnnotations(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if !equality.Semantic.DeepEqual(labelPatch["metadata"], map[string]interface{}{"uid": "", "resourceVersion": "", "labels": map[string]interface{}{"key1": nil}}) {
if !equality.Semantic.DeepEqual(
labelPatch["metadata"],
map[string]interface{}{"uid": "", "resourceVersion": "", "labels": map[string]interface{}{"key1": nil}}) {
t.Errorf("not patched correctly got %v", labelPatch)
}
},


@@ -71,7 +71,7 @@ func manageCABundleConfigMap(caBundleConfigMap *corev1.ConfigMap, currentSigner
caBundleConfigMap.Data = map[string]string{}
}
certificates := []*x509.Certificate{}
var certificates []*x509.Certificate
caBundle := caBundleConfigMap.Data["ca-bundle.crt"]
if len(caBundle) > 0 {
var err error
@@ -83,7 +83,7 @@ func manageCABundleConfigMap(caBundleConfigMap *corev1.ConfigMap, currentSigner
certificates = append([]*x509.Certificate{currentSigner}, certificates...)
certificates = crypto.FilterExpiredCerts(certificates...)
finalCertificates := []*x509.Certificate{}
var finalCertificates []*x509.Certificate
// now check for duplicates. n^2, but super simple
for i := range certificates {
found := false


@@ -85,7 +85,7 @@ func TestManageCABundleConfigMap(t *testing.T) {
}
if !reflect.DeepEqual(c.signerCert, caCerts[0]) {
t.Fatalf("Current signer cert should be put at the begining")
t.Fatalf("Current signer cert should be put at the beginning")
}
}
})


@@ -167,9 +167,9 @@ func TestNeedNewTargetCertKeyPair(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
caBundleCerts := []*x509.Certificate{}
var caBundleCerts []*x509.Certificate
if len(c.caBundle) > 0 {
caBundleCerts, err = cert.ParseCertsPEM([]byte(c.caBundle))
caBundleCerts, err = cert.ParseCertsPEM(c.caBundle)
if err != nil {
t.Fatalf("Expected no error, but got: %v", err)
}


@@ -264,9 +264,9 @@ func ApplyDirectly(
cache resourceapply.ResourceCache,
manifests resourceapply.AssetFunc,
files ...string) []resourceapply.ApplyResult {
ret := []resourceapply.ApplyResult{}
var ret []resourceapply.ApplyResult
genericApplyFiles := []string{}
var genericApplyFiles []string
for _, file := range files {
result := resourceapply.ApplyResult{File: file}
objBytes, err := manifests(file)


@@ -22,7 +22,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/diff"
"k8s.io/apimachinery/pkg/util/version"
fakekube "k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/rest"
@@ -38,6 +37,8 @@ import (
"open-cluster-management.io/ocm/manifests"
)
const nameFoo = "foo"
func newValidatingWebhookConfiguration(name, svc, svcNameSpace string) *admissionv1.ValidatingWebhookConfiguration {
return &admissionv1.ValidatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{
@@ -198,9 +199,14 @@ func TestApplyDirectly(t *testing.T) {
{
name: "Apply webhooks & secret",
applyFiles: map[string]runtime.Object{
"validatingwebhooks": newUnstructured("admissionregistration.k8s.io/v1", "ValidatingWebhookConfiguration", "", "", map[string]interface{}{"webhooks": []interface{}{}}),
"mutatingwebhooks": newUnstructured("admissionregistration.k8s.io/v1", "MutatingWebhookConfiguration", "", "", map[string]interface{}{"webhooks": []interface{}{}}),
"secret": newUnstructured("v1", "Secret", "ns1", "n1", map[string]interface{}{"data": map[string]interface{}{"key1": []byte("key1")}}),
"validatingwebhooks": newUnstructured(
"admissionregistration.k8s.io/v1", "ValidatingWebhookConfiguration", "", "",
map[string]interface{}{"webhooks": []interface{}{}}),
"mutatingwebhooks": newUnstructured(
"admissionregistration.k8s.io/v1", "MutatingWebhookConfiguration", "", "",
map[string]interface{}{"webhooks": []interface{}{}}),
"secret": newUnstructured(
"v1", "Secret", "ns1", "n1", map[string]interface{}{"data": map[string]interface{}{"key1": []byte("key1")}}),
},
applyFileNames: []string{"validatingwebhooks", "mutatingwebhooks", "secret"},
expectErr: false,
@@ -238,7 +244,7 @@ func TestApplyDirectly(t *testing.T) {
fakeExtensionClient := fakeapiextensions.NewSimpleClientset()
fakeApplyFunc := func(name string) ([]byte, error) {
if c.applyFiles[name] == nil {
return nil, fmt.Errorf("Failed to find file")
return nil, fmt.Errorf("failed to find file")
}
return json.Marshal(c.applyFiles[name])
@@ -267,7 +273,7 @@ func TestApplyDirectly(t *testing.T) {
)
}
aggregatedErr := []error{}
var aggregatedErr []error
for _, r := range results {
if r.Error != nil {
aggregatedErr = append(aggregatedErr, r.Error)
@@ -286,11 +292,18 @@ func TestApplyDirectly(t *testing.T) {
func TestDeleteStaticObject(t *testing.T) {
applyFiles := map[string]runtime.Object{
"validatingwebhooks": newUnstructured("admissionregistration.k8s.io/v1", "ValidatingWebhookConfiguration", "", "", map[string]interface{}{"webhooks": []interface{}{}}),
"mutatingwebhooks": newUnstructured("admissionregistration.k8s.io/v1", "MutatingWebhookConfiguration", "", "", map[string]interface{}{"webhooks": []interface{}{}}),
"secret": newUnstructured("v1", "Secret", "ns1", "n1", map[string]interface{}{"data": map[string]interface{}{"key1": []byte("key1")}}),
"crd": newUnstructured("apiextensions.k8s.io/v1beta1", "CustomResourceDefinition", "", "", map[string]interface{}{}),
"kind1": newUnstructured("v1", "Kind1", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": []byte("key1")}}),
"validatingwebhooks": newUnstructured(
"admissionregistration.k8s.io/v1", "ValidatingWebhookConfiguration", "", "",
map[string]interface{}{"webhooks": []interface{}{}}),
"mutatingwebhooks": newUnstructured(
"admissionregistration.k8s.io/v1", "MutatingWebhookConfiguration", "", "",
map[string]interface{}{"webhooks": []interface{}{}}),
"secret": newUnstructured(
"v1", "Secret", "ns1", "n1", map[string]interface{}{"data": map[string]interface{}{"key1": []byte("key1")}}),
"crd": newUnstructured(
"apiextensions.k8s.io/v1beta1", "CustomResourceDefinition", "", "", map[string]interface{}{}),
"kind1": newUnstructured(
"v1", "Kind1", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": []byte("key1")}}),
}
testcase := []struct {
name string
@@ -338,7 +351,7 @@ func TestDeleteStaticObject(t *testing.T) {
fakeExtensionClient := fakeapiextensions.NewSimpleClientset()
fakeAssetFunc := func(name string) ([]byte, error) {
if applyFiles[name] == nil {
return nil, fmt.Errorf("Failed to find file")
return nil, fmt.Errorf("failed to find file")
}
return json.Marshal(applyFiles[name])
@@ -390,8 +403,9 @@ func TestLoadClientConfigFromSecret(t *testing.T) {
secret: newKubeConfigSecret("ns1", "secret1", newKubeConfig("testhost", "", ""), nil, nil),
},
{
name: "load kubeconfig with references to external key/cert files",
secret: newKubeConfigSecret("ns1", "secret1", newKubeConfig("testhost", "tls.crt", "tls.key"), []byte("--- TRUNCATED ---"), []byte("--- REDACTED ---")),
name: "load kubeconfig with references to external key/cert files",
secret: newKubeConfigSecret("ns1", "secret1",
newKubeConfig("testhost", "tls.crt", "tls.key"), []byte("--- TRUNCATED ---"), []byte("--- REDACTED ---")),
expectedCertData: []byte("--- TRUNCATED ---"),
expectedKeyData: []byte("--- REDACTED ---"),
},
@@ -631,13 +645,13 @@ func TestApplyEndpoints(t *testing.T) {
name: "create",
existing: []runtime.Object{
&corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{Name: "foo"},
ObjectMeta: metav1.ObjectMeta{Name: nameFoo},
},
},
input: &corev1.Endpoints{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "foo",
Name: nameFoo,
Namespace: nameFoo,
},
Subsets: []corev1.EndpointSubset{
{
@@ -660,7 +674,7 @@ func TestApplyEndpoints(t *testing.T) {
if len(actions) != 2 {
t.Fatal("action count mismatch")
}
if !actions[0].Matches("get", "endpoints") || actions[0].(clienttesting.GetAction).GetName() != "foo" {
if !actions[0].Matches("get", "endpoints") || actions[0].(clienttesting.GetAction).GetName() != nameFoo {
t.Error("unexpected action:", actions[0])
}
if !actions[1].Matches("create", "endpoints") {
@@ -672,12 +686,12 @@ func TestApplyEndpoints(t *testing.T) {
name: "remain same",
existing: []runtime.Object{
&corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{Name: "foo"},
ObjectMeta: metav1.ObjectMeta{Name: nameFoo},
},
&corev1.Endpoints{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "foo",
Name: nameFoo,
Namespace: nameFoo,
},
Subsets: []corev1.EndpointSubset{
{
@@ -697,8 +711,8 @@ func TestApplyEndpoints(t *testing.T) {
},
input: &corev1.Endpoints{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "foo",
Name: nameFoo,
Namespace: nameFoo,
},
Subsets: []corev1.EndpointSubset{
{
@@ -720,7 +734,7 @@ func TestApplyEndpoints(t *testing.T) {
if len(actions) != 1 {
t.Fatal("action count mismatch")
}
if !actions[0].Matches("get", "endpoints") || actions[0].(clienttesting.GetAction).GetName() != "foo" {
if !actions[0].Matches("get", "endpoints") || actions[0].(clienttesting.GetAction).GetName() != nameFoo {
t.Error("unexpected action:", actions[0])
}
},
@@ -729,12 +743,12 @@ func TestApplyEndpoints(t *testing.T) {
name: "update",
existing: []runtime.Object{
&corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{Name: "foo"},
ObjectMeta: metav1.ObjectMeta{Name: nameFoo},
},
&corev1.Endpoints{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "foo",
Name: nameFoo,
Namespace: nameFoo,
},
Subsets: []corev1.EndpointSubset{
{
@@ -754,8 +768,8 @@ func TestApplyEndpoints(t *testing.T) {
},
input: &corev1.Endpoints{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "foo",
Name: nameFoo,
Namespace: nameFoo,
},
Subsets: []corev1.EndpointSubset{
{
@@ -777,7 +791,7 @@ func TestApplyEndpoints(t *testing.T) {
if len(actions) != 2 {
t.Fatal("action count mismatch")
}
if !actions[0].Matches("get", "endpoints") || actions[0].(clienttesting.GetAction).GetName() != "foo" {
if !actions[0].Matches("get", "endpoints") || actions[0].(clienttesting.GetAction).GetName() != nameFoo {
t.Error("unexpected action:", actions[0])
}
if !actions[1].Matches("update", "endpoints") {
@@ -870,10 +884,10 @@ func TestGetRelatedResource(t *testing.T) {
relatedResource, err := GenerateRelatedResource(objData)
if !errors.Is(err, c.expectedErr) {
t.Errorf(diff.ObjectDiff(err, c.expectedErr))
t.Errorf(cmp.Diff(err, c.expectedErr))
}
if !reflect.DeepEqual(relatedResource, c.expectedRelatedResource) {
t.Errorf(diff.ObjectDiff(err, c.expectedErr))
t.Errorf(cmp.Diff(err, c.expectedErr))
}
})
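These assertions swap the deprecated diff.ObjectDiff helper from apimachinery for cmp.Diff from github.com/google/go-cmp. A tiny sketch of the replacement with hypothetical values:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

func main() {
	want := map[string]string{"cluster-name": "cluster1"}
	got := map[string]string{"cluster-name": "cluster2"}
	// cmp.Diff returns "" when the values match and a readable diff when
	// they do not, which is what the test error messages print.
	if d := cmp.Diff(want, got); d != "" {
		fmt.Println(d)
	}
}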
@@ -1267,7 +1281,7 @@ func TestSyncSecret(t *testing.T) {
Name: "sourceName",
},
Type: corev1.SecretTypeOpaque,
Data: map[string][]byte{"foo": []byte("bar")},
Data: map[string][]byte{nameFoo: []byte("bar")},
},
},
expectedSecret: &corev1.Secret{
@@ -1276,7 +1290,7 @@ func TestSyncSecret(t *testing.T) {
Name: "targetName",
},
Type: corev1.SecretTypeOpaque,
Data: map[string][]byte{"foo": []byte("bar")},
Data: map[string][]byte{nameFoo: []byte("bar")},
},
expectedChanged: true,
expectedErr: "",
@@ -1295,7 +1309,7 @@ func TestSyncSecret(t *testing.T) {
Name: "sourceName",
},
Type: corev1.SecretTypeOpaque,
Data: map[string][]byte{"foo": []byte("bar2")},
Data: map[string][]byte{nameFoo: []byte("bar2")},
},
&corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
@@ -1303,7 +1317,7 @@ func TestSyncSecret(t *testing.T) {
Name: "targetName",
},
Type: corev1.SecretTypeOpaque,
Data: map[string][]byte{"foo": []byte("bar1")},
Data: map[string][]byte{nameFoo: []byte("bar1")},
},
},
expectedSecret: &corev1.Secret{
@@ -1312,7 +1326,7 @@ func TestSyncSecret(t *testing.T) {
Name: "targetName",
},
Type: corev1.SecretTypeOpaque,
Data: map[string][]byte{"foo": []byte("bar2")},
Data: map[string][]byte{nameFoo: []byte("bar2")},
},
expectedChanged: true,
expectedErr: "",
@@ -1343,7 +1357,7 @@ func TestSyncSecret(t *testing.T) {
Name: "sourceName",
},
Type: corev1.SecretTypeServiceAccountToken,
Data: map[string][]byte{"foo": []byte("bar")},
Data: map[string][]byte{nameFoo: []byte("bar")},
},
},
expectedSecret: nil,
@@ -1363,7 +1377,7 @@ func TestSyncSecret(t *testing.T) {
Namespace: "sourceNamespace",
Name: "sourceName",
Annotations: map[string]string{
corev1.ServiceAccountNameKey: "foo",
corev1.ServiceAccountNameKey: nameFoo,
corev1.ServiceAccountUIDKey: "bar",
},
},
@@ -1389,7 +1403,8 @@ func TestSyncSecret(t *testing.T) {
client := fakekube.NewSimpleClientset(tc.existingObjects...)
clientTarget := fakekube.NewSimpleClientset()
secret, changed, err := SyncSecret(
context.TODO(), client.CoreV1(), clientTarget.CoreV1(), events.NewInMemoryRecorder("test"), tc.sourceNamespace, tc.sourceName, tc.targetNamespace, tc.targetName, tc.ownerRefs)
context.TODO(), client.CoreV1(), clientTarget.CoreV1(),
events.NewInMemoryRecorder("test"), tc.sourceNamespace, tc.sourceName, tc.targetNamespace, tc.targetName, tc.ownerRefs)
if (err == nil && len(tc.expectedErr) != 0) || (err != nil && err.Error() != tc.expectedErr) {
t.Errorf("%s: expected error %v, got %v", tc.name, tc.expectedErr, err)
@@ -1442,9 +1457,11 @@ func TestGetHubKubeconfig(t *testing.T) {
expectedErr: true,
},
{
name: "hosted mode",
mode: operatorapiv1.InstallModeHosted,
secret: []runtime.Object{newKubeConfigSecret("test", ExternalHubKubeConfig, newKubeConfig("testhost", "tls.crt", "tls.key"), []byte("--- TRUNCATED ---"), []byte("--- REDACTED ---"))},
name: "hosted mode",
mode: operatorapiv1.InstallModeHosted,
secret: []runtime.Object{
newKubeConfigSecret("test", ExternalHubKubeConfig,
newKubeConfig("testhost", "tls.crt", "tls.key"), []byte("--- TRUNCATED ---"), []byte("--- REDACTED ---"))},
namespace: "test",
expectedHost: "https://testhost:443",
expectedErr: false,


@@ -59,7 +59,7 @@ func SATokenGetter(ctx context.Context, saName, saNamespace string, saClient kub
tr, err := saClient.CoreV1().ServiceAccounts(saNamespace).
CreateToken(ctx, saName, &authv1.TokenRequest{
Spec: authv1.TokenRequestSpec{
ExpirationSeconds: pointer.Int64Ptr(8640 * 3600),
ExpirationSeconds: pointer.Int64(8640 * 3600),
},
}, metav1.CreateOptions{})
if err != nil {
@@ -80,7 +80,7 @@ func SATokenCreater(ctx context.Context, saName, saNamespace string, saClient ku
tr, err := saClient.CoreV1().ServiceAccounts(saNamespace).
CreateToken(ctx, saName, &authv1.TokenRequest{
Spec: authv1.TokenRequestSpec{
ExpirationSeconds: pointer.Int64Ptr(8640 * 3600),
ExpirationSeconds: pointer.Int64(8640 * 3600),
},
}, metav1.CreateOptions{})
if err != nil {
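pointer.Int64Ptr is the deprecated spelling in k8s.io/utils/pointer; pointer.Int64 is the drop-in replacement used here. A one-line sketch:

package main

import (
	"fmt"

	"k8s.io/utils/pointer"
)

func main() {
	// pointer.Int64 returns *int64, replacing the deprecated pointer.Int64Ptr.
	expiration := pointer.Int64(8640 * 3600)
	fmt.Println(*expiration) // 31104000
}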


@@ -194,7 +194,10 @@ func TestApplyKubeconfigSecret(t *testing.T) {
return tt.token, expiration, tt.tokenGetError
}
client := testclient.NewSimpleClientset(tt.secrets...)
err := SyncKubeConfigSecret(context.TODO(), secretName, secretNamespace, "/tmp/kubeconfig", tkc, client.CoreV1(), tokenGetter, eventstesting.NewTestingEventRecorder(t))
err := SyncKubeConfigSecret(
context.TODO(), secretName, secretNamespace,
"/tmp/kubeconfig", tkc, client.CoreV1(), tokenGetter,
eventstesting.NewTestingEventRecorder(t))
if err != nil && !tt.wantErr {
t.Error(err)
}


@@ -105,7 +105,7 @@ func (c certRotationController) sync(ctx context.Context, syncCtx factory.SyncCo
return nil
}
errs := []error{}
var errs []error
for i := range clustermanagers {
err = c.syncOne(ctx, syncCtx, clustermanagers[i])
if err != nil {
@@ -235,7 +235,7 @@ func (c certRotationController) syncOne(ctx context.Context, syncCtx factory.Syn
}
// reconcile target cert/key pairs
errs := []error{}
var errs []error
for _, targetRotation := range rotations.targetRotations {
if err := targetRotation.EnsureTargetCertKeyPair(ctx, signingCertKeyPair, cabundleCerts); err != nil {
errs = append(errs, err)


@@ -156,14 +156,14 @@ func (n *clusterManagerController) sync(ctx context.Context, controllerContext f
config.RegistrationFeatureGates, registrationFeatureMsgs = helpers.ConvertToFeatureGateFlags("Registration",
registrationFeatureGates, ocmfeature.DefaultHubRegistrationFeatureGates)
workFeatureGates := []operatorapiv1.FeatureGate{}
var workFeatureGates []operatorapiv1.FeatureGate
if clusterManager.Spec.WorkConfiguration != nil {
workFeatureGates = clusterManager.Spec.WorkConfiguration.FeatureGates
}
config.WorkFeatureGates, workFeatureMsgs = helpers.ConvertToFeatureGateFlags("Work", workFeatureGates, ocmfeature.DefaultHubWorkFeatureGates)
config.MWReplicaSetEnabled = helpers.FeatureGateEnabled(workFeatureGates, ocmfeature.DefaultHubWorkFeatureGates, ocmfeature.ManifestWorkReplicaSet)
addonFeatureGates := []operatorapiv1.FeatureGate{}
var addonFeatureGates []operatorapiv1.FeatureGate
if clusterManager.Spec.AddOnManagerConfiguration != nil {
addonFeatureGates = clusterManager.Spec.AddOnManagerConfiguration.FeatureGates
}


@@ -36,7 +36,8 @@ import (
)
var (
ctx = context.Background()
ctx = context.Background()
createVerb = "create"
)
type testController struct {
@@ -260,7 +261,8 @@ func setup(t *testing.T, tc *testController, cd []runtime.Object, crds ...runtim
// set clients in clustermanager controller
tc.clusterManagerController.recorder = eventstesting.NewTestingEventRecorder(t)
tc.clusterManagerController.operatorKubeClient = fakeManagementKubeClient
tc.clusterManagerController.generateHubClusterClients = func(hubKubeConfig *rest.Config) (kubernetes.Interface, apiextensionsclient.Interface, migrationclient.StorageVersionMigrationsGetter, error) {
tc.clusterManagerController.generateHubClusterClients = func(hubKubeConfig *rest.Config) (
kubernetes.Interface, apiextensionsclient.Interface, migrationclient.StorageVersionMigrationsGetter, error) {
return fakeHubKubeClient, fakeAPIExtensionClient, fakeMigrationClient.MigrationV1alpha1(), nil
}
tc.clusterManagerController.ensureSAKubeconfigs = func(ctx context.Context,
@@ -308,10 +310,10 @@ func TestSyncDeploy(t *testing.T) {
t.Fatalf("Expected no error when sync, %v", err)
}
createKubeObjects := []runtime.Object{}
var createKubeObjects []runtime.Object
kubeActions := append(tc.hubKubeClient.Actions(), tc.managementKubeClient.Actions()...) // record objects from both hub and management cluster
for _, action := range kubeActions {
if action.GetVerb() == "create" {
if action.GetVerb() == createVerb {
object := action.(clienttesting.CreateActionImpl).Object
createKubeObjects = append(createKubeObjects, object)
}
@@ -324,10 +326,10 @@ func TestSyncDeploy(t *testing.T) {
ensureObject(t, object, clusterManager)
}
createCRDObjects := []runtime.Object{}
var createCRDObjects []runtime.Object
crdActions := tc.apiExtensionClient.Actions()
for _, action := range crdActions {
if action.GetVerb() == "create" {
if action.GetVerb() == createVerb {
object := action.(clienttesting.CreateActionImpl).Object
createCRDObjects = append(createCRDObjects, object)
}
@@ -348,10 +350,10 @@ func TestSyncDeployNoWebhook(t *testing.T) {
t.Fatalf("Expected no error when sync, %v", err)
}
createKubeObjects := []runtime.Object{}
var createKubeObjects []runtime.Object
kubeActions := append(tc.hubKubeClient.Actions(), tc.managementKubeClient.Actions()...) // record objects from both hub and management cluster
for _, action := range kubeActions {
if action.GetVerb() == "create" {
if action.GetVerb() == createVerb {
object := action.(clienttesting.CreateActionImpl).Object
createKubeObjects = append(createKubeObjects, object)
}
@@ -364,10 +366,10 @@ func TestSyncDeployNoWebhook(t *testing.T) {
ensureObject(t, object, clusterManager)
}
createCRDObjects := []runtime.Object{}
var createCRDObjects []runtime.Object
crdActions := tc.apiExtensionClient.Actions()
for _, action := range crdActions {
if action.GetVerb() == "create" {
if action.GetVerb() == createVerb {
object := action.(clienttesting.CreateActionImpl).Object
createCRDObjects = append(createCRDObjects, object)
}
@@ -393,7 +395,7 @@ func TestSyncDelete(t *testing.T) {
t.Fatalf("Expected non error when sync, %v", err)
}
deleteKubeActions := []clienttesting.DeleteActionImpl{}
var deleteKubeActions []clienttesting.DeleteActionImpl
kubeActions := append(tc.hubKubeClient.Actions(), tc.managementKubeClient.Actions()...)
for _, action := range kubeActions {
if action.GetVerb() == "delete" {
@@ -403,7 +405,7 @@ func TestSyncDelete(t *testing.T) {
}
testingcommon.AssertEqualNumber(t, len(deleteKubeActions), 29) // delete namespace both from the hub cluster and the mangement cluster
deleteCRDActions := []clienttesting.DeleteActionImpl{}
var deleteCRDActions []clienttesting.DeleteActionImpl
crdActions := tc.apiExtensionClient.Actions()
for _, action := range crdActions {
if action.GetVerb() == "delete" {


@@ -42,7 +42,7 @@ type crdStatusController struct {
generateHubClusterClients func(hubConfig *rest.Config) (apiextensionsclient.Interface, error)
}
// NewClusterManagerController construct cluster manager hub controller
// NewCRDStatusController construct crd status controller
func NewCRDStatusController(
kubeconfig *rest.Config,
kubeClient kubernetes.Interface,


@@ -70,7 +70,7 @@ type crdMigrationController struct {
generateHubClusterClients func(hubConfig *rest.Config) (apiextensionsclient.Interface, migrationv1alpha1client.StorageVersionMigrationsGetter, error)
}
// NewClusterManagerController construct cluster manager hub controller
// NewCRDMigrationController construct crd migration controller
func NewCRDMigrationController(
kubeconfig *rest.Config,
kubeClient kubernetes.Interface,
@@ -231,7 +231,7 @@ func applyStorageVersionMigrations(ctx context.Context,
continue
}
_, _, err = applyStorageVersionMigration(migrationClient, required, recorder)
_, _, err = applyStorageVersionMigration(ctx, migrationClient, required, recorder)
if err != nil {
errs = append(errs, err)
continue
@@ -337,6 +337,7 @@ func parseStorageVersionMigrationFile(
}
func applyStorageVersionMigration(
ctx context.Context,
client migrationv1alpha1client.StorageVersionMigrationsGetter,
required *migrationv1alpha1.StorageVersionMigration,
recorder events.Recorder,
@@ -344,7 +345,7 @@ func applyStorageVersionMigration(
if required == nil {
return nil, false, fmt.Errorf("required StorageVersionMigration is nil")
}
existing, err := client.StorageVersionMigrations().Get(context.TODO(), required.Name, metav1.GetOptions{})
existing, err := client.StorageVersionMigrations().Get(ctx, required.Name, metav1.GetOptions{})
if errors.IsNotFound(err) {
actual, err := client.StorageVersionMigrations().Create(context.TODO(), required, metav1.CreateOptions{})
if err != nil {
@@ -370,7 +371,7 @@ func applyStorageVersionMigration(
return existing, false, nil
}
actual, err := client.StorageVersionMigrations().Update(context.TODO(), existingCopy, metav1.UpdateOptions{})
actual, err := client.StorageVersionMigrations().Update(ctx, existingCopy, metav1.UpdateOptions{})
if err != nil {
recorder.Warningf("StorageVersionMigrationUpdateFailed", "Failed to update %s: %v", resourcehelper.FormatResourceForCLIWithNamespace(existingCopy), err)
return actual, true, err
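applyStorageVersionMigration now accepts a context and threads it into the Get and Update calls in place of context.TODO() (the Create path shown in the surrounding context still uses context.TODO()). A minimal sketch of why passing the caller's context down matters (hypothetical function, not the controller above):

package main

import (
	"context"
	"fmt"
	"time"
)

// fetch accepts the caller's context so cancellation and deadlines propagate,
// which context.TODO() inside the helper would silently drop.
func fetch(ctx context.Context, name string) (string, error) {
	select {
	case <-time.After(10 * time.Millisecond):
		return "value-for-" + name, nil
	case <-ctx.Done():
		return "", ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	v, err := fetch(ctx, "example")
	fmt.Println(v, err) // value-for-example <nil>
}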


@@ -433,7 +433,10 @@ func TestSync(t *testing.T) {
}
}
func newTestController(t *testing.T, clustermanager *operatorapiv1.ClusterManager, crds ...runtime.Object) (*crdMigrationController, *fakeoperatorlient.Clientset) {
func newTestController(
t *testing.T,
clustermanager *operatorapiv1.ClusterManager,
crds ...runtime.Object) (*crdMigrationController, *fakeoperatorlient.Clientset) {
fakeOperatorClient := fakeoperatorlient.NewSimpleClientset(clustermanager)
operatorInformers := operatorinformers.NewSharedInformerFactory(fakeOperatorClient, 5*time.Minute)
fakeAPIExtensionClient := fakeapiextensions.NewSimpleClientset(crds...)
@@ -446,7 +449,8 @@ func newTestController(t *testing.T, clustermanager *operatorapiv1.ClusterManage
*operatorapiv1.ClusterManager, operatorapiv1.ClusterManagerSpec, operatorapiv1.ClusterManagerStatus](
fakeOperatorClient.OperatorV1().ClusterManagers()),
}
crdMigrationController.generateHubClusterClients = func(hubKubeConfig *rest.Config) (apiextensionsclient.Interface, migrationv1alpha1client.StorageVersionMigrationsGetter, error) {
crdMigrationController.generateHubClusterClients = func(
hubKubeConfig *rest.Config) (apiextensionsclient.Interface, migrationv1alpha1client.StorageVersionMigrationsGetter, error) {
return fakeAPIExtensionClient, fakeMigrationClient.MigrationV1alpha1(), nil
}
store := operatorInformers.Operator().V1().ClusterManagers().Informer().GetStore()


@@ -80,10 +80,10 @@ func (s *clusterManagerStatusController) sync(ctx context.Context, controllerCon
clusterManagerNamespace := helpers.ClusterManagerNamespace(clusterManagerName, clusterManager.Spec.DeployOption.Mode)
newClusterManager := clusterManager.DeepCopy()
registrationCond := s.updateStatusOfRegistration(ctx, clusterManager.Name, clusterManagerNamespace)
registrationCond := s.updateStatusOfRegistration(clusterManager.Name, clusterManagerNamespace)
registrationCond.ObservedGeneration = clusterManager.Generation
meta.SetStatusCondition(&newClusterManager.Status.Conditions, registrationCond)
placementCond := s.updateStatusOfPlacement(ctx, clusterManager.Name, clusterManagerNamespace)
placementCond := s.updateStatusOfPlacement(clusterManager.Name, clusterManagerNamespace)
placementCond.ObservedGeneration = clusterManager.Generation
meta.SetStatusCondition(&newClusterManager.Status.Conditions, placementCond)
@@ -92,7 +92,7 @@ func (s *clusterManagerStatusController) sync(ctx context.Context, controllerCon
}
// updateStatusOfRegistration checks registration deployment status and updates condition of clustermanager
func (s *clusterManagerStatusController) updateStatusOfRegistration(ctx context.Context, clusterManagerName, clusterManagerNamespace string) metav1.Condition {
func (s *clusterManagerStatusController) updateStatusOfRegistration(clusterManagerName, clusterManagerNamespace string) metav1.Condition {
// Check registration deployment status
registrationDeploymentName := fmt.Sprintf("%s-registration-controller", clusterManagerName)
registrationDeployment, err := s.deploymentLister.Deployments(clusterManagerNamespace).Get(registrationDeploymentName)
@@ -124,7 +124,7 @@ func (s *clusterManagerStatusController) updateStatusOfRegistration(ctx context.
}
// updateStatusOfRegistration checks placement deployment status and updates condition of clustermanager
func (s *clusterManagerStatusController) updateStatusOfPlacement(ctx context.Context, clusterManagerName, clusterManagerNamespace string) metav1.Condition {
func (s *clusterManagerStatusController) updateStatusOfPlacement(clusterManagerName, clusterManagerNamespace string) metav1.Condition {
// Check registration deployment status
placementDeploymentName := fmt.Sprintf("%s-placement-controller", clusterManagerName)
placementDeployment, err := s.deploymentLister.Deployments(clusterManagerNamespace).Get(placementDeploymentName)


@@ -55,7 +55,7 @@ func NewBootstrapController(
secretInformers: secretInformers,
}
return factory.New().WithSync(controller.sync).
WithInformersQueueKeyFunc(bootstrapSecretQueueKeyFunc(controller.klusterletLister),
WithInformersQueueKeysFunc(bootstrapSecretQueueKeyFunc(controller.klusterletLister),
secretInformers[helpers.HubKubeConfig].Informer(),
secretInformers[helpers.BootstrapHubKubeConfig].Informer(),
secretInformers[helpers.ExternalManagedKubeConfig].Informer()).
@@ -110,6 +110,7 @@ func (k *bootstrapController) sync(ctx context.Context, controllerContext factor
return nil
}
// #nosec G101
hubKubeconfigSecret, err := k.secretInformers[helpers.HubKubeConfig].Lister().Secrets(agentNamespace).Get(helpers.HubKubeConfig)
switch {
case errors.IsNotFound(err):
@@ -203,28 +204,28 @@ func (k *bootstrapController) loadKubeConfig(secret *corev1.Secret) (*clientcmda
return cluster, nil
}
func bootstrapSecretQueueKeyFunc(klusterletLister operatorlister.KlusterletLister) factory.ObjectQueueKeyFunc {
return func(obj runtime.Object) string {
func bootstrapSecretQueueKeyFunc(klusterletLister operatorlister.KlusterletLister) factory.ObjectQueueKeysFunc {
return func(obj runtime.Object) []string {
accessor, err := meta.Accessor(obj)
if err != nil {
return ""
return []string{}
}
name := accessor.GetName()
if name != helpers.BootstrapHubKubeConfig {
return ""
return []string{}
}
namespace := accessor.GetNamespace()
klusterlets, err := klusterletLister.List(labels.Everything())
if err != nil {
return ""
return []string{}
}
if klusterlet := helpers.FindKlusterletByNamespace(klusterlets, namespace); klusterlet != nil {
return namespace + "/" + klusterlet.Name
return []string{namespace + "/" + klusterlet.Name}
}
return ""
return []string{}
}
}
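The controller wiring above moves from the single-key queue functions to the multi-key variants: the mapping function now returns a []string, and "no key" is an empty slice rather than an empty string. A standalone sketch of such a keys function (hypothetical klusterlet name; the factory wiring itself is omitted):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// secretQueueKeys maps a watched Secret to zero or more queue keys. Returning
// a slice lets one event fan out to several reconcile keys.
func secretQueueKeys(obj runtime.Object) []string {
	accessor, err := meta.Accessor(obj)
	if err != nil {
		return []string{}
	}
	if accessor.GetName() != "bootstrap-hub-kubeconfig" {
		return []string{}
	}
	return []string{accessor.GetNamespace() + "/klusterlet"}
}

func main() {
	s := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Namespace: "test", Name: "bootstrap-hub-kubeconfig"}}
	fmt.Println(secretQueueKeys(s)) // [test/klusterlet]
}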


@@ -13,6 +13,7 @@ import (
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
@@ -171,25 +172,25 @@ func TestBootstrapSecretQueueKeyFunc(t *testing.T) {
name string
object runtime.Object
klusterlet *operatorapiv1.Klusterlet
expectedKey string
expectedKey []string
}{
{
name: "key by bootstrap secret",
object: newSecret("bootstrap-hub-kubeconfig", "test", []byte{}),
klusterlet: newKlusterlet("testklusterlet", "test"),
expectedKey: "test/testklusterlet",
expectedKey: []string{"test/testklusterlet"},
},
{
name: "key by wrong secret",
object: newSecret("dummy", "test", []byte{}),
klusterlet: newKlusterlet("testklusterlet", "test"),
expectedKey: "",
expectedKey: []string{},
},
{
name: "key by klusterlet with empty namespace",
object: newSecret("bootstrap-hub-kubeconfig", "open-cluster-management-agent", []byte{}),
klusterlet: newKlusterlet("testklusterlet", ""),
expectedKey: "open-cluster-management-agent/testklusterlet",
expectedKey: []string{"open-cluster-management-agent/testklusterlet"},
},
}
@@ -203,7 +204,7 @@ func TestBootstrapSecretQueueKeyFunc(t *testing.T) {
}
keyFunc := bootstrapSecretQueueKeyFunc(operatorInformers.Operator().V1().Klusterlets().Lister())
actualKey := keyFunc(c.object)
if actualKey != c.expectedKey {
if !equality.Semantic.DeepEqual(actualKey, c.expectedKey) {
t.Errorf("Queued key is not correct: actual %s, expected %s", actualKey, c.expectedKey)
}
})


@@ -198,7 +198,7 @@ func (n *klusterletCleanupController) sync(ctx context.Context, controllerContex
return n.patcher.RemoveFinalizer(ctx, klusterlet, klusterletFinalizer, klusterletHostedFinalizer)
}
func (r *klusterletCleanupController) checkConnectivity(ctx context.Context,
func (n *klusterletCleanupController) checkConnectivity(ctx context.Context,
amwClient workv1client.AppliedManifestWorkInterface,
klusterlet *operatorapiv1.Klusterlet) (cleanupManagedClusterResources bool, err error) {
_, err = amwClient.List(ctx, metav1.ListOptions{})


@@ -39,7 +39,7 @@ func TestSyncDelete(t *testing.T) {
var deleteActions []clienttesting.DeleteActionImpl
kubeActions := controller.kubeClient.Actions()
for _, action := range kubeActions {
if action.GetVerb() == "delete" {
if action.GetVerb() == deleteVerb {
deleteAction := action.(clienttesting.DeleteActionImpl)
klog.Infof("kube delete name: %v\t resource:%v \t namespace:%v", deleteAction.Name, deleteAction.GetResource(), deleteAction.GetNamespace())
deleteActions = append(deleteActions, deleteAction)
@@ -97,7 +97,7 @@ func TestSyncDeleteHosted(t *testing.T) {
var deleteActionsManagement []clienttesting.DeleteActionImpl
kubeActions := controller.kubeClient.Actions()
for _, action := range kubeActions {
if action.GetVerb() == "delete" {
if action.GetVerb() == deleteVerb {
deleteAction := action.(clienttesting.DeleteActionImpl)
klog.Infof("management kube delete name: %v\t resource:%v \t namespace:%v", deleteAction.Name, deleteAction.GetResource(), deleteAction.GetNamespace())
deleteActionsManagement = append(deleteActionsManagement, deleteAction)
@@ -112,7 +112,7 @@ func TestSyncDeleteHosted(t *testing.T) {
var deleteActionsManaged []clienttesting.DeleteActionImpl
for _, action := range controller.managedKubeClient.Actions() {
if action.GetVerb() == "delete" {
if action.GetVerb() == deleteVerb {
deleteAction := action.(clienttesting.DeleteActionImpl)
klog.Infof("managed kube delete name: %v\t resource:%v \t namespace:%v", deleteAction.Name, deleteAction.GetResource(), deleteAction.GetNamespace())
deleteActionsManaged = append(deleteActionsManaged, deleteAction)
@@ -177,7 +177,7 @@ func TestSyncDeleteHostedDeleteWaitKubeconfig(t *testing.T) {
// assert no delete action on the management cluster,should wait for the kubeconfig
for _, action := range controller.kubeClient.Actions() {
if action.GetVerb() == "delete" {
if action.GetVerb() == deleteVerb {
t.Errorf("Expected not delete the resources, should wait for the kubeconfig, but got delete actions")
}
}


@@ -238,7 +238,7 @@ func (n *klusterletController) sync(ctx context.Context, controllerContext facto
config.RegistrationFeatureGates, registrationFeatureMsgs = helpers.ConvertToFeatureGateFlags("Registration",
registrationFeatureGates, ocmfeature.DefaultSpokeRegistrationFeatureGates)
workFeatureGates := []operatorapiv1.FeatureGate{}
var workFeatureGates []operatorapiv1.FeatureGate
if klusterlet.Spec.WorkConfiguration != nil {
workFeatureGates = klusterlet.Spec.WorkConfiguration.FeatureGates
}


@@ -40,6 +40,12 @@ import (
testinghelper "open-cluster-management.io/ocm/pkg/operator/helpers/testing"
)
const (
createVerb = "create"
deleteVerb = "delete"
crdResourceName = "customresourcedefinitions"
)
type testController struct {
controller *klusterletController
cleanupController *klusterletCleanupController
@@ -198,7 +204,10 @@ func newTestController(t *testing.T, klusterlet *operatorapiv1.Klusterlet, appli
}
}
func newTestControllerHosted(t *testing.T, klusterlet *operatorapiv1.Klusterlet, appliedManifestWorks []runtime.Object, objects ...runtime.Object) *testController {
func newTestControllerHosted(
t *testing.T, klusterlet *operatorapiv1.Klusterlet,
appliedManifestWorks []runtime.Object,
objects ...runtime.Object) *testController {
fakeKubeClient := fakekube.NewSimpleClientset(objects...)
fakeAPIExtensionClient := fakeapiextensions.NewSimpleClientset()
fakeOperatorClient := fakeoperatorclient.NewSimpleClientset(klusterlet)
@@ -316,13 +325,13 @@ func (c *testController) setDefaultManagedClusterClientsBuilder() *testControlle
func getDeployments(actions []clienttesting.Action, verb, suffix string) *appsv1.Deployment {
deployments := []*appsv1.Deployment{}
var deployments []*appsv1.Deployment
for _, action := range actions {
if action.GetVerb() != verb || action.GetResource().Resource != "deployments" {
continue
}
if verb == "create" {
if verb == createVerb {
object := action.(clienttesting.CreateActionImpl).Object
deployments = append(deployments, object.(*appsv1.Deployment))
}
@@ -402,8 +411,9 @@ func assertWorkDeployment(t *testing.T, actions []clienttesting.Action, verb, cl
}
if mode == operatorapiv1.InstallModeHosted {
expectArgs = append(expectArgs, "--spoke-kubeconfig=/spoke/config/kubeconfig")
expectArgs = append(expectArgs, "--terminate-on-files=/spoke/config/kubeconfig")
expectArgs = append(expectArgs,
"--spoke-kubeconfig=/spoke/config/kubeconfig",
"--terminate-on-files=/spoke/config/kubeconfig")
}
expectArgs = append(expectArgs, "--terminate-on-files=/spoke/hub-kubeconfig/kubeconfig")
@@ -477,10 +487,10 @@ func TestSyncDeploy(t *testing.T) {
t.Errorf("Expected non error when sync, %v", err)
}
createObjects := []runtime.Object{}
var createObjects []runtime.Object
kubeActions := controller.kubeClient.Actions()
for _, action := range kubeActions {
if action.GetVerb() == "create" {
if action.GetVerb() == createVerb {
object := action.(clienttesting.CreateActionImpl).Object
createObjects = append(createObjects, object)
@@ -497,9 +507,9 @@ func TestSyncDeploy(t *testing.T) {
}
apiExtenstionAction := controller.apiExtensionClient.Actions()
createCRDObjects := []runtime.Object{}
var createCRDObjects []runtime.Object
for _, action := range apiExtenstionAction {
if action.GetVerb() == "create" && action.GetResource().Resource == "customresourcedefinitions" {
if action.GetVerb() == createVerb && action.GetResource().Resource == crdResourceName {
object := action.(clienttesting.CreateActionImpl).Object
createCRDObjects = append(createCRDObjects, object)
}
@@ -538,10 +548,10 @@ func TestSyncDeploySingleton(t *testing.T) {
t.Errorf("Expected non error when sync, %v", err)
}
createObjects := []runtime.Object{}
var createObjects []runtime.Object
kubeActions := controller.kubeClient.Actions()
for _, action := range kubeActions {
if action.GetVerb() == "create" {
if action.GetVerb() == createVerb {
object := action.(clienttesting.CreateActionImpl).Object
createObjects = append(createObjects, object)
@@ -558,9 +568,9 @@ func TestSyncDeploySingleton(t *testing.T) {
}
apiExtenstionAction := controller.apiExtensionClient.Actions()
createCRDObjects := []runtime.Object{}
var createCRDObjects []runtime.Object
for _, action := range apiExtenstionAction {
if action.GetVerb() == "create" && action.GetResource().Resource == "customresourcedefinitions" {
if action.GetVerb() == createVerb && action.GetResource().Resource == crdResourceName {
object := action.(clienttesting.CreateActionImpl).Object
createCRDObjects = append(createCRDObjects, object)
}
@@ -608,17 +618,18 @@ func TestSyncDeployHosted(t *testing.T) {
t.Errorf("Expected non error when sync, %v", err)
}
createObjectsManagement := []runtime.Object{}
var createObjectsManagement []runtime.Object
kubeActions := controller.kubeClient.Actions()
for _, action := range kubeActions {
if action.GetVerb() == "create" {
if action.GetVerb() == createVerb {
object := action.(clienttesting.CreateActionImpl).Object
klog.Infof("management kube create: %v\t resource:%v \t namespace:%v", object.GetObjectKind(), action.GetResource(), action.GetNamespace())
createObjectsManagement = append(createObjectsManagement, object)
}
}
// Check if resources are created as expected on the management cluster
// 11 static manifests + 2 secrets(external-managed-kubeconfig-registration,external-managed-kubeconfig-work) + 2 deployments(registration-agent,work-agent) + 1 pull secret
// 11 static manifests + 2 secrets(external-managed-kubeconfig-registration,external-managed-kubeconfig-work) +
// 2 deployments(registration-agent,work-agent) + 1 pull secret
if len(createObjectsManagement) != 16 {
t.Errorf("Expect 16 objects created in the sync loop, actual %d", len(createObjectsManagement))
}
@@ -626,9 +637,9 @@ func TestSyncDeployHosted(t *testing.T) {
ensureObject(t, object, klusterlet)
}
createObjectsManaged := []runtime.Object{}
var createObjectsManaged []runtime.Object
for _, action := range controller.managedKubeClient.Actions() {
if action.GetVerb() == "create" {
if action.GetVerb() == createVerb {
object := action.(clienttesting.CreateActionImpl).Object
klog.Infof("managed kube create: %v\t resource:%v \t namespace:%v", object.GetObjectKind().GroupVersionKind(), action.GetResource(), action.GetNamespace())
@@ -645,9 +656,9 @@ func TestSyncDeployHosted(t *testing.T) {
}
apiExtenstionAction := controller.apiExtensionClient.Actions()
createCRDObjects := []runtime.Object{}
var createCRDObjects []runtime.Object
for _, action := range apiExtenstionAction {
if action.GetVerb() == "create" && action.GetResource().Resource == "customresourcedefinitions" {
if action.GetVerb() == createVerb && action.GetResource().Resource == crdResourceName {
object := action.(clienttesting.CreateActionImpl).Object
createCRDObjects = append(createCRDObjects, object)
}
@@ -656,9 +667,9 @@ func TestSyncDeployHosted(t *testing.T) {
t.Errorf("Expect 0 objects created in the sync loop, actual %d", len(createCRDObjects))
}
createCRDObjectsManaged := []runtime.Object{}
var createCRDObjectsManaged []runtime.Object
for _, action := range controller.managedApiExtensionClient.Actions() {
if action.GetVerb() == "create" && action.GetResource().Resource == "customresourcedefinitions" {
if action.GetVerb() == createVerb && action.GetResource().Resource == crdResourceName {
object := action.(clienttesting.CreateActionImpl).Object
createCRDObjectsManaged = append(createCRDObjectsManaged, object)
}
@@ -692,7 +703,7 @@ func TestSyncDeployHostedCreateAgentNamespace(t *testing.T) {
klusterlet := newKlusterletHosted("klusterlet", "testns", "cluster1")
meta.SetStatusCondition(&klusterlet.Status.Conditions, metav1.Condition{
Type: klusterletReadyToApply, Status: metav1.ConditionFalse, Reason: "KlusterletPrepareFailed",
Message: fmt.Sprintf("Failed to build managed cluster clients: secrets \"external-managed-kubeconfig\" not found"),
Message: "Failed to build managed cluster clients: secrets \"external-managed-kubeconfig\" not found",
})
controller := newTestControllerHosted(t, klusterlet, nil).setDefaultManagedClusterClientsBuilder()
syncContext := testingcommon.NewFakeSyncContext(t, "klusterlet")
@@ -704,7 +715,7 @@ func TestSyncDeployHostedCreateAgentNamespace(t *testing.T) {
kubeActions := controller.kubeClient.Actions()
testingcommon.AssertGet(t, kubeActions[0], "", "v1", "namespaces")
testingcommon.AssertAction(t, kubeActions[1], "create")
testingcommon.AssertAction(t, kubeActions[1], createVerb)
if kubeActions[1].GetResource().Resource != "namespaces" {
t.Errorf("expect object namespaces, but got %v", kubeActions[2].GetResource().Resource)
}
@@ -774,8 +785,8 @@ func TestReplica(t *testing.T) {
}
// should have 1 replica for registration deployment and 0 for work
assertRegistrationDeployment(t, controller.kubeClient.Actions(), "create", "", "cluster1", 1)
assertWorkDeployment(t, controller.kubeClient.Actions(), "create", "cluster1", operatorapiv1.InstallModeDefault, 0)
assertRegistrationDeployment(t, controller.kubeClient.Actions(), createVerb, "", "cluster1", 1)
assertWorkDeployment(t, controller.kubeClient.Actions(), createVerb, "cluster1", operatorapiv1.InstallModeDefault, 0)
klusterlet = newKlusterlet("klusterlet", "testns", "cluster1")
klusterlet.Status.Conditions = []metav1.Condition{
@@ -838,7 +849,7 @@ func TestClusterNameChange(t *testing.T) {
}
// Check if deployment has the right cluster name set
assertRegistrationDeployment(t, controller.kubeClient.Actions(), "create", "", "cluster1", 1)
assertRegistrationDeployment(t, controller.kubeClient.Actions(), createVerb, "", "cluster1", 1)
operatorAction := controller.operatorClient.Actions()
testingcommon.AssertActions(t, operatorAction, "patch")
@@ -928,7 +939,7 @@ func TestSyncWithPullSecret(t *testing.T) {
var createdSecret *corev1.Secret
kubeActions := controller.kubeClient.Actions()
for _, action := range kubeActions {
if action.GetVerb() == "create" && action.GetResource().Resource == "secrets" {
if action.GetVerb() == createVerb && action.GetResource().Resource == "secrets" {
createdSecret = action.(clienttesting.CreateActionImpl).Object.(*corev1.Secret)
break
}
@@ -958,17 +969,18 @@ func TestDeployOnKube111(t *testing.T) {
t.Errorf("Expected non error when sync, %v", err)
}
createObjects := []runtime.Object{}
var createObjects []runtime.Object
kubeActions := controller.kubeClient.Actions()
for _, action := range kubeActions {
if action.GetVerb() == "create" {
if action.GetVerb() == createVerb {
object := action.(clienttesting.CreateActionImpl).Object
createObjects = append(createObjects, object)
}
}
// Check if resources are created as expected
// 11 managed static manifests + 11 management static manifests - 2 duplicated service account manifests + 1 addon namespace + 2 deployments + 2 kube111 clusterrolebindings
// 11 managed static manifests + 11 management static manifests -
// 2 duplicated service account manifests + 1 addon namespace + 2 deployments + 2 kube111 clusterrolebindings
if len(createObjects) != 25 {
t.Errorf("Expect 25 objects created in the sync loop, actual %d", len(createObjects))
}
@@ -1003,7 +1015,7 @@ func TestDeployOnKube111(t *testing.T) {
t.Errorf("Expected non error when sync, %v", err)
}
deleteActions := []clienttesting.DeleteActionImpl{}
var deleteActions []clienttesting.DeleteActionImpl
kubeActions = controller.kubeClient.Actions()
for _, action := range kubeActions {
if action.GetVerb() == "delete" {
@@ -1054,19 +1066,19 @@ type fakeManagedClusterBuilder struct {
fakeWorkClient *fakeworkclient.Clientset
}
func (f *fakeManagedClusterBuilder) withMode(mode operatorapiv1.InstallMode) managedClusterClientsBuilderInterface {
func (f *fakeManagedClusterBuilder) withMode(_ operatorapiv1.InstallMode) managedClusterClientsBuilderInterface {
return f
}
func (f *fakeManagedClusterBuilder) withKubeConfigSecret(namespace, name string) managedClusterClientsBuilderInterface {
func (f *fakeManagedClusterBuilder) withKubeConfigSecret(_, _ string) managedClusterClientsBuilderInterface {
return f
}
func (m *fakeManagedClusterBuilder) build(ctx context.Context) (*managedClusterClients, error) {
func (f *fakeManagedClusterBuilder) build(_ context.Context) (*managedClusterClients, error) {
return &managedClusterClients{
kubeClient: m.fakeKubeClient,
apiExtensionClient: m.fakeAPIExtensionClient,
appliedManifestWorkClient: m.fakeWorkClient.WorkV1().AppliedManifestWorks(),
kubeClient: f.fakeKubeClient,
apiExtensionClient: f.fakeAPIExtensionClient,
appliedManifestWorkClient: f.fakeWorkClient.WorkV1().AppliedManifestWorks(),
kubeconfig: &rest.Config{
Host: "testhost",
TLSClientConfig: rest.TLSClientConfig{


@@ -22,7 +22,7 @@ import (
workapiv1 "open-cluster-management.io/api/work/v1"
"open-cluster-management.io/ocm/manifests"
patcher "open-cluster-management.io/ocm/pkg/common/patcher"
"open-cluster-management.io/ocm/pkg/common/patcher"
"open-cluster-management.io/ocm/pkg/operator/helpers"
)
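
This import hunk (and the similar ones further down for scheduling and cache) removes an alias that merely repeats the package's own name, which linters report as redundant. A minimal compilable sketch of the before/after shape, using a standard-library package rather than the repo's imports:

package main

// Before (flagged): import strings "strings"
// After (clean):    import "strings"
import (
	"fmt"
	"strings"
)

func main() {
	fmt.Println(strings.ToUpper("redundant alias removed"))
}
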
@@ -170,7 +170,7 @@ func (r *managedReconcile) clean(ctx context.Context, klusterlet *operatorapiv1.
// cleanUpAppliedManifestWorks removes finalizer from the AppliedManifestWorks whose name starts with
// the hash of the given hub host.
func (r *managedReconcile) cleanUpAppliedManifestWorks(ctx context.Context, klusterlet *operatorapiv1.Klusterlet, config klusterletConfig) error {
func (r *managedReconcile) cleanUpAppliedManifestWorks(ctx context.Context, klusterlet *operatorapiv1.Klusterlet, _ klusterletConfig) error {
appliedManifestWorks, err := r.managedClusterClients.appliedManifestWorkClient.List(ctx, metav1.ListOptions{})
if errors.IsNotFound(err) {
return nil


@@ -336,7 +336,6 @@ func checkHubConfigSecret(ctx context.Context, kubeClient kubernetes.Interface,
func getHubConfigSSARs(clusterName string) []authorizationv1.SelfSubjectAccessReview {
var reviews []authorizationv1.SelfSubjectAccessReview
// registration resources
certResource := authorizationv1.ResourceAttributes{
Group: "certificates.k8s.io",


@@ -154,7 +154,7 @@ func checkAgentDeploymentDesired(ctx context.Context, kubeClient kubernetes.Inte
// Check agent deployments; if both of them have at least 1 available replica, return the available condition
func checkAgentsDeploymentAvailable(ctx context.Context, kubeClient kubernetes.Interface, agents []klusterletAgent) metav1.Condition {
availableMessages := []string{}
var availableMessages []string
for _, agent := range agents {
deployment, err := kubeClient.AppsV1().Deployments(agent.namespace).Get(ctx, agent.deploymentName, metav1.GetOptions{})
if err != nil {


@@ -14,7 +14,7 @@ import (
clusterscheme "open-cluster-management.io/api/client/cluster/clientset/versioned/scheme"
clusterinformers "open-cluster-management.io/api/client/cluster/informers/externalversions"
scheduling "open-cluster-management.io/ocm/pkg/placement/controllers/scheduling"
"open-cluster-management.io/ocm/pkg/placement/controllers/scheduling"
"open-cluster-management.io/ocm/pkg/placement/debugger"
)


@@ -5,7 +5,7 @@ import (
"reflect"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
cache "k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/cache"
clusterapiv1 "open-cluster-management.io/api/cluster/v1"
)


@@ -97,7 +97,7 @@ func TestOnClusterChange(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
clusterClient := clusterfake.NewSimpleClientset(c.initObjs...)
clusterInformerFactory := newClusterInformerFactory(clusterClient, c.initObjs...)
clusterInformerFactory := newClusterInformerFactory(t, clusterClient, c.initObjs...)
syncCtx := testingcommon.NewFakeSyncContext(t, "fake")
q := newEnqueuer(
@@ -259,7 +259,7 @@ func TestOnClusterUpdate(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
clusterClient := clusterfake.NewSimpleClientset(c.initObjs...)
clusterInformerFactory := newClusterInformerFactory(clusterClient, c.initObjs...)
clusterInformerFactory := newClusterInformerFactory(t, clusterClient, c.initObjs...)
syncCtx := testingcommon.NewFakeSyncContext(t, "fake")
q := newEnqueuer(
@@ -361,7 +361,7 @@ func TestOnClusterDelete(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
clusterClient := clusterfake.NewSimpleClientset(c.initObjs...)
clusterInformerFactory := newClusterInformerFactory(clusterClient, c.initObjs...)
clusterInformerFactory := newClusterInformerFactory(t, clusterClient, c.initObjs...)
syncCtx := testingcommon.NewFakeSyncContext(t, "fake")
q := newEnqueuer(


@@ -23,17 +23,23 @@ import (
testinghelpers "open-cluster-management.io/ocm/pkg/placement/helpers/testing"
)
func newClusterInformerFactory(clusterClient clusterclient.Interface, objects ...runtime.Object) clusterinformers.SharedInformerFactory {
func newClusterInformerFactory(t *testing.T, clusterClient clusterclient.Interface, objects ...runtime.Object) clusterinformers.SharedInformerFactory {
clusterInformerFactory := clusterinformers.NewSharedInformerFactory(clusterClient, time.Minute*10)
clusterInformerFactory.Cluster().V1beta1().Placements().Informer().AddIndexers(cache.Indexers{
err := clusterInformerFactory.Cluster().V1beta1().Placements().Informer().AddIndexers(cache.Indexers{
placementsByScore: indexPlacementsByScore,
placementsByClusterSetBinding: indexPlacementByClusterSetBinding,
})
if err != nil {
t.Fatal(err)
}
clusterInformerFactory.Cluster().V1beta2().ManagedClusterSetBindings().Informer().AddIndexers(cache.Indexers{
err = clusterInformerFactory.Cluster().V1beta2().ManagedClusterSetBindings().Informer().AddIndexers(cache.Indexers{
clustersetBindingsByClusterSet: indexClusterSetBindingByClusterSet,
})
if err != nil {
t.Fatal(err)
}
clusterStore := clusterInformerFactory.Cluster().V1().ManagedClusters().Informer().GetStore()
clusterSetStore := clusterInformerFactory.Cluster().V1beta2().ManagedClusterSets().Informer().GetStore()
@@ -43,19 +49,23 @@ func newClusterInformerFactory(clusterClient clusterclient.Interface, objects ..
addOnPlacementStore := clusterInformerFactory.Cluster().V1alpha1().AddOnPlacementScores().Informer().GetStore()
for _, obj := range objects {
var err error
switch obj.(type) {
case *clusterapiv1.ManagedCluster:
clusterStore.Add(obj)
err = clusterStore.Add(obj)
case *clusterapiv1beta2.ManagedClusterSet:
clusterSetStore.Add(obj)
err = clusterSetStore.Add(obj)
case *clusterapiv1beta2.ManagedClusterSetBinding:
clusterSetBindingStore.Add(obj)
err = clusterSetBindingStore.Add(obj)
case *clusterapiv1beta1.Placement:
placementStore.Add(obj)
err = placementStore.Add(obj)
case *clusterapiv1beta1.PlacementDecision:
placementDecisionStore.Add(obj)
err = placementDecisionStore.Add(obj)
case *clusterapiv1alpha1.AddOnPlacementScore:
addOnPlacementStore.Add(obj)
err = addOnPlacementStore.Add(obj)
}
if err != nil {
t.Fatal(err)
}
}
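
The reworked newClusterInformerFactory above now takes *testing.T so that the previously ignored error returns from AddIndexers and the store Add calls can fail the test immediately instead of tripping errcheck. A condensed sketch of the same fail-fast pattern, using client-go's cache package with an illustrative helper name:

package example

import (
	"testing"

	"k8s.io/client-go/tools/cache"
)

// newIndexedStore seeds a cache.Indexer for a test; every error is checked
// and turned into t.Fatal rather than being silently dropped.
func newIndexedStore(t *testing.T, objects ...interface{}) cache.Indexer {
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
	for _, obj := range objects {
		if err := indexer.Add(obj); err != nil {
			t.Fatal(err)
		}
	}
	return indexer
}
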
@@ -175,7 +185,7 @@ func TestEnqueuePlacementsByClusterSet(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
clusterClient := clusterfake.NewSimpleClientset(c.initObjs...)
clusterInformerFactory := newClusterInformerFactory(clusterClient, c.initObjs...)
clusterInformerFactory := newClusterInformerFactory(t, clusterClient, c.initObjs...)
syncCtx := testingcommon.NewFakeSyncContext(t, "fake")
q := newEnqueuer(
@@ -282,7 +292,7 @@ func TestEnqueuePlacementsByClusterSetBinding(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
clusterClient := clusterfake.NewSimpleClientset(c.initObjs...)
clusterInformerFactory := newClusterInformerFactory(clusterClient, c.initObjs...)
clusterInformerFactory := newClusterInformerFactory(t, clusterClient, c.initObjs...)
syncCtx := testingcommon.NewFakeSyncContext(t, "fake")
q := newEnqueuer(
@@ -370,7 +380,7 @@ func TestEnqueuePlacementsByScore(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
clusterClient := clusterfake.NewSimpleClientset(c.initObjs...)
clusterInformerFactory := newClusterInformerFactory(clusterClient, c.initObjs...)
clusterInformerFactory := newClusterInformerFactory(t, clusterClient, c.initObjs...)
syncCtx := testingcommon.NewFakeSyncContext(t, "fake")
q := newEnqueuer(


@@ -56,7 +56,7 @@ type ScheduleResult interface {
// PrioritizerScores returns total score for each cluster
PrioritizerScores() PrioritizerScore
// Decision returns the decision groups of the schedule
// Decisions returns the decision groups of the schedule
Decisions() []*clusterapiv1.ManagedCluster
// NumOfUnscheduled returns the number of unscheduled.
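
The one-word comment fix above ("Decision" to "Decisions") satisfies the Go doc-comment convention that a comment on an exported identifier should begin with that identifier's exact name, which doc-comment linters enforce. A tiny illustrative example:

package example

// Scheduler is an illustrative interface, not the repo's real one.
type Scheduler interface {
	// Decisions returns the clusters chosen by the last scheduling run.
	// The comment starts with "Decisions", matching the method name.
	Decisions() []string
}
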
@@ -180,7 +180,7 @@ func (s *pluginScheduler) Schedule(
}
// filter clusters
filterPipline := []string{}
var filterPipline []string
for _, f := range s.filters {
filterResult, status := f.Filter(ctx, placement, filtered)
@@ -389,10 +389,10 @@ func getPrioritizers(weights map[clusterapiv1beta1.ScoreCoordinate]int32, handle
}
func (r *scheduleResult) FilterResults() []FilterResult {
results := []FilterResult{}
var results []FilterResult
// order the FilterResults by key length
filteredRecordsKey := []string{}
var filteredRecordsKey []string
for name := range r.filteredRecords {
filteredRecordsKey = append(filteredRecordsKey, name)
}


@@ -23,8 +23,6 @@ import (
func TestSchedule(t *testing.T) {
clusterSetName := "clusterSets"
placementNamespace := "ns1"
placementName := "placement1"
cases := []struct {
name string
@@ -294,8 +292,13 @@ func TestSchedule(t *testing.T) {
expectedStatus: *framework.NewStatus("", framework.Success, ""),
},
{
name: "placement with additive Prioritizer Policy",
placement: testinghelpers.NewPlacement(placementNamespace, placementName).WithNOC(2).WithPrioritizerPolicy("Additive").WithPrioritizerConfig("Balance", 3).WithPrioritizerConfig("ResourceAllocatableMemory", 1).WithScoreCoordinateAddOn("demo", "demo", 1).Build(),
name: "placement with additive Prioritizer Policy",
placement: testinghelpers.NewPlacement(placementNamespace, placementName).
WithNOC(2).
WithPrioritizerPolicy("Additive").
WithPrioritizerConfig("Balance", 3).
WithPrioritizerConfig("ResourceAllocatableMemory", 1).
WithScoreCoordinateAddOn("demo", "demo", 1).Build(),
initObjs: []runtime.Object{
testinghelpers.NewClusterSet(clusterSetName).Build(),
testinghelpers.NewClusterSetBinding(placementNamespace, clusterSetName),
@@ -304,13 +307,23 @@ func TestSchedule(t *testing.T) {
testinghelpers.NewAddOnPlacementScore("cluster3", "demo").WithScore("demo", 50).Build(),
},
clusters: []*clusterapiv1.ManagedCluster{
testinghelpers.NewManagedCluster("cluster1").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).WithResource(clusterapiv1.ResourceMemory, "100", "100").Build(),
testinghelpers.NewManagedCluster("cluster2").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).WithResource(clusterapiv1.ResourceMemory, "50", "100").Build(),
testinghelpers.NewManagedCluster("cluster3").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).WithResource(clusterapiv1.ResourceMemory, "0", "100").Build(),
testinghelpers.NewManagedCluster("cluster1").
WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).
WithResource(clusterapiv1.ResourceMemory, "100", "100").Build(),
testinghelpers.NewManagedCluster("cluster2").
WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).
WithResource(clusterapiv1.ResourceMemory, "50", "100").Build(),
testinghelpers.NewManagedCluster("cluster3").
WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).
WithResource(clusterapiv1.ResourceMemory, "0", "100").Build(),
},
expectedDecisions: []*clusterapiv1.ManagedCluster{
testinghelpers.NewManagedCluster("cluster1").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).WithResource(clusterapiv1.ResourceMemory, "100", "100").Build(),
testinghelpers.NewManagedCluster("cluster2").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).WithResource(clusterapiv1.ResourceMemory, "50", "100").Build(),
testinghelpers.NewManagedCluster("cluster1").
WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).
WithResource(clusterapiv1.ResourceMemory, "100", "100").Build(),
testinghelpers.NewManagedCluster("cluster2").
WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).
WithResource(clusterapiv1.ResourceMemory, "50", "100").Build(),
},
expectedFilterResult: []FilterResult{
{
@@ -348,20 +361,33 @@ func TestSchedule(t *testing.T) {
expectedStatus: *framework.NewStatus("", framework.Success, ""),
},
{
name: "placement with exact Prioritizer Policy",
placement: testinghelpers.NewPlacement(placementNamespace, placementName).WithNOC(2).WithPrioritizerPolicy("Exact").WithPrioritizerConfig("Balance", 3).WithPrioritizerConfig("ResourceAllocatableMemory", 1).Build(),
name: "placement with exact Prioritizer Policy",
placement: testinghelpers.NewPlacement(placementNamespace, placementName).
WithNOC(2).WithPrioritizerPolicy("Exact").
WithPrioritizerConfig("Balance", 3).
WithPrioritizerConfig("ResourceAllocatableMemory", 1).Build(),
initObjs: []runtime.Object{
testinghelpers.NewClusterSet(clusterSetName).Build(),
testinghelpers.NewClusterSetBinding(placementNamespace, clusterSetName),
},
clusters: []*clusterapiv1.ManagedCluster{
testinghelpers.NewManagedCluster("cluster1").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).WithResource(clusterapiv1.ResourceMemory, "100", "100").Build(),
testinghelpers.NewManagedCluster("cluster2").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).WithResource(clusterapiv1.ResourceMemory, "50", "100").Build(),
testinghelpers.NewManagedCluster("cluster3").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).WithResource(clusterapiv1.ResourceMemory, "0", "100").Build(),
testinghelpers.NewManagedCluster("cluster1").
WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).
WithResource(clusterapiv1.ResourceMemory, "100", "100").Build(),
testinghelpers.NewManagedCluster("cluster2").
WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).
WithResource(clusterapiv1.ResourceMemory, "50", "100").Build(),
testinghelpers.NewManagedCluster("cluster3").
WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).
WithResource(clusterapiv1.ResourceMemory, "0", "100").Build(),
},
expectedDecisions: []*clusterapiv1.ManagedCluster{
testinghelpers.NewManagedCluster("cluster1").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).WithResource(clusterapiv1.ResourceMemory, "100", "100").Build(),
testinghelpers.NewManagedCluster("cluster2").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).WithResource(clusterapiv1.ResourceMemory, "50", "100").Build(),
testinghelpers.NewManagedCluster("cluster1").
WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).
WithResource(clusterapiv1.ResourceMemory, "100", "100").Build(),
testinghelpers.NewManagedCluster("cluster2").
WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).
WithResource(clusterapiv1.ResourceMemory, "50", "100").Build(),
},
expectedFilterResult: []FilterResult{
{
@@ -399,12 +425,16 @@ func TestSchedule(t *testing.T) {
WithDecisions("cluster1").Build(),
},
clusters: []*clusterapiv1.ManagedCluster{
testinghelpers.NewManagedCluster("cluster1").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(),
testinghelpers.NewManagedCluster("cluster2").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(),
testinghelpers.NewManagedCluster("cluster1").
WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(),
testinghelpers.NewManagedCluster("cluster2").
WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(),
},
expectedDecisions: []*clusterapiv1.ManagedCluster{
testinghelpers.NewManagedCluster("cluster1").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(),
testinghelpers.NewManagedCluster("cluster2").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(),
testinghelpers.NewManagedCluster("cluster1").
WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(),
testinghelpers.NewManagedCluster("cluster2").
WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(),
},
expectedFilterResult: []FilterResult{
{
@@ -441,12 +471,16 @@ func TestSchedule(t *testing.T) {
WithDecisions("cluster1", "cluster2").Build(),
},
clusters: []*clusterapiv1.ManagedCluster{
testinghelpers.NewManagedCluster("cluster1").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(),
testinghelpers.NewManagedCluster("cluster2").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(),
testinghelpers.NewManagedCluster("cluster3").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(),
testinghelpers.NewManagedCluster("cluster1").
WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(),
testinghelpers.NewManagedCluster("cluster2").
WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(),
testinghelpers.NewManagedCluster("cluster3").
WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(),
},
expectedDecisions: []*clusterapiv1.ManagedCluster{
testinghelpers.NewManagedCluster("cluster3").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(),
testinghelpers.NewManagedCluster("cluster3").
WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(),
},
expectedFilterResult: []FilterResult{
{
@@ -488,12 +522,16 @@ func TestSchedule(t *testing.T) {
WithDecisions("cluster3").Build(),
},
clusters: []*clusterapiv1.ManagedCluster{
testinghelpers.NewManagedCluster("cluster1").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(),
testinghelpers.NewManagedCluster("cluster2").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(),
testinghelpers.NewManagedCluster("cluster3").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(),
testinghelpers.NewManagedCluster("cluster1").
WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(),
testinghelpers.NewManagedCluster("cluster2").
WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(),
testinghelpers.NewManagedCluster("cluster3").
WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(),
},
expectedDecisions: []*clusterapiv1.ManagedCluster{
testinghelpers.NewManagedCluster("cluster3").WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(),
testinghelpers.NewManagedCluster("cluster3").
WithLabel(clusterapiv1beta2.ClusterSetLabel, clusterSetName).Build(),
},
expectedFilterResult: []FilterResult{
{


@@ -8,7 +8,6 @@ import (
"sort"
"strconv"
"strings"
"time"
"github.com/openshift/library-go/pkg/controller/factory"
"github.com/openshift/library-go/pkg/operator/events"
@@ -23,7 +22,7 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
cache "k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/cache"
kevents "k8s.io/client-go/tools/events"
"k8s.io/klog/v2"
@@ -59,8 +58,6 @@ type clusterDecisionGroup struct {
clusterDecisions []clusterapiv1beta1.ClusterDecision
}
var ResyncInterval = time.Minute * 5
// schedulingController schedules cluster decisions for Placements
type schedulingController struct {
clusterClient clusterclient.Interface
@@ -167,8 +164,7 @@ func NewSchedulingController(
placementInformer.Informer()).
WithFilteredEventsInformersQueueKeyFunc(func(obj runtime.Object) string {
accessor, _ := meta.Accessor(obj)
labels := accessor.GetLabels()
placementName := labels[clusterapiv1beta1.PlacementLabel]
placementName := accessor.GetLabels()[clusterapiv1beta1.PlacementLabel]
return fmt.Sprintf("%s/%s", accessor.GetNamespace(), placementName)
},
queue.FileterByLabel(clusterapiv1beta1.PlacementLabel),
@@ -287,7 +283,7 @@ func (c *schedulingController) getValidManagedClusterSetBindings(placementNamesp
bindings = nil
}
validBindings := []*clusterapiv1beta2.ManagedClusterSetBinding{}
var validBindings []*clusterapiv1beta2.ManagedClusterSetBinding
for _, binding := range bindings {
// ignore clustersetbindings that refer to a non-existent clusterset
_, err := c.clusterSetLister.Get(binding.Name)
@@ -352,7 +348,7 @@ func (c *schedulingController) getAvailableClusters(clusterSetNames []string) ([
return nil, nil
}
result := []*clusterapiv1.ManagedCluster{}
var result []*clusterapiv1.ManagedCluster
for _, c := range availableClusters {
result = append(result, c)
}
@@ -461,8 +457,8 @@ func (c *schedulingController) generatePlacementDecisionsAndStatus(
clusters []*clusterapiv1.ManagedCluster,
) ([]*clusterapiv1beta1.PlacementDecision, []*clusterapiv1beta1.DecisionGroupStatus, *framework.Status) {
placementDecisionIndex := 0
placementDecisions := []*clusterapiv1beta1.PlacementDecision{}
decisionGroupStatus := []*clusterapiv1beta1.DecisionGroupStatus{}
var placementDecisions []*clusterapiv1beta1.PlacementDecision
var decisionGroupStatus []*clusterapiv1beta1.DecisionGroupStatus
// generate decision group
decisionGroups, status := c.generateDecisionGroups(placement, clusters)
@@ -491,7 +487,7 @@ func (c *schedulingController) generateDecisionGroups(
placement *clusterapiv1beta1.Placement,
clusters []*clusterapiv1.ManagedCluster,
) (clusterDecisionGroups, *framework.Status) {
groups := []clusterDecisionGroup{}
var groups []clusterDecisionGroup
// Calculate the group length
// The number of items in each group is determined by the specific number or percentage defined in
@@ -502,7 +498,7 @@ func (c *schedulingController) generateDecisionGroups(
}
// Record the cluster names
clusterNames := sets.NewString()
clusterNames := sets.New[string]()
for _, cluster := range clusters {
clusterNames.Insert(cluster.Name)
}
@@ -514,15 +510,14 @@ func (c *schedulingController) generateDecisionGroups(
if status.IsError() {
return groups, status
}
// If matched clusters number meets groupLength, divide into multiple groups.
decisionGroups := divideDecisionGroups(d.GroupName, matched, groupLength)
groups = append(groups, decisionGroups...)
}
// The rest of the clusters will also be put into decision groups.
matched := []clusterapiv1beta1.ClusterDecision{}
for _, cluster := range clusterNames.List() {
var matched []clusterapiv1beta1.ClusterDecision
for _, cluster := range clusterNames.UnsortedList() {
matched = append(matched, clusterapiv1beta1.ClusterDecision{
ClusterName: cluster,
})
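
These hunks move from the string-specialised sets.NewString to the generic sets.New[string] from k8s.io/apimachinery/pkg/util/sets. One nuance worth noting: the old loop ranged over clusterNames.List(), which is sorted, whereas UnsortedList() makes no ordering guarantee, so the change is only equivalent when callers do not depend on iteration order. A short sketch of the generic API, assuming a reasonably recent apimachinery release:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	clusterNames := sets.New[string]("cluster2", "cluster1")
	clusterNames.Insert("cluster3")
	clusterNames.Delete("cluster2")

	fmt.Println(clusterNames.Has("cluster1"))     // true
	fmt.Println(sets.List(clusterNames))          // sorted: [cluster1 cluster3]
	fmt.Println(len(clusterNames.UnsortedList())) // 2, order unspecified
}
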
@@ -547,7 +542,7 @@ func (c *schedulingController) generateDecision(
) ([]*clusterapiv1beta1.PlacementDecision, *clusterapiv1beta1.DecisionGroupStatus) {
// split the cluster decisions into slices, the size of each slice cannot exceed
// maxNumOfClusterDecisions.
decisionSlices := [][]clusterapiv1beta1.ClusterDecision{}
var decisionSlices [][]clusterapiv1beta1.ClusterDecision
remainingDecisions := clusterDecisionGroup.clusterDecisions
for index := 0; len(remainingDecisions) > 0; index++ {
var decisionSlice []clusterapiv1beta1.ClusterDecision
@@ -568,8 +563,8 @@ func (c *schedulingController) generateDecision(
decisionSlices = append(decisionSlices, []clusterapiv1beta1.ClusterDecision{})
}
placementDecisionNames := []string{}
placementDecisions := []*clusterapiv1beta1.PlacementDecision{}
var placementDecisionNames []string
var placementDecisions []*clusterapiv1beta1.PlacementDecision
for index, decisionSlice := range decisionSlices {
placementDecisionName := fmt.Sprintf("%s-decision-%d", placement.Name, placementDecisionIndex+index)
owner := metav1.NewControllerRef(placement, clusterapiv1beta1.GroupVersion.WithKind("Placement"))
@@ -612,7 +607,7 @@ func (c *schedulingController) bind(
clusterScores PrioritizerScore,
status *framework.Status,
) error {
errs := []error{}
var errs []error
placementDecisionNames := sets.NewString()
// create/update placement decisions
@@ -775,10 +770,9 @@ func calculateLength(intOrStr *intstr.IntOrString, total int) (int, *framework.S
func filterClustersBySelector(
selector clusterapiv1beta1.ClusterSelector,
clusters []*clusterapiv1.ManagedCluster,
clusterNames sets.String,
clusterNames sets.Set[string],
) ([]clusterapiv1beta1.ClusterDecision, *framework.Status) {
matched := []clusterapiv1beta1.ClusterDecision{}
var matched []clusterapiv1beta1.ClusterDecision
// create cluster label selector
clusterSelector, err := helpers.NewClusterSelector(selector)
if err != nil {
@@ -806,8 +800,7 @@ func filterClustersBySelector(
// divideDecisionGroups divides the matched clusters into groups, ensuring that each group has the specified length.
func divideDecisionGroups(groupName string, matched []clusterapiv1beta1.ClusterDecision, groupLength int) []clusterDecisionGroup {
groups := []clusterDecisionGroup{}
var groups []clusterDecisionGroup
for len(matched) > 0 {
groupClusters := matched
if groupLength < len(matched) {


@@ -32,6 +32,11 @@ type testScheduler struct {
result ScheduleResult
}
const (
placementNamespace = "ns1"
placementName = "placement1"
)
func (s *testScheduler) Schedule(ctx context.Context,
placement *clusterapiv1beta1.Placement,
clusters []*clusterapiv1.ManagedCluster,
@@ -40,9 +45,6 @@ func (s *testScheduler) Schedule(ctx context.Context,
}
func TestSchedulingController_sync(t *testing.T) {
placementNamespace := "ns1"
placementName := "placement1"
cases := []struct {
name string
placement *clusterapiv1beta1.Placement
@@ -371,7 +373,7 @@ func TestSchedulingController_sync(t *testing.T) {
validateActions: func(t *testing.T, actions []clienttesting.Action) {
// check if PlacementDecision has been updated
testingcommon.AssertActions(t, actions, "create", "patch")
// check if emtpy PlacementDecision has been created
// check if empty PlacementDecision has been created
actual := actions[0].(clienttesting.CreateActionImpl).Object
placementDecision, ok := actual.(*clusterapiv1beta1.PlacementDecision)
if !ok {
@@ -418,7 +420,7 @@ func TestSchedulingController_sync(t *testing.T) {
validateActions: func(t *testing.T, actions []clienttesting.Action) {
// check if PlacementDecision has been updated
testingcommon.AssertActions(t, actions, "create", "patch")
// check if emtpy PlacementDecision has been created
// check if empty PlacementDecision has been created
actual := actions[0].(clienttesting.CreateActionImpl).Object
placementDecision, ok := actual.(*clusterapiv1beta1.PlacementDecision)
if !ok {
@@ -468,7 +470,7 @@ func TestSchedulingController_sync(t *testing.T) {
validateActions: func(t *testing.T, actions []clienttesting.Action) {
// check if PlacementDecision has been updated
testingcommon.AssertActions(t, actions, "create", "patch")
// check if emtpy PlacementDecision has been created
// check if empty PlacementDecision has been created
actual := actions[0].(clienttesting.CreateActionImpl).Object
placementDecision, ok := actual.(*clusterapiv1beta1.PlacementDecision)
if !ok {
@@ -551,7 +553,7 @@ func TestSchedulingController_sync(t *testing.T) {
t.Run(c.name, func(t *testing.T) {
c.initObjs = append(c.initObjs, c.placement)
clusterClient := clusterfake.NewSimpleClientset(c.initObjs...)
clusterInformerFactory := newClusterInformerFactory(clusterClient, c.initObjs...)
clusterInformerFactory := newClusterInformerFactory(t, clusterClient, c.initObjs...)
s := &testScheduler{result: c.scheduleResult}
ctrl := schedulingController{
@@ -608,7 +610,7 @@ func TestGetValidManagedClusterSetBindings(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
clusterClient := clusterfake.NewSimpleClientset(c.initObjs...)
clusterInformerFactory := newClusterInformerFactory(clusterClient, c.initObjs...)
clusterInformerFactory := newClusterInformerFactory(t, clusterClient, c.initObjs...)
ctrl := &schedulingController{
clusterSetLister: clusterInformerFactory.Cluster().V1beta2().ManagedClusterSets().Lister(),
@@ -683,7 +685,7 @@ func TestGetValidManagedClusterSets(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
clusterClient := clusterfake.NewSimpleClientset(c.initObjs...)
clusterInformerFactory := newClusterInformerFactory(clusterClient, c.initObjs...)
clusterInformerFactory := newClusterInformerFactory(t, clusterClient, c.initObjs...)
ctrl := &schedulingController{
clusterSetLister: clusterInformerFactory.Cluster().V1beta2().ManagedClusterSets().Lister(),
@@ -811,7 +813,7 @@ func TestGetAvailableClusters(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
clusterClient := clusterfake.NewSimpleClientset(c.initObjs...)
clusterInformerFactory := newClusterInformerFactory(clusterClient, c.initObjs...)
clusterInformerFactory := newClusterInformerFactory(t, clusterClient, c.initObjs...)
ctrl := &schedulingController{
clusterLister: clusterInformerFactory.Cluster().V1().ManagedClusters().Lister(),
@@ -975,9 +977,6 @@ func TestNewMisconfiguredCondition(t *testing.T) {
}
func TestBind(t *testing.T) {
placementNamespace := "ns1"
placementName := "placement1"
cases := []struct {
name string
initObjs []runtime.Object
@@ -1387,7 +1386,7 @@ func TestBind(t *testing.T) {
},
)
clusterInformerFactory := newClusterInformerFactory(clusterClient, c.initObjs...)
clusterInformerFactory := newClusterInformerFactory(t, clusterClient, c.initObjs...)
s := &testScheduler{}


@@ -13,7 +13,7 @@ import (
clusterlisterv1 "open-cluster-management.io/api/client/cluster/listers/cluster/v1"
clusterlisterv1beta1 "open-cluster-management.io/api/client/cluster/listers/cluster/v1beta1"
scheduling "open-cluster-management.io/ocm/pkg/placement/controllers/scheduling"
"open-cluster-management.io/ocm/pkg/placement/controllers/scheduling"
)
const DebugPath = "/debug/placements/"


@@ -4,7 +4,7 @@ import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"io"
"net/http"
"net/http/httptest"
"reflect"
@@ -18,7 +18,7 @@ import (
clusterapiv1beta1 "open-cluster-management.io/api/cluster/v1beta1"
"open-cluster-management.io/ocm/pkg/placement/controllers/framework"
scheduling "open-cluster-management.io/ocm/pkg/placement/controllers/scheduling"
"open-cluster-management.io/ocm/pkg/placement/controllers/scheduling"
testinghelpers "open-cluster-management.io/ocm/pkg/placement/helpers/testing"
)
@@ -102,7 +102,7 @@ func TestDebugger(t *testing.T) {
t.Errorf("Expect no error but get %v", err)
}
responseBody, err := ioutil.ReadAll(res.Body)
responseBody, err := io.ReadAll(res.Body)
if err != nil {
t.Errorf("Unexpected error reading response body: %v", err)
}
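
The io/ioutil package has been deprecated since Go 1.16, and ioutil.ReadAll is now just a wrapper around io.ReadAll, so the substitution above is a drop-in rename. A minimal self-contained example:

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

func main() {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprint(w, `{"status":"ok"}`)
	}))
	defer srv.Close()

	res, err := http.Get(srv.URL)
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()

	body, err := io.ReadAll(res.Body) // previously ioutil.ReadAll(res.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}
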


@@ -151,7 +151,7 @@ func TestGetClusterClaims(t *testing.T) {
expected: map[string]string{"cloud": "Amazon"},
},
{
name: "convert emtpy cluster claim",
name: "convert empty cluster claim",
cluster: testinghelpers.NewManagedCluster("cluster1").Build(),
expected: map[string]string{},
},


@@ -15,12 +15,12 @@ import (
clusterapiv1beta2 "open-cluster-management.io/api/cluster/v1beta2"
)
type placementBuilder struct {
type PlacementBuilder struct {
placement *clusterapiv1beta1.Placement
}
func NewPlacement(namespace, name string) *placementBuilder {
return &placementBuilder{
func NewPlacement(namespace, name string) *PlacementBuilder {
return &PlacementBuilder{
placement: &clusterapiv1beta1.Placement{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
@@ -30,8 +30,8 @@ func NewPlacement(namespace, name string) *placementBuilder {
}
}
func NewPlacementWithAnnotations(namespace, name string, annotations map[string]string) *placementBuilder {
return &placementBuilder{
func NewPlacementWithAnnotations(namespace, name string, annotations map[string]string) *PlacementBuilder {
return &PlacementBuilder{
placement: &clusterapiv1beta1.Placement{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
@@ -42,29 +42,29 @@ func NewPlacementWithAnnotations(namespace, name string, annotations map[string]
}
}
func (b *placementBuilder) WithUID(uid string) *placementBuilder {
func (b *PlacementBuilder) WithUID(uid string) *PlacementBuilder {
b.placement.UID = types.UID(uid)
return b
}
func (b *placementBuilder) WithNOC(noc int32) *placementBuilder {
func (b *PlacementBuilder) WithNOC(noc int32) *PlacementBuilder {
b.placement.Spec.NumberOfClusters = &noc
return b
}
func (b *placementBuilder) WithGroupStrategy(groupStrategy clusterapiv1beta1.GroupStrategy) *placementBuilder {
func (b *PlacementBuilder) WithGroupStrategy(groupStrategy clusterapiv1beta1.GroupStrategy) *PlacementBuilder {
b.placement.Spec.DecisionStrategy.GroupStrategy = groupStrategy
return b
}
func (b *placementBuilder) WithPrioritizerPolicy(mode clusterapiv1beta1.PrioritizerPolicyModeType) *placementBuilder {
func (b *PlacementBuilder) WithPrioritizerPolicy(mode clusterapiv1beta1.PrioritizerPolicyModeType) *PlacementBuilder {
b.placement.Spec.PrioritizerPolicy = clusterapiv1beta1.PrioritizerPolicy{
Mode: mode,
}
return b
}
func (b *placementBuilder) WithPrioritizerConfig(name string, weight int32) *placementBuilder {
func (b *PlacementBuilder) WithPrioritizerConfig(name string, weight int32) *PlacementBuilder {
if b.placement.Spec.PrioritizerPolicy.Configurations == nil {
b.placement.Spec.PrioritizerPolicy.Configurations = []clusterapiv1beta1.PrioritizerConfig{}
}
@@ -80,7 +80,7 @@ func (b *placementBuilder) WithPrioritizerConfig(name string, weight int32) *pla
return b
}
func (b *placementBuilder) WithScoreCoordinateAddOn(resourceName, scoreName string, weight int32) *placementBuilder {
func (b *PlacementBuilder) WithScoreCoordinateAddOn(resourceName, scoreName string, weight int32) *PlacementBuilder {
if b.placement.Spec.PrioritizerPolicy.Configurations == nil {
b.placement.Spec.PrioritizerPolicy.Configurations = []clusterapiv1beta1.PrioritizerConfig{}
}
@@ -96,18 +96,18 @@ func (b *placementBuilder) WithScoreCoordinateAddOn(resourceName, scoreName stri
return b
}
func (b *placementBuilder) WithClusterSets(clusterSets ...string) *placementBuilder {
func (b *PlacementBuilder) WithClusterSets(clusterSets ...string) *PlacementBuilder {
b.placement.Spec.ClusterSets = clusterSets
return b
}
func (b *placementBuilder) WithDeletionTimestamp() *placementBuilder {
func (b *PlacementBuilder) WithDeletionTimestamp() *PlacementBuilder {
now := metav1.Now()
b.placement.DeletionTimestamp = &now
return b
}
func (b *placementBuilder) AddPredicate(labelSelector *metav1.LabelSelector, claimSelector *clusterapiv1beta1.ClusterClaimSelector) *placementBuilder {
func (b *PlacementBuilder) AddPredicate(labelSelector *metav1.LabelSelector, claimSelector *clusterapiv1beta1.ClusterClaimSelector) *PlacementBuilder {
if b.placement.Spec.Predicates == nil {
b.placement.Spec.Predicates = []clusterapiv1beta1.ClusterPredicate{}
}
@@ -115,7 +115,7 @@ func (b *placementBuilder) AddPredicate(labelSelector *metav1.LabelSelector, cla
return b
}
func (b *placementBuilder) AddToleration(toleration *clusterapiv1beta1.Toleration) *placementBuilder {
func (b *PlacementBuilder) AddToleration(toleration *clusterapiv1beta1.Toleration) *PlacementBuilder {
if b.placement.Spec.Tolerations == nil {
b.placement.Spec.Tolerations = []clusterapiv1beta1.Toleration{}
}
@@ -123,7 +123,7 @@ func (b *placementBuilder) AddToleration(toleration *clusterapiv1beta1.Toleratio
return b
}
func (b *placementBuilder) WithNumOfSelectedClusters(nosc int, placementName string) *placementBuilder {
func (b *PlacementBuilder) WithNumOfSelectedClusters(nosc int, placementName string) *PlacementBuilder {
b.placement.Status.NumberOfSelectedClusters = int32(nosc)
b.placement.Status.DecisionGroups = []clusterapiv1beta1.DecisionGroupStatus{
{
@@ -136,7 +136,7 @@ func (b *placementBuilder) WithNumOfSelectedClusters(nosc int, placementName str
return b
}
func (b *placementBuilder) WithSatisfiedCondition(numbOfScheduledDecisions, numbOfUnscheduledDecisions int) *placementBuilder {
func (b *PlacementBuilder) WithSatisfiedCondition(numbOfScheduledDecisions, numbOfUnscheduledDecisions int) *PlacementBuilder {
condition := metav1.Condition{
Type: clusterapiv1beta1.PlacementConditionSatisfied,
}
@@ -154,7 +154,7 @@ func (b *placementBuilder) WithSatisfiedCondition(numbOfScheduledDecisions, numb
return b
}
func (b *placementBuilder) WithMisconfiguredCondition(status metav1.ConditionStatus) *placementBuilder {
func (b *PlacementBuilder) WithMisconfiguredCondition(status metav1.ConditionStatus) *PlacementBuilder {
condition := metav1.Condition{
Type: clusterapiv1beta1.PlacementConditionMisconfigured,
Status: status,
@@ -165,7 +165,7 @@ func (b *placementBuilder) WithMisconfiguredCondition(status metav1.ConditionSta
return b
}
func (b *placementBuilder) Build() *clusterapiv1beta1.Placement {
func (b *PlacementBuilder) Build() *clusterapiv1beta1.Placement {
return b.placement
}
@@ -185,12 +185,12 @@ func NewClusterPredicate(labelSelector *metav1.LabelSelector, claimSelector *clu
return predicate
}
type placementDecisionBuilder struct {
type PlacementDecisionBuilder struct {
placementDecision *clusterapiv1beta1.PlacementDecision
}
func NewPlacementDecision(namespace, name string) *placementDecisionBuilder {
return &placementDecisionBuilder{
func NewPlacementDecision(namespace, name string) *PlacementDecisionBuilder {
return &PlacementDecisionBuilder{
placementDecision: &clusterapiv1beta1.PlacementDecision{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
@@ -200,7 +200,7 @@ func NewPlacementDecision(namespace, name string) *placementDecisionBuilder {
}
}
func (b *placementDecisionBuilder) WithController(uid string) *placementDecisionBuilder {
func (b *PlacementDecisionBuilder) WithController(uid string) *PlacementDecisionBuilder {
controller := true
b.placementDecision.OwnerReferences = append(b.placementDecision.OwnerReferences, metav1.OwnerReference{
Controller: &controller,
@@ -209,7 +209,7 @@ func (b *placementDecisionBuilder) WithController(uid string) *placementDecision
return b
}
func (b *placementDecisionBuilder) WithLabel(name, value string) *placementDecisionBuilder {
func (b *PlacementDecisionBuilder) WithLabel(name, value string) *PlacementDecisionBuilder {
if b.placementDecision.Labels == nil {
b.placementDecision.Labels = map[string]string{}
}
@@ -217,14 +217,14 @@ func (b *placementDecisionBuilder) WithLabel(name, value string) *placementDecis
return b
}
func (b *placementDecisionBuilder) WithDeletionTimestamp() *placementDecisionBuilder {
func (b *PlacementDecisionBuilder) WithDeletionTimestamp() *PlacementDecisionBuilder {
now := metav1.Now()
b.placementDecision.DeletionTimestamp = &now
return b
}
func (b *placementDecisionBuilder) WithDecisions(clusterNames ...string) *placementDecisionBuilder {
decisions := []clusterapiv1beta1.ClusterDecision{}
func (b *PlacementDecisionBuilder) WithDecisions(clusterNames ...string) *PlacementDecisionBuilder {
var decisions []clusterapiv1beta1.ClusterDecision
for _, clusterName := range clusterNames {
decisions = append(decisions, clusterapiv1beta1.ClusterDecision{
ClusterName: clusterName,
@@ -234,16 +234,16 @@ func (b *placementDecisionBuilder) WithDecisions(clusterNames ...string) *placem
return b
}
func (b *placementDecisionBuilder) Build() *clusterapiv1beta1.PlacementDecision {
func (b *PlacementDecisionBuilder) Build() *clusterapiv1beta1.PlacementDecision {
return b.placementDecision
}
type managedClusterBuilder struct {
type ManagedClusterBuilder struct {
cluster *clusterapiv1.ManagedCluster
}
func NewManagedCluster(clusterName string) *managedClusterBuilder {
return &managedClusterBuilder{
func NewManagedCluster(clusterName string) *ManagedClusterBuilder {
return &ManagedClusterBuilder{
cluster: &clusterapiv1.ManagedCluster{
ObjectMeta: metav1.ObjectMeta{
Name: clusterName,
@@ -252,7 +252,7 @@ func NewManagedCluster(clusterName string) *managedClusterBuilder {
}
}
func (b *managedClusterBuilder) WithLabel(name, value string) *managedClusterBuilder {
func (b *ManagedClusterBuilder) WithLabel(name, value string) *ManagedClusterBuilder {
if b.cluster.Labels == nil {
b.cluster.Labels = map[string]string{}
}
@@ -260,14 +260,14 @@ func (b *managedClusterBuilder) WithLabel(name, value string) *managedClusterBui
return b
}
func (b *managedClusterBuilder) WithClaim(name, value string) *managedClusterBuilder {
func (b *ManagedClusterBuilder) WithClaim(name, value string) *ManagedClusterBuilder {
claimMap := map[string]string{}
for _, claim := range b.cluster.Status.ClusterClaims {
claimMap[claim.Name] = claim.Value
}
claimMap[name] = value
clusterClaims := []clusterapiv1.ManagedClusterClaim{}
var clusterClaims []clusterapiv1.ManagedClusterClaim
for k, v := range claimMap {
clusterClaims = append(clusterClaims, clusterapiv1.ManagedClusterClaim{
Name: k,
@@ -279,7 +279,7 @@ func (b *managedClusterBuilder) WithClaim(name, value string) *managedClusterBui
return b
}
func (b *managedClusterBuilder) WithResource(resourceName clusterapiv1.ResourceName, allocatable, capacity string) *managedClusterBuilder {
func (b *ManagedClusterBuilder) WithResource(resourceName clusterapiv1.ResourceName, allocatable, capacity string) *ManagedClusterBuilder {
if b.cluster.Status.Allocatable == nil {
b.cluster.Status.Allocatable = make(map[clusterapiv1.ResourceName]resource.Quantity)
}
@@ -292,7 +292,7 @@ func (b *managedClusterBuilder) WithResource(resourceName clusterapiv1.ResourceN
return b
}
func (b *managedClusterBuilder) WithTaint(taint *clusterapiv1.Taint) *managedClusterBuilder {
func (b *ManagedClusterBuilder) WithTaint(taint *clusterapiv1.Taint) *ManagedClusterBuilder {
if b.cluster.Spec.Taints == nil {
b.cluster.Spec.Taints = []clusterapiv1.Taint{}
}
@@ -300,16 +300,16 @@ func (b *managedClusterBuilder) WithTaint(taint *clusterapiv1.Taint) *managedClu
return b
}
func (b *managedClusterBuilder) Build() *clusterapiv1.ManagedCluster {
func (b *ManagedClusterBuilder) Build() *clusterapiv1.ManagedCluster {
return b.cluster
}
type managedClusterSetBuilder struct {
type ManagedClusterSetBuilder struct {
clusterset *clusterapiv1beta2.ManagedClusterSet
}
func NewClusterSet(clusterSetName string) *managedClusterSetBuilder {
return &managedClusterSetBuilder{
func NewClusterSet(clusterSetName string) *ManagedClusterSetBuilder {
return &ManagedClusterSetBuilder{
clusterset: &clusterapiv1beta2.ManagedClusterSet{
ObjectMeta: metav1.ObjectMeta{
Name: clusterSetName,
@@ -318,12 +318,12 @@ func NewClusterSet(clusterSetName string) *managedClusterSetBuilder {
}
}
func (b *managedClusterSetBuilder) WithClusterSelector(clusterSelector clusterapiv1beta2.ManagedClusterSelector) *managedClusterSetBuilder {
func (b *ManagedClusterSetBuilder) WithClusterSelector(clusterSelector clusterapiv1beta2.ManagedClusterSelector) *ManagedClusterSetBuilder {
b.clusterset.Spec.ClusterSelector = clusterSelector
return b
}
func (b *managedClusterSetBuilder) Build() *clusterapiv1beta2.ManagedClusterSet {
func (b *ManagedClusterSetBuilder) Build() *clusterapiv1beta2.ManagedClusterSet {
return b.clusterset
}
@@ -339,12 +339,12 @@ func NewClusterSetBinding(namespace, clusterSetName string) *clusterapiv1beta2.M
}
}
type addOnPlacementScoreBuilder struct {
type AddOnPlacementScoreBuilder struct {
addOnPlacementScore *clusterapiv1alpha1.AddOnPlacementScore
}
func NewAddOnPlacementScore(clusternamespace, name string) *addOnPlacementScoreBuilder {
return &addOnPlacementScoreBuilder{
func NewAddOnPlacementScore(clusternamespace, name string) *AddOnPlacementScoreBuilder {
return &AddOnPlacementScoreBuilder{
addOnPlacementScore: &clusterapiv1alpha1.AddOnPlacementScore{
ObjectMeta: metav1.ObjectMeta{
Namespace: clusternamespace,
@@ -354,7 +354,7 @@ func NewAddOnPlacementScore(clusternamespace, name string) *addOnPlacementScoreB
}
}
func (a *addOnPlacementScoreBuilder) WithScore(name string, score int32) *addOnPlacementScoreBuilder {
func (a *AddOnPlacementScoreBuilder) WithScore(name string, score int32) *AddOnPlacementScoreBuilder {
if a.addOnPlacementScore.Status.Scores == nil {
a.addOnPlacementScore.Status.Scores = []clusterapiv1alpha1.AddOnPlacementScoreItem{}
}
@@ -366,12 +366,12 @@ func (a *addOnPlacementScoreBuilder) WithScore(name string, score int32) *addOnP
return a
}
func (a *addOnPlacementScoreBuilder) WithValidUntil(validUntil time.Time) *addOnPlacementScoreBuilder {
func (a *AddOnPlacementScoreBuilder) WithValidUntil(validUntil time.Time) *AddOnPlacementScoreBuilder {
vu := metav1.NewTime(validUntil)
a.addOnPlacementScore.Status.ValidUntil = &vu
return a
}
func (a *addOnPlacementScoreBuilder) Build() *clusterapiv1alpha1.AddOnPlacementScore {
func (a *AddOnPlacementScoreBuilder) Build() *clusterapiv1alpha1.AddOnPlacementScore {
return a.addOnPlacementScore
}
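
The wholesale renames in this file (placementBuilder to PlacementBuilder and so on) address the common lint finding that an exported constructor such as NewPlacement returns an unexported type, which code outside the package cannot name in its own declarations. A hedged sketch of the pattern with invented types:

package testhelpers

// WidgetBuilder is exported so callers of NewWidget can declare variables of
// this type, pass builders between helpers, or embed them in their own fixtures.
type WidgetBuilder struct {
	name   string
	labels map[string]string
}

// NewWidget returns the exported builder; returning an unexported *widgetBuilder
// from an exported constructor is what the linter objects to.
func NewWidget(name string) *WidgetBuilder {
	return &WidgetBuilder{name: name, labels: map[string]string{}}
}

// WithLabel adds a label and returns the builder for chaining.
func (b *WidgetBuilder) WithLabel(key, value string) *WidgetBuilder {
	b.labels[key] = value
	return b
}

// Build returns the assembled result.
func (b *WidgetBuilder) Build() map[string]string {
	out := map[string]string{"name": b.name}
	for k, v := range b.labels {
		out[k] = v
	}
	return out
}

With the type exported, a caller in another package can write var b *testhelpers.WidgetBuilder = testhelpers.NewWidget("w").WithLabel("app", "demo"), which was previously impossible to spell.
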


@@ -36,14 +36,6 @@ const (
// ClusterCertificateRotatedCondition is a condition type that client certificate is rotated
ClusterCertificateRotatedCondition = "ClusterCertificateRotated"
// ClientCertificateUpdateFailedReason is a reason of condition ClusterCertificateRotatedCondition that
// the client certificate rotation fails.
ClientCertificateUpdateFailedReason = "ClientCertificateUpdateFailed"
// ClientCertificateUpdatedReason is a reason of condition ClusterCertificateRotatedCondition that
// the the client certificate succeeds
ClientCertificateUpdatedReason = "ClientCertificateUpdated"
)
// ControllerResyncInterval is exposed so that integration tests can crank up the controller sync speed.


@@ -29,7 +29,7 @@ import (
"open-cluster-management.io/ocm/pkg/registration/helpers"
)
// HasValidClientCertificate checks if there exists a valid client certificate in the given secret
// HasValidHubKubeconfig checks if there exists a valid client certificate in the given secret
// Returns true if all the conditions below are met:
// 1. KubeconfigFile exists when hasKubeconfig is true
// 2. TLSKeyFile exists
@@ -177,7 +177,7 @@ type CSRControl interface {
isApproved(name string) (bool, error)
getIssuedCertificate(name string) ([]byte, error)
// public so we can add indexer outside
// Informer is public so we can add indexer outside
Informer() cache.SharedIndexInformer
}


@@ -82,27 +82,31 @@ func TestHasValidHubKubeconfig(t *testing.T) {
},
{
name: "no cert",
secret: testinghelpers.NewHubKubeconfigSecret(testNamespace, testSecretName, "", &testinghelpers.TestCert{Key: []byte("key")}, map[string][]byte{
KubeconfigFile: testinghelpers.NewKubeconfig(nil, nil),
}),
secret: testinghelpers.NewHubKubeconfigSecret(
testNamespace, testSecretName, "", &testinghelpers.TestCert{Key: []byte("key")}, map[string][]byte{
KubeconfigFile: testinghelpers.NewKubeconfig(nil, nil),
}),
},
{
name: "bad cert",
secret: testinghelpers.NewHubKubeconfigSecret(testNamespace, testSecretName, "", &testinghelpers.TestCert{Key: []byte("key"), Cert: []byte("bad cert")}, map[string][]byte{
KubeconfigFile: testinghelpers.NewKubeconfig(nil, nil),
}),
secret: testinghelpers.NewHubKubeconfigSecret(
testNamespace, testSecretName, "", &testinghelpers.TestCert{Key: []byte("key"), Cert: []byte("bad cert")}, map[string][]byte{
KubeconfigFile: testinghelpers.NewKubeconfig(nil, nil),
}),
},
{
name: "expired cert",
secret: testinghelpers.NewHubKubeconfigSecret(testNamespace, testSecretName, "", testinghelpers.NewTestCert("test", -60*time.Second), map[string][]byte{
KubeconfigFile: testinghelpers.NewKubeconfig(nil, nil),
}),
secret: testinghelpers.NewHubKubeconfigSecret(
testNamespace, testSecretName, "", testinghelpers.NewTestCert("test", -60*time.Second), map[string][]byte{
KubeconfigFile: testinghelpers.NewKubeconfig(nil, nil),
}),
},
{
name: "invalid common name",
secret: testinghelpers.NewHubKubeconfigSecret(testNamespace, testSecretName, "", testinghelpers.NewTestCert("test", 60*time.Second), map[string][]byte{
KubeconfigFile: testinghelpers.NewKubeconfig(nil, nil),
}),
secret: testinghelpers.NewHubKubeconfigSecret(
testNamespace, testSecretName, "", testinghelpers.NewTestCert("test", 60*time.Second), map[string][]byte{
KubeconfigFile: testinghelpers.NewKubeconfig(nil, nil),
}),
subject: &pkix.Name{
CommonName: "wrong-common-name",
},
@@ -204,8 +208,10 @@ func TestGetCertValidityPeriod(t *testing.T) {
expectedErr: "no client certificate found in secret \"testns/testsecret\"",
},
{
name: "bad cert",
secret: testinghelpers.NewHubKubeconfigSecret(testNamespace, testSecretName, "", &testinghelpers.TestCert{Cert: []byte("bad cert")}, map[string][]byte{}),
name: "bad cert",
secret: testinghelpers.NewHubKubeconfigSecret(
testNamespace, testSecretName, "",
&testinghelpers.TestCert{Cert: []byte("bad cert")}, map[string][]byte{}),
expectedErr: "unable to parse TLS certificates: data does not contain any valid RSA or ECDSA certificates",
},
{


@@ -153,7 +153,7 @@ func TestSync(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
ctrl := &mockCSRControl{}
csrs := []runtime.Object{}
var csrs []runtime.Object
if c.approvedCSRCert != nil {
csr := testinghelpers.NewApprovedCSR(testinghelpers.CSRHolder{Name: testCSRName})
csr.Status.Certificate = c.approvedCSRCert.Cert
@@ -224,7 +224,7 @@ func TestSync(t *testing.T) {
}
if !conditionEqual(c.expectedCondition, updater.cond) {
t.Errorf("conditon is not correct, expected %v, got %v", c.expectedCondition, updater.cond)
t.Errorf("condition is not correct, expected %v, got %v", c.expectedCondition, updater.cond)
}
c.validateActions(t, hubKubeClient.Actions(), agentKubeClient.Actions())
@@ -258,7 +258,7 @@ type fakeStatusUpdater struct {
cond *metav1.Condition
}
func (f *fakeStatusUpdater) update(ctx context.Context, cond metav1.Condition) error {
func (f *fakeStatusUpdater) update(_ context.Context, cond metav1.Condition) error {
f.cond = cond.DeepCopy()
return nil
}
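
Several fakes in these tests, like fakeStatusUpdater here and mockCSRControl just below, rename parameters they never read to the blank identifier, which silences unused-parameter warnings while keeping the signature required by the interface. An illustrative, self-contained variant:

package example

import "context"

type statusUpdater interface {
	update(ctx context.Context, message string) error
}

// fakeStatusUpdater records the message; it has no use for the context, so
// that parameter is named _ to make the intent explicit.
type fakeStatusUpdater struct {
	lastMessage string
}

func (f *fakeStatusUpdater) update(_ context.Context, message string) error {
	f.lastMessage = message
	return nil
}

// Compile-time check that the fake still satisfies the interface.
var _ statusUpdater = (*fakeStatusUpdater)(nil)
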
@@ -269,7 +269,8 @@ type mockCSRControl struct {
csrClient *clienttesting.Fake
}
func (m *mockCSRControl) create(ctx context.Context, recorder events.Recorder, objMeta metav1.ObjectMeta, csrData []byte, signerName string, expirationSeconds *int32) (string, error) {
func (m *mockCSRControl) create(
_ context.Context, _ events.Recorder, objMeta metav1.ObjectMeta, _ []byte, _ string, _ *int32) (string, error) {
mockCSR := &unstructured.Unstructured{}
_, err := m.csrClient.Invokes(clienttesting.CreateActionImpl{
ActionImpl: clienttesting.ActionImpl{


@@ -4,8 +4,6 @@ import (
"reflect"
"testing"
"k8s.io/apimachinery/pkg/runtime"
clusterv1 "open-cluster-management.io/api/cluster/v1"
)
@@ -101,14 +99,6 @@ func TestFindTaintByKey(t *testing.T) {
}
}
func getApplyFileNames(applyFiles map[string]runtime.Object) []string {
keys := []string{}
for key := range applyFiles {
keys = append(keys, key)
}
return keys
}
var (
UnavailableTaint = clusterv1.Taint{
Key: clusterv1.ManagedClusterTaintUnavailable,


@@ -244,7 +244,7 @@ func TestDiscoveryController_Sync(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
objs := []runtime.Object{}
var objs []runtime.Object
if c.cluster != nil {
objs = append(objs, c.cluster)
}


@@ -19,7 +19,7 @@ import (
clusterlisterv1 "open-cluster-management.io/api/client/cluster/listers/cluster/v1"
clusterv1 "open-cluster-management.io/api/cluster/v1"
patcher "open-cluster-management.io/ocm/pkg/common/patcher"
"open-cluster-management.io/ocm/pkg/common/patcher"
"open-cluster-management.io/ocm/pkg/common/queue"
)
@@ -83,7 +83,7 @@ func (c *managedClusterAddOnHealthCheckController) sync(ctx context.Context, syn
return err
}
errs := []error{}
var errs []error
patcher := patcher.NewPatcher[
*addonv1alpha1.ManagedClusterAddOn, addonv1alpha1.ManagedClusterAddOnSpec, addonv1alpha1.ManagedClusterAddOnStatus](
c.addOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName),


@@ -76,7 +76,7 @@ func (c *clusterroleController) sync(ctx context.Context, syncCtx factory.SyncCo
return err
}
errs := []error{}
var errs []error
// Clean up managedcluster clusterroles if there are no managed clusters
if len(managedClusters) == 0 {
results := resourceapply.DeleteAll(


@@ -101,7 +101,7 @@ func (c *leaseController) sync(ctx context.Context, syncCtx factory.SyncContext)
Labels: map[string]string{clusterv1.ClusterNameLabelKey: cluster.Name},
},
Spec: coordv1.LeaseSpec{
HolderIdentity: pointer.StringPtr(leaseName),
HolderIdentity: pointer.String(leaseName),
RenewTime: &metav1.MicroTime{Time: time.Now()},
},
}
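
pointer.StringPtr in k8s.io/utils/pointer is a deprecated alias for pointer.String, so the change above is purely mechanical; more recent k8s.io/utils releases also offer a generic ptr.To helper, but that is beyond this commit. A minimal sketch:

package main

import (
	"fmt"

	"k8s.io/utils/pointer"
)

func main() {
	// pointer.String replaces the deprecated pointer.StringPtr alias.
	holderIdentity := pointer.String("cluster-lease-1")
	fmt.Println(*holderIdentity)
}
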


@@ -155,7 +155,7 @@ func (c *managedClusterController) sync(ctx context.Context, syncCtx factory.Syn
},
}
errs := []error{}
var errs []error
_, _, err = resourceapply.ApplyNamespace(ctx, c.kubeClient.CoreV1(), syncCtx.Recorder(), namespace)
if err != nil {
errs = append(errs, err)
@@ -203,7 +203,7 @@ func (c *managedClusterController) sync(ctx context.Context, syncCtx factory.Syn
}
func (c *managedClusterController) removeManagedClusterResources(ctx context.Context, managedClusterName string) error {
errs := []error{}
var errs []error
// Clean up managed cluster manifests
assetFn := helpers.ManagedClusterAssetFn(manifestFiles, managedClusterName)
resourceResults := resourceapply.DeleteAll(ctx, resourceapply.NewKubeClientHolder(c.kubeClient), c.eventRecorder, assetFn, staticFiles...)


@@ -28,6 +28,12 @@ import (
"open-cluster-management.io/ocm/pkg/common/queue"
)
const (
// TODO move these to api repos
ReasonClusterSelected = "ClustersSelected"
ReasonNoClusterMatchced = "NoClusterMatched"
)
// managedClusterSetController reconciles instances of ManagedClusterSet on the hub.
type managedClusterSetController struct {
patcher patcher.Patcher[*clusterv1beta2.ManagedClusterSet, clusterv1beta2.ManagedClusterSetSpec, clusterv1beta2.ManagedClusterSetStatus]
@@ -157,11 +163,11 @@ func (c *managedClusterSetController) syncClusterSet(ctx context.Context, origin
}
if count == 0 {
emptyCondition.Status = metav1.ConditionTrue
emptyCondition.Reason = "NoClusterMatched"
emptyCondition.Reason = ReasonNoClusterMatchced
emptyCondition.Message = "No ManagedCluster selected"
} else {
emptyCondition.Status = metav1.ConditionFalse
emptyCondition.Reason = "ClustersSelected"
emptyCondition.Reason = ReasonClusterSelected
emptyCondition.Message = fmt.Sprintf("%d ManagedClusters selected", count)
}
meta.SetStatusCondition(&clusterSet.Status.Conditions, emptyCondition)
@@ -207,9 +213,9 @@ func (c *managedClusterSetController) enqueueUpdateClusterClusterSet(oldCluster,
}
// getDiffClusterSetsNames returns the names of the clustersets that differ between the old and new lists
func getDiffClusterSetsNames(oldSets, newSets []*clusterv1beta2.ManagedClusterSet) sets.String {
oldSetsMap := sets.NewString()
newSetsMap := sets.NewString()
func getDiffClusterSetsNames(oldSets, newSets []*clusterv1beta2.ManagedClusterSet) sets.Set[string] {
oldSetsMap := sets.New[string]()
newSetsMap := sets.New[string]()
for _, oldSet := range oldSets {
oldSetsMap.Insert(oldSet.Name)


@@ -43,7 +43,7 @@ func TestSyncClusterSet(t *testing.T) {
expectCondition: metav1.Condition{
Type: clusterv1beta2.ManagedClusterSetConditionEmpty,
Status: metav1.ConditionFalse,
Reason: "ClustersSelected",
Reason: ReasonClusterSelected,
Message: "1 ManagedClusters selected",
},
},
@@ -67,7 +67,7 @@ func TestSyncClusterSet(t *testing.T) {
expectCondition: metav1.Condition{
Type: clusterv1beta2.ManagedClusterSetConditionEmpty,
Status: metav1.ConditionFalse,
Reason: "ClustersSelected",
Reason: ReasonClusterSelected,
Message: "1 ManagedClusters selected",
},
},
@@ -91,7 +91,7 @@ func TestSyncClusterSet(t *testing.T) {
expectCondition: metav1.Condition{
Type: clusterv1beta2.ManagedClusterSetConditionEmpty,
Status: metav1.ConditionTrue,
Reason: "NoClusterMatched",
Reason: ReasonNoClusterMatchced,
Message: "No ManagedCluster selected",
},
},
@@ -125,7 +125,7 @@ func TestSyncClusterSet(t *testing.T) {
expectCondition: metav1.Condition{
Type: clusterv1beta2.ManagedClusterSetConditionEmpty,
Status: metav1.ConditionFalse,
Reason: "ClustersSelected",
Reason: ReasonClusterSelected,
Message: "2 ManagedClusters selected",
},
},
@@ -155,7 +155,7 @@ func TestSyncClusterSet(t *testing.T) {
expectCondition: metav1.Condition{
Type: clusterv1beta2.ManagedClusterSetConditionEmpty,
Status: metav1.ConditionFalse,
Reason: "ClustersSelected",
Reason: ReasonClusterSelected,
Message: "2 ManagedClusters selected",
},
},
@@ -184,7 +184,7 @@ func TestSyncClusterSet(t *testing.T) {
expectCondition: metav1.Condition{
Type: clusterv1beta2.ManagedClusterSetConditionEmpty,
Status: metav1.ConditionTrue,
Reason: "NoClusterMatched",
Reason: ReasonNoClusterMatchced,
Message: "No ManagedCluster selected",
},
},
@@ -206,7 +206,7 @@ func TestSyncClusterSet(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
objects := []runtime.Object{}
var objects []runtime.Object
for _, cluster := range c.existingClusters {
objects = append(objects, cluster)
}
@@ -255,7 +255,7 @@ func TestSyncClusterSet(t *testing.T) {
t.Errorf("Failed to get clusterset: %v, error: %v", c.existingClusterSet.Name, err)
}
if !hasCondition(updatedSet.Status.Conditions, c.expectCondition) {
t.Errorf("expected conditon:%v. is not found: %v", c.expectCondition, updatedSet.Status.Conditions)
t.Errorf("expected condition:%v. is not found: %v", c.expectCondition, updatedSet.Status.Conditions)
}
})
}
@@ -266,7 +266,7 @@ func TestGetDiffClustersets(t *testing.T) {
name string
oldSets []*clusterv1beta2.ManagedClusterSet
newSets []*clusterv1beta2.ManagedClusterSet
expectDiffSet sets.String
expectDiffSet sets.Set[string]
}{
{
name: "update a set",
@@ -276,7 +276,7 @@ func TestGetDiffClustersets(t *testing.T) {
newSets: []*clusterv1beta2.ManagedClusterSet{
newManagedClusterSet("s1"), newManagedClusterSet("s3"),
},
expectDiffSet: sets.NewString("s2", "s3"),
expectDiffSet: sets.New[string]("s2", "s3"),
},
{
name: "add a set",
@@ -286,7 +286,7 @@ func TestGetDiffClustersets(t *testing.T) {
newSets: []*clusterv1beta2.ManagedClusterSet{
newManagedClusterSet("s1"), newManagedClusterSet("s2"),
},
expectDiffSet: sets.NewString("s2"),
expectDiffSet: sets.New[string]("s2"),
},
{
name: "delete a set",
@@ -296,7 +296,7 @@ func TestGetDiffClustersets(t *testing.T) {
newSets: []*clusterv1beta2.ManagedClusterSet{
newManagedClusterSet("s1"),
},
expectDiffSet: sets.NewString("s2"),
expectDiffSet: sets.New[string]("s2"),
},
{
name: "old set is nil",
@@ -304,7 +304,7 @@ func TestGetDiffClustersets(t *testing.T) {
newSets: []*clusterv1beta2.ManagedClusterSet{
newManagedClusterSet("s1"),
},
expectDiffSet: sets.NewString("s1"),
expectDiffSet: sets.New[string]("s1"),
},
{
name: "new set is nil",
@@ -312,7 +312,7 @@ func TestGetDiffClustersets(t *testing.T) {
newManagedClusterSet("s1"),
},
newSets: []*clusterv1beta2.ManagedClusterSet{},
expectDiffSet: sets.NewString("s1"),
expectDiffSet: sets.New[string]("s1"),
},
}
@@ -368,7 +368,7 @@ func TestEnqueueUpdateClusterClusterSet(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
objects := []runtime.Object{}
var objects []runtime.Object
for _, clusterset := range c.existingClusterSets {
objects = append(objects, clusterset)

View File

@@ -71,8 +71,9 @@ func TestSyncDefaultClusterSet(t *testing.T) {
},
},
{
name: "sync default cluster set with disabled annotation",
existingClusterSet: newDefaultManagedClusterSetWithAnnotation(DefaultManagedClusterSetName, autoUpdateAnnotation, "false", DefaultManagedClusterSet.Spec, false),
name: "sync default cluster set with disabled annotation",
existingClusterSet: newDefaultManagedClusterSetWithAnnotation(
DefaultManagedClusterSetName, autoUpdateAnnotation, "false", DefaultManagedClusterSet.Spec, false),
validateActions: func(t *testing.T, actions []clienttesting.Action) {
testingcommon.AssertNoActions(t, actions)
},
@@ -81,7 +82,7 @@ func TestSyncDefaultClusterSet(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
objects := []runtime.Object{}
var objects []runtime.Object
if c.existingClusterSet != nil {
objects = append(objects, c.existingClusterSet)
@@ -128,7 +129,8 @@ func newDefaultManagedClusterSet(name string, spec clusterv1beta2.ManagedCluster
return clusterSet
}
func newDefaultManagedClusterSetWithAnnotation(name string, k, v string, spec clusterv1beta2.ManagedClusterSetSpec, terminating bool) *clusterv1beta2.ManagedClusterSet {
func newDefaultManagedClusterSetWithAnnotation(
name, k, v string, spec clusterv1beta2.ManagedClusterSetSpec, terminating bool) *clusterv1beta2.ManagedClusterSet {
clusterSet := &clusterv1beta2.ManagedClusterSet{
ObjectMeta: metav1.ObjectMeta{
Name: name,

View File

@@ -64,8 +64,9 @@ func TestSyncGlobalClusterSet(t *testing.T) {
},
},
{
name: "sync global cluster set with disabled annotation",
existingClusterSet: newGlobalManagedClusterSetWithAnnotation(GlobalManagedClusterSetName, autoUpdateAnnotation, "false", GlobalManagedClusterSet.Spec, false),
name: "sync global cluster set with disabled annotation",
existingClusterSet: newGlobalManagedClusterSetWithAnnotation(
GlobalManagedClusterSetName, autoUpdateAnnotation, "false", GlobalManagedClusterSet.Spec, false),
validateActions: func(t *testing.T, actions []clienttesting.Action) {
testingcommon.AssertNoActions(t, actions)
},
@@ -74,7 +75,7 @@ func TestSyncGlobalClusterSet(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
objects := []runtime.Object{}
var objects []runtime.Object
if c.existingClusterSet != nil {
objects = append(objects, c.existingClusterSet)
@@ -120,7 +121,8 @@ func newGlobalManagedClusterSet(name string, spec clusterv1beta2.ManagedClusterS
return clusterSet
}
func newGlobalManagedClusterSetWithAnnotation(name string, k, v string, spec clusterv1beta2.ManagedClusterSetSpec, terminating bool) *clusterv1beta2.ManagedClusterSet {
func newGlobalManagedClusterSetWithAnnotation(
name, k, v string, spec clusterv1beta2.ManagedClusterSetSpec, terminating bool) *clusterv1beta2.ManagedClusterSet {
clusterSet := &clusterv1beta2.ManagedClusterSet{
ObjectMeta: metav1.ObjectMeta{
Name: name,

View File

@@ -91,7 +91,7 @@ func TestSync(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
objects := []runtime.Object{}
var objects []runtime.Object
objects = append(objects, c.clusterSets...)
objects = append(objects, c.clusterSetBinding)
@@ -154,7 +154,7 @@ func TestEnqueue(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
objects := []runtime.Object{}
var objects []runtime.Object
objects = append(objects, c.clusterSet)
objects = append(objects, c.clusterSetBindings...)

View File

@@ -173,7 +173,7 @@ func removeFinalizer(obj runtime.Object, finalizerName string) bool {
return false
}
newFinalizers := []string{}
var newFinalizers []string
accessor, _ := meta.Accessor(obj)
found := false
for _, finalizer := range accessor.GetFinalizers() {

View File

@@ -170,7 +170,7 @@ func TestSyncRoleAndRoleBinding(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
objects := []runtime.Object{}
var objects []runtime.Object
if c.roleBinding != nil {
objects = append(objects, c.roleBinding)
}

View File

@@ -3,6 +3,7 @@ package addon
import (
"testing"
certificates "k8s.io/api/certificates/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
@@ -10,8 +11,11 @@ import (
testinghelpers "open-cluster-management.io/ocm/pkg/registration/helpers/testing"
)
const (
addOnName = "addon1"
)
func TestGetRegistrationConfigs(t *testing.T) {
addOnName := "addon1"
addOnNamespace := "ns1"
cases := []struct {
@@ -44,13 +48,13 @@ func TestGetRegistrationConfigs(t *testing.T) {
Status: addonv1alpha1.ManagedClusterAddOnStatus{
Registrations: []addonv1alpha1.RegistrationConfig{
{
SignerName: "kubernetes.io/kube-apiserver-client",
SignerName: certificates.KubeAPIServerClientSignerName,
},
},
},
},
configs: []registrationConfig{
newRegistrationConfig(addOnName, addOnNamespace, "kubernetes.io/kube-apiserver-client", "", nil, false),
newRegistrationConfig(addOnName, addOnNamespace, certificates.KubeAPIServerClientSignerName, "", nil, false),
},
},
{
@@ -64,14 +68,14 @@ func TestGetRegistrationConfigs(t *testing.T) {
Status: addonv1alpha1.ManagedClusterAddOnStatus{
Registrations: []addonv1alpha1.RegistrationConfig{
{
SignerName: "kubernetes.io/kube-apiserver-client",
SignerName: certificates.KubeAPIServerClientSignerName,
},
},
Namespace: addOnNamespace,
},
},
configs: []registrationConfig{
newRegistrationConfig(addOnName, addOnNamespace, "kubernetes.io/kube-apiserver-client", "", nil, false),
newRegistrationConfig(addOnName, addOnNamespace, certificates.KubeAPIServerClientSignerName, "", nil, false),
},
},
{
@@ -90,13 +94,13 @@ func TestGetRegistrationConfigs(t *testing.T) {
Status: addonv1alpha1.ManagedClusterAddOnStatus{
Registrations: []addonv1alpha1.RegistrationConfig{
{
SignerName: "kubernetes.io/kube-apiserver-client",
SignerName: certificates.KubeAPIServerClientSignerName,
},
},
},
},
configs: []registrationConfig{
newRegistrationConfig(addOnName, addOnNamespace, "kubernetes.io/kube-apiserver-client", "", nil, true),
newRegistrationConfig(addOnName, addOnNamespace, certificates.KubeAPIServerClientSignerName, "", nil, true),
},
},
{
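Two small cleanups in this test: the addon name literal moves into a package-level const (a typical goconst fix), and the repeated "kubernetes.io/kube-apiserver-client" string is replaced by the constant exported from k8s.io/api/certificates/v1. A one-line check of that equivalence, assuming the k8s.io/api module is on the import path:

```go
package main

import (
	"fmt"

	certificates "k8s.io/api/certificates/v1"
)

func main() {
	// The upstream constant carries the exact literal previously repeated in the test.
	fmt.Println(certificates.KubeAPIServerClientSignerName == "kubernetes.io/kube-apiserver-client") // true
}
```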

View File

@@ -114,7 +114,7 @@ func (c *addOnRegistrationController) sync(ctx context.Context, syncCtx factory.
}
// handle resync
errs := []error{}
var errs []error
for addOnName := range c.addOnRegistrationConfigs {
_, err := c.hubAddOnLister.ManagedClusterAddOns(c.clusterName).Get(addOnName)
if err == nil {
@@ -158,7 +158,7 @@ func (c *addOnRegistrationController) syncAddOn(ctx context.Context, syncCtx fac
}
// stop registration for the stale registration configs
errs := []error{}
var errs []error
for hash, cachedConfig := range cachedConfigs {
if _, ok := configs[hash]; ok {
continue
@@ -202,7 +202,7 @@ func (c *addOnRegistrationController) startRegistration(ctx context.Context, con
// the addon agent runs outside the managed cluster, for more details see the hosted mode design docs for addon:
// https://github.com/open-cluster-management-io/enhancements/pull/65), it generates the secret on the
// management(hosting) cluster
var kubeClient kubernetes.Interface = c.spokeKubeClient
kubeClient := c.spokeKubeClient
if config.AgentRunningOutsideManagedCluster {
kubeClient = c.managementKubeClient
}
@@ -298,7 +298,7 @@ func (c *addOnRegistrationController) stopRegistration(ctx context.Context, conf
config.stopFunc()
}
var kubeClient kubernetes.Interface = c.spokeKubeClient
kubeClient := c.spokeKubeClient
if config.AgentRunningOutsideManagedCluster {
// delete the secret generated on the management cluster
kubeClient = c.managementKubeClient
@@ -315,7 +315,7 @@ func (c *addOnRegistrationController) stopRegistration(ctx context.Context, conf
// cleanup cleans both the registration configs and client certificate controllers for the addon
func (c *addOnRegistrationController) cleanup(ctx context.Context, addOnName string) error {
errs := []error{}
var errs []error
for _, config := range c.addOnRegistrationConfigs[addOnName] {
if err := c.stopRegistration(ctx, config); err != nil {
errs = append(errs, err)
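The two kubeClient hunks above drop an explicit interface annotation. Assuming the controller's spokeKubeClient and managementKubeClient fields are already declared as kubernetes.Interface (which the short form needs in order to keep compiling), kubeClient := c.spokeKubeClient infers the same interface type, so the var ... kubernetes.Interface = ... spelling is redundant. A generic sketch with a stand-in interface, not the real client types:

```go
package main

import "fmt"

// Speaker stands in for kubernetes.Interface; the struct fields are already
// declared with the interface type, as the controller's fields are assumed to be.
type Speaker interface{ Say() string }

type fakeClient struct{ msg string }

func (c fakeClient) Say() string { return c.msg }

type controller struct {
	spoke, management Speaker
}

func (c controller) pick(hostedMode bool) Speaker {
	// The short declaration keeps the interface type of the field, so the
	// explicit `var client Speaker = c.spoke` form adds nothing.
	client := c.spoke
	if hostedMode {
		client = c.management
	}
	return client
}

func main() {
	c := controller{spoke: fakeClient{"spoke"}, management: fakeClient{"management"}}
	fmt.Println(c.pick(true).Say()) // management
}
```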

View File

@@ -23,7 +23,6 @@ import (
func TestFilterCSREvents(t *testing.T) {
clusterName := "cluster1"
addonName := "addon1"
signerName := "signer1"
cases := []struct {
@@ -50,7 +49,7 @@ func TestFilterCSREvents(t *testing.T) {
Labels: map[string]string{
// the labels are only hints. Anyone could set/modify them.
clusterv1.ClusterNameLabelKey: clusterName,
addonv1alpha1.AddonLabelKey: addonName,
addonv1alpha1.AddonLabelKey: addOnName,
},
},
Spec: certificates.CertificateSigningRequestSpec{
@@ -63,7 +62,7 @@ func TestFilterCSREvents(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
filterFunc := createCSREventFilterFunc(clusterName, addonName, signerName)
filterFunc := createCSREventFilterFunc(clusterName, addOnName, signerName)
actual := filterFunc(c.csr)
if actual != c.expected {
t.Errorf("Expected %v but got %v", c.expected, actual)
@@ -74,7 +73,6 @@ func TestFilterCSREvents(t *testing.T) {
func TestRegistrationSync(t *testing.T) {
clusterName := "cluster1"
addonName := "addon1"
signerName := "signer1"
config1 := addonv1alpha1.RegistrationConfig{
@@ -84,7 +82,7 @@ func TestRegistrationSync(t *testing.T) {
config2 := addonv1alpha1.RegistrationConfig{
SignerName: signerName,
Subject: addonv1alpha1.Subject{
User: addonName,
User: addOnName,
},
}
@@ -99,8 +97,8 @@ func TestRegistrationSync(t *testing.T) {
}{
{
name: "addon registration not enabled",
queueKey: addonName,
addOn: newManagedClusterAddOn(clusterName, addonName, nil, false),
queueKey: addOnName,
addOn: newManagedClusterAddOn(clusterName, addOnName, nil, false),
validateActions: func(t *testing.T, actions, managementActions []clienttesting.Action) {
if len(actions) != 0 {
t.Errorf("expect 0 actions but got %d", len(actions))
@@ -112,11 +110,11 @@ func TestRegistrationSync(t *testing.T) {
},
{
name: "addon registration enabled",
queueKey: addonName,
addOn: newManagedClusterAddOn(clusterName, addonName,
queueKey: addOnName,
addOn: newManagedClusterAddOn(clusterName, addOnName,
[]addonv1alpha1.RegistrationConfig{config1}, false),
expectedAddOnRegistrationConfigHashs: map[string][]string{
addonName: {hash(config1, "", false)},
addOnName: {hash(config1, "", false)},
},
validateActions: func(t *testing.T, actions, managementActions []clienttesting.Action) {
if len(actions) != 0 {
@@ -126,21 +124,21 @@ func TestRegistrationSync(t *testing.T) {
},
{
name: "addon registration updated",
queueKey: addonName,
addOn: newManagedClusterAddOn(clusterName, addonName,
queueKey: addOnName,
addOn: newManagedClusterAddOn(clusterName, addOnName,
[]addonv1alpha1.RegistrationConfig{config2}, false),
addOnRegistrationConfigs: map[string]map[string]registrationConfig{
addonName: {
addOnName: {
hash(config1, "", false): {
secretName: "secret1",
addonInstallOption: addonInstallOption{
InstallationNamespace: addonName,
InstallationNamespace: addOnName,
},
},
},
},
expectedAddOnRegistrationConfigHashs: map[string][]string{
addonName: {hash(config2, "", false)},
addOnName: {hash(config2, "", false)},
},
validateActions: func(t *testing.T, actions, managementActions []clienttesting.Action) {
if len(actions) != 1 {
@@ -151,21 +149,21 @@ func TestRegistrationSync(t *testing.T) {
},
{
name: "addon install namespace updated",
queueKey: addonName,
addOn: setAddonInstallNamespace(newManagedClusterAddOn(clusterName, addonName,
queueKey: addOnName,
addOn: setAddonInstallNamespace(newManagedClusterAddOn(clusterName, addOnName,
[]addonv1alpha1.RegistrationConfig{config2}, false), "ns1"),
addOnRegistrationConfigs: map[string]map[string]registrationConfig{
addonName: {
addOnName: {
hash(config2, "", false): {
secretName: "secret1",
addonInstallOption: addonInstallOption{
InstallationNamespace: addonName,
InstallationNamespace: addOnName,
},
},
},
},
expectedAddOnRegistrationConfigHashs: map[string][]string{
addonName: {hash(config2, "ns1", false)},
addOnName: {hash(config2, "ns1", false)},
},
validateActions: func(t *testing.T, actions, managementActions []clienttesting.Action) {
if len(actions) != 1 {
@@ -176,13 +174,13 @@ func TestRegistrationSync(t *testing.T) {
},
{
name: "addon is deleted",
queueKey: addonName,
queueKey: addOnName,
addOnRegistrationConfigs: map[string]map[string]registrationConfig{
addonName: {
addOnName: {
hash(config1, "", false): {
secretName: "secret1",
addonInstallOption: addonInstallOption{
InstallationNamespace: addonName,
InstallationNamespace: addOnName,
},
},
},
@@ -196,10 +194,10 @@ func TestRegistrationSync(t *testing.T) {
},
{
name: "hosted addon registration enabled",
queueKey: addonName,
addOn: newManagedClusterAddOn(clusterName, addonName, []addonv1alpha1.RegistrationConfig{config1}, true),
queueKey: addOnName,
addOn: newManagedClusterAddOn(clusterName, addOnName, []addonv1alpha1.RegistrationConfig{config1}, true),
expectedAddOnRegistrationConfigHashs: map[string][]string{
addonName: {hash(config1, "", true)},
addOnName: {hash(config1, "", true)},
},
addonAgentOutsideManagedCluster: true,
validateActions: func(t *testing.T, actions, managementActions []clienttesting.Action) {
@@ -213,23 +211,23 @@ func TestRegistrationSync(t *testing.T) {
},
{
name: "hosted addon registration updated",
queueKey: addonName,
addOn: newManagedClusterAddOn(clusterName, addonName,
queueKey: addOnName,
addOn: newManagedClusterAddOn(clusterName, addOnName,
[]addonv1alpha1.RegistrationConfig{config2}, true),
addonAgentOutsideManagedCluster: true,
addOnRegistrationConfigs: map[string]map[string]registrationConfig{
addonName: {
addOnName: {
hash(config1, "", true): {
secretName: "secret1",
addonInstallOption: addonInstallOption{
InstallationNamespace: addonName,
InstallationNamespace: addOnName,
AgentRunningOutsideManagedCluster: true,
},
},
},
},
expectedAddOnRegistrationConfigHashs: map[string][]string{
addonName: {hash(config2, "", true)},
addOnName: {hash(config2, "", true)},
},
validateActions: func(t *testing.T, actions, managementActions []clienttesting.Action) {
if len(actions) != 0 {
@@ -243,12 +241,12 @@ func TestRegistrationSync(t *testing.T) {
},
{
name: "deploy mode changes from hosted to default",
queueKey: addonName,
addOn: newManagedClusterAddOn(clusterName, addonName,
queueKey: addOnName,
addOn: newManagedClusterAddOn(clusterName, addOnName,
[]addonv1alpha1.RegistrationConfig{config2}, false),
addonAgentOutsideManagedCluster: false,
addOnRegistrationConfigs: map[string]map[string]registrationConfig{
addonName: {
addOnName: {
hash(config2, "", true): {
secretName: "secret1",
addonInstallOption: addonInstallOption{
@@ -258,7 +256,7 @@ func TestRegistrationSync(t *testing.T) {
},
},
expectedAddOnRegistrationConfigHashs: map[string][]string{
addonName: {hash(config2, "", false)},
addOnName: {hash(config2, "", false)},
},
validateActions: func(t *testing.T, actions, managementActions []clienttesting.Action) {
if len(actions) != 0 {
@@ -272,23 +270,23 @@ func TestRegistrationSync(t *testing.T) {
},
{
name: "deploy mode changes from default to hosted",
queueKey: addonName,
addOn: newManagedClusterAddOn(clusterName, addonName,
queueKey: addOnName,
addOn: newManagedClusterAddOn(clusterName, addOnName,
[]addonv1alpha1.RegistrationConfig{config2}, true),
addonAgentOutsideManagedCluster: true,
addOnRegistrationConfigs: map[string]map[string]registrationConfig{
addonName: {
addOnName: {
hash(config2, "", false): {
secretName: "secret1",
addonInstallOption: addonInstallOption{
InstallationNamespace: addonName,
InstallationNamespace: addOnName,
AgentRunningOutsideManagedCluster: false,
},
},
},
},
expectedAddOnRegistrationConfigHashs: map[string][]string{
addonName: {hash(config2, "", true)},
addOnName: {hash(config2, "", true)},
},
validateActions: func(t *testing.T, actions, managementActions []clienttesting.Action) {
if len(managementActions) != 0 {
@@ -302,13 +300,13 @@ func TestRegistrationSync(t *testing.T) {
},
{
name: "hosted addon is deleted",
queueKey: addonName,
queueKey: addOnName,
addOnRegistrationConfigs: map[string]map[string]registrationConfig{
addonName: {
addOnName: {
hash(config1, "", true): {
secretName: "secret1",
addonInstallOption: addonInstallOption{
InstallationNamespace: addonName,
InstallationNamespace: addOnName,
AgentRunningOutsideManagedCluster: true,
},
},
@@ -324,14 +322,14 @@ func TestRegistrationSync(t *testing.T) {
{
name: "resync",
queueKey: factory.DefaultQueueKey,
addOn: newManagedClusterAddOn(clusterName, addonName,
addOn: newManagedClusterAddOn(clusterName, addOnName,
[]addonv1alpha1.RegistrationConfig{config1}, false),
addOnRegistrationConfigs: map[string]map[string]registrationConfig{
addonName: {
addOnName: {
hash(config1, "", false): {
secretName: "secret1",
addonInstallOption: addonInstallOption{
InstallationNamespace: addonName,
InstallationNamespace: addOnName,
},
},
},
@@ -345,7 +343,7 @@ func TestRegistrationSync(t *testing.T) {
},
},
expectedAddOnRegistrationConfigHashs: map[string][]string{
addonName: {hash(config1, "", false)},
addOnName: {hash(config1, "", false)},
},
validateActions: func(t *testing.T, actions, managementActions []clienttesting.Action) {
if len(actions) != 1 {
@@ -360,7 +358,7 @@ func TestRegistrationSync(t *testing.T) {
t.Run(c.name, func(t *testing.T) {
kubeClient := kubefake.NewSimpleClientset()
managementClient := kubefake.NewSimpleClientset()
addons := []runtime.Object{}
var addons []runtime.Object
if c.addOn != nil {
addons = append(addons, c.addOn)
}

View File

@@ -41,7 +41,8 @@ func TestLeaseUpdate(t *testing.T) {
clusters: []runtime.Object{},
needToStartUpdateBefore: true,
validateActions: testingcommon.AssertNoMoreUpdates,
expectedErr: "unable to get managed cluster \"testmanagedcluster\" from hub: managedcluster.cluster.open-cluster-management.io \"testmanagedcluster\" not found",
expectedErr: "unable to get managed cluster \"testmanagedcluster\" from hub: " +
"managedcluster.cluster.open-cluster-management.io \"testmanagedcluster\" not found",
},
{
name: "unaccept a managed cluster after lease update routine is started",
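Several expectedErr strings in these tests are wrapped across two lines with +, presumably to satisfy a line-length check. Since concatenation of string literals is folded at compile time, the wrapped form is byte-for-byte identical to the original message; a quick sketch:

```go
package main

import "fmt"

func main() {
	single := "unable to get managed cluster \"testmanagedcluster\" from hub: managedcluster.cluster.open-cluster-management.io \"testmanagedcluster\" not found"
	// The split form produced for the long-line check concatenates to the same value.
	split := "unable to get managed cluster \"testmanagedcluster\" from hub: " +
		"managedcluster.cluster.open-cluster-management.io \"testmanagedcluster\" not found"
	fmt.Println(single == split) // true
}
```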

View File

@@ -45,8 +45,7 @@ func (r *claimReconcile) reconcile(ctx context.Context, cluster *clusterv1.Manag
// managed cluster on hub. Some of the customized claims might not be exposed once
// the total number of the claims exceeds the value of `cluster-claims-max`.
func (r *claimReconcile) exposeClaims(ctx context.Context, cluster *clusterv1.ManagedCluster) error {
reservedClaims := []clusterv1.ManagedClusterClaim{}
customClaims := []clusterv1.ManagedClusterClaim{}
var reservedClaims, customClaims []clusterv1.ManagedClusterClaim
// clusterClaim with label `open-cluster-management.io/spoke-only` will not be synced to managedCluster.Status at hub.
requirement, _ := labels.NewRequirement(labelCustomizedOnly, selection.DoesNotExist, []string{})

View File

@@ -38,7 +38,8 @@ func TestSync(t *testing.T) {
{
name: "sync no managed cluster",
validateActions: testingcommon.AssertNoActions,
expectedErr: "unable to get managed cluster \"testmanagedcluster\" from hub: managedcluster.cluster.open-cluster-management.io \"testmanagedcluster\" not found",
expectedErr: "unable to get managed cluster \"testmanagedcluster\" " +
"from hub: managedcluster.cluster.open-cluster-management.io \"testmanagedcluster\" not found",
},
{
name: "skip when managed cluster does not join the hub yet",
@@ -87,7 +88,7 @@ func TestSync(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
objects := []runtime.Object{}
var objects []runtime.Object
if c.cluster != nil {
objects = append(objects, c.cluster)
}

View File

@@ -32,7 +32,8 @@ func TestSyncManagedCluster(t *testing.T) {
name: "sync no managed cluster",
startingObjects: []runtime.Object{},
validateActions: testingcommon.AssertNoActions,
expectedErr: "unable to get managed cluster \"testmanagedcluster\" from hub: managedcluster.cluster.open-cluster-management.io \"testmanagedcluster\" not found",
expectedErr: "unable to get managed cluster \"testmanagedcluster\" from hub: " +
"managedcluster.cluster.open-cluster-management.io \"testmanagedcluster\" not found",
},
{
name: "sync an unaccepted managed cluster",

View File

@@ -13,7 +13,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/version"
discovery "k8s.io/client-go/discovery"
"k8s.io/client-go/discovery"
kubeinformers "k8s.io/client-go/informers"
kubefake "k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/rest"
@@ -90,7 +90,8 @@ func TestHealthCheck(t *testing.T) {
validateActions: func(t *testing.T, clusterClient *clusterfake.Clientset) {
testingcommon.AssertNoActions(t, clusterClient.Actions())
},
expectedErr: "unable to get managed cluster \"testmanagedcluster\" from hub: managedcluster.cluster.open-cluster-management.io \"testmanagedcluster\" not found",
expectedErr: "unable to get managed cluster \"testmanagedcluster\" from hub: " +
"managedcluster.cluster.open-cluster-management.io \"testmanagedcluster\" not found",
},
{
name: "kube-apiserver is not health",

View File

@@ -8,7 +8,7 @@ import (
"github.com/openshift/library-go/pkg/controller/factory"
"github.com/openshift/library-go/pkg/operator/events"
"k8s.io/apimachinery/pkg/util/errors"
discovery "k8s.io/client-go/discovery"
"k8s.io/client-go/discovery"
corev1informers "k8s.io/client-go/informers/core/v1"
clientset "open-cluster-management.io/api/client/cluster/clientset/versioned"

View File

@@ -4,7 +4,6 @@ import (
"bytes"
"context"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
@@ -91,11 +90,11 @@ func DumpSecret(
// create/update files from the secret
for key, data := range secret.Data {
filename := path.Clean(path.Join(outputDir, key))
lastData, err := ioutil.ReadFile(filepath.Clean(filename))
lastData, err := os.ReadFile(filepath.Clean(filename))
switch {
case os.IsNotExist(err):
// create file
if err := ioutil.WriteFile(filename, data, 0600); err != nil {
if err := os.WriteFile(filename, data, 0600); err != nil {
return fmt.Errorf("unable to write file %q: %w", filename, err)
}
recorder.Event("FileCreated", fmt.Sprintf("File %q is created from secret %s/%s", filename, secretNamespace, secretName))
@@ -106,7 +105,7 @@ func DumpSecret(
continue
default:
// update file
if err := ioutil.WriteFile(path.Clean(filename), data, 0600); err != nil {
if err := os.WriteFile(path.Clean(filename), data, 0600); err != nil {
return fmt.Errorf("unable to write file %q: %w", filename, err)
}
recorder.Event("FileUpdated", fmt.Sprintf("File %q is updated from secret %s/%s", filename, secretNamespace, secretName))

View File

@@ -3,7 +3,6 @@ package registration
import (
"context"
"fmt"
"io/ioutil"
"os"
"path"
"testing"
@@ -24,11 +23,16 @@ const (
)
func TestDumpSecret(t *testing.T) {
testDir, err := ioutil.TempDir("", "dumpsecret")
testDir, err := os.MkdirTemp("", "dumpsecret")
if err != nil {
t.Errorf("unexpected error: %v", err)
}
defer os.RemoveAll(testDir)
defer func() {
err := os.RemoveAll(testDir)
if err != nil {
t.Fatal(err)
}
}()
kubeConfigFile := testinghelpers.NewKubeconfig(nil, nil)
@@ -44,7 +48,7 @@ func TestDumpSecret(t *testing.T) {
queueKey: "",
secret: testinghelpers.NewHubKubeconfigSecret("irrelevant", "irrelevant", "", nil, map[string][]byte{}),
validateFiles: func(t *testing.T, hubKubeconfigDir string) {
files, err := ioutil.ReadDir(hubKubeconfigDir)
files, err := os.ReadDir(hubKubeconfigDir)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
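The test now wraps os.RemoveAll in a deferred closure so its error is examined rather than silently dropped, which is what an errcheck-style rule flags for a bare defer os.RemoveAll(dir) (the exact linter is an assumption). A minimal sketch of the pattern outside a test:

```go
package main

import (
	"log"
	"os"
)

func main() {
	dir, err := os.MkdirTemp("", "cleanup-example")
	if err != nil {
		log.Fatal(err)
	}
	// The closure lets the cleanup error be reported; a test would call
	// t.Fatal or t.Error here instead of log.Printf.
	defer func() {
		if err := os.RemoveAll(dir); err != nil {
			log.Printf("cleanup failed: %v", err)
		}
	}()

	log.Printf("working in %s", dir)
}
```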

View File

@@ -122,7 +122,9 @@ func TestMergeManifestConditions(t *testing.T) {
newManifestCondition(0, "resource1", newCondition("two", "False", "my-reason", "my-message", nil)),
},
expectedConditions: []workapiv1.ManifestCondition{
newManifestCondition(0, "resource1", newCondition("one", "True", "my-reason", "my-message", nil), newCondition("two", "False", "my-reason", "my-message", nil)),
newManifestCondition(0, "resource1",
newCondition("one", "True", "my-reason", "my-message", nil),
newCondition("two", "False", "my-reason", "my-message", nil)),
},
},
{

View File

@@ -50,7 +50,7 @@ type ManifestWorkReplicaSetController struct {
reconcilers []ManifestWorkReplicaSetReconcile
}
// manifestWorkReplicaSetReconcile is a interface for reconcile logic. It returns an updated manifestWorkReplicaSet and whether further
// ManifestWorkReplicaSetReconcile is a interface for reconcile logic. It returns an updated manifestWorkReplicaSet and whether further
// reconcile needs to proceed.
type ManifestWorkReplicaSetReconcile interface {
reconcile(ctx context.Context, pw *workapiv1alpha1.ManifestWorkReplicaSet) (*workapiv1alpha1.ManifestWorkReplicaSet, reconcileState, error)

View File

@@ -144,7 +144,7 @@ func TestManifestWorkReplicaSetControllerPatchStatus(t *testing.T) {
},
},
{
name: "no additonal apply needed",
name: "no additional apply needed",
mwrSet: func() *workapiv1alpha1.ManifestWorkReplicaSet {
w := helpertest.CreateTestManifestWorkReplicaSet("test", "default", "placement")
w.Finalizers = []string{ManifestWorkReplicaSetFinalizer}
@@ -213,15 +213,27 @@ func TestManifestWorkReplicaSetControllerPatchStatus(t *testing.T) {
workObjects = append(workObjects, c.works...)
fakeClient := fakeworkclient.NewSimpleClientset(workObjects...)
workInformers := workinformers.NewSharedInformerFactory(fakeClient, 10*time.Minute)
workInformers.Work().V1alpha1().ManifestWorkReplicaSets().Informer().GetStore().Add(c.mwrSet)
err := workInformers.Work().V1alpha1().ManifestWorkReplicaSets().Informer().GetStore().Add(c.mwrSet)
if err != nil {
t.Fatal(err)
}
for _, o := range c.works {
workInformers.Work().V1().ManifestWorks().Informer().GetStore().Add(o)
err = workInformers.Work().V1().ManifestWorks().Informer().GetStore().Add(o)
if err != nil {
t.Fatal(err)
}
}
fakeClusterClient := fakeclusterclient.NewSimpleClientset(c.placement, c.decision)
clusterInformers := clusterinformers.NewSharedInformerFactory(fakeClusterClient, 10*time.Minute)
clusterInformers.Cluster().V1beta1().Placements().Informer().GetStore().Add(c.placement)
clusterInformers.Cluster().V1beta1().PlacementDecisions().Informer().GetStore().Add(c.decision)
err = clusterInformers.Cluster().V1beta1().Placements().Informer().GetStore().Add(c.placement)
if err != nil {
t.Fatal(err)
}
err = clusterInformers.Cluster().V1beta1().PlacementDecisions().Informer().GetStore().Add(c.decision)
if err != nil {
t.Fatal(err)
}
ctrl := newController(
fakeClient,
@@ -232,7 +244,7 @@ func TestManifestWorkReplicaSetControllerPatchStatus(t *testing.T) {
)
controllerContext := testingcommon.NewFakeSyncContext(t, c.mwrSet.Namespace+"/"+c.mwrSet.Name)
err := ctrl.sync(context.TODO(), controllerContext)
err = ctrl.sync(context.TODO(), controllerContext)
if err != nil {
t.Error(err)
}

View File

@@ -39,7 +39,7 @@ func (d *deployReconciler) reconcile(ctx context.Context, mwrSet *workapiv1alpha
return mwrSet, reconcileStop, nil
}
if err != nil {
return mwrSet, reconcileContinue, fmt.Errorf("Failed get placement %w", err)
return mwrSet, reconcileContinue, fmt.Errorf("failed get placement %w", err)
}
placements = append(placements, placement)
}
@@ -49,7 +49,7 @@ func (d *deployReconciler) reconcile(ctx context.Context, mwrSet *workapiv1alpha
return mwrSet, reconcileContinue, err
}
errs := []error{}
var errs []error
addedClusters, deletedClusters, existingClusters := sets.New[string](), sets.New[string](), sets.New[string]()
for _, mw := range manifestWorks {
existingClusters.Insert(mw.Namespace)
@@ -127,7 +127,7 @@ func (d *deployReconciler) reconcile(ctx context.Context, mwrSet *workapiv1alpha
return mwrSet, reconcileContinue, utilerrors.NewAggregate(errs)
}
// Return only True status if there all clusters have manifests applied as expected
// GetManifestworkApplied return only True status if there all clusters have manifests applied as expected
func GetManifestworkApplied(reason string, message string) metav1.Condition {
if reason == workapiv1alpha1.ReasonAsExpected {
return getCondition(workapiv1alpha1.ManifestWorkReplicaSetConditionManifestworkApplied, reason, message, metav1.ConditionTrue)
@@ -137,7 +137,7 @@ func GetManifestworkApplied(reason string, message string) metav1.Condition {
}
// Return only True status if there are clusters selected
// GetPlacementDecisionVerified return only True status if there are clusters selected
func GetPlacementDecisionVerified(reason string, message string) metav1.Condition {
if reason == workapiv1alpha1.ReasonAsExpected {
return getCondition(workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementVerified, reason, message, metav1.ConditionTrue)
@@ -158,7 +158,7 @@ func getCondition(conditionType string, reason string, message string, status me
func CreateManifestWork(mwrSet *workapiv1alpha1.ManifestWorkReplicaSet, clusterNS string) (*workv1.ManifestWork, error) {
if clusterNS == "" {
return nil, fmt.Errorf("Invalid cluster namespace")
return nil, fmt.Errorf("invalid cluster namespace")
}
return &workv1.ManifestWork{
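Besides the usual var errs change, this file adjusts two Go conventions: error strings built with fmt.Errorf start lower-case and carry no trailing punctuation (they are routinely wrapped into longer messages), and doc comments open with the identifier they describe so godoc output and the comment linters line up. A compact illustration of both conventions; validateNamespace is a made-up helper, not code from this repository:

```go
package main

import (
	"errors"
	"fmt"
)

// validateNamespace reports whether ns can be used as a manifestwork target
// namespace. Doc comments conventionally begin with the identifier's name.
func validateNamespace(ns string) error {
	if ns == "" {
		// Lower-case, unpunctuated error text wraps cleanly into larger messages.
		return errors.New("invalid cluster namespace")
	}
	return nil
}

func main() {
	if err := validateNamespace(""); err != nil {
		fmt.Println(fmt.Errorf("failed to create manifestwork: %w", err))
	}
}
```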

View File

@@ -73,7 +73,7 @@ func TestDeployReconcileAsExpected(t *testing.T) {
}
// Check the PlacedManifestWork conditions
placeCondition := apimeta.FindStatusCondition(mwrSet.Status.Conditions, string(workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementVerified))
placeCondition := apimeta.FindStatusCondition(mwrSet.Status.Conditions, workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementVerified)
if placeCondition == nil {
t.Fatal("Placement condition not found ", mwrSet.Status.Conditions)
@@ -137,7 +137,7 @@ func TestDeployReconcileAsPlacementDecisionEmpty(t *testing.T) {
}
// Check the PlacedManifestWork conditions
placeCondition := apimeta.FindStatusCondition(mwrSet.Status.Conditions, string(workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementVerified))
placeCondition := apimeta.FindStatusCondition(mwrSet.Status.Conditions, workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementVerified)
if placeCondition == nil {
t.Fatal("Placement condition not found ", mwrSet.Status.Conditions)
@@ -184,7 +184,7 @@ func TestDeployReconcileAsPlacementNotExist(t *testing.T) {
}
// Check the PlacedManifestWork conditions
placeCondition := apimeta.FindStatusCondition(mwrSet.Status.Conditions, string(workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementVerified))
placeCondition := apimeta.FindStatusCondition(mwrSet.Status.Conditions, workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementVerified)
if placeCondition == nil {
t.Fatal("Placement condition not found ", mwrSet.Status.Conditions)

View File

@@ -43,15 +43,15 @@ func (f *finalizeReconciler) reconcile(ctx context.Context, mwrSet *workapiv1alp
return mwrSet, reconcileStop, nil
}
func (m *finalizeReconciler) finalizeManifestWorkReplicaSet(ctx context.Context, manifestWorkReplicaSet *workapiv1alpha1.ManifestWorkReplicaSet) error {
manifestWorks, err := listManifestWorksByManifestWorkReplicaSet(manifestWorkReplicaSet, m.manifestWorkLister)
func (f *finalizeReconciler) finalizeManifestWorkReplicaSet(ctx context.Context, manifestWorkReplicaSet *workapiv1alpha1.ManifestWorkReplicaSet) error {
manifestWorks, err := listManifestWorksByManifestWorkReplicaSet(manifestWorkReplicaSet, f.manifestWorkLister)
if err != nil {
return err
}
errs := []error{}
var errs []error
for _, mw := range manifestWorks {
err = m.workApplier.Delete(ctx, mw.Namespace, mw.Name)
err = f.workApplier.Delete(ctx, mw.Namespace, mw.Name)
if err != nil && !errors.IsNotFound(err) {
errs = append(errs, err)
}
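The receiver of finalizeManifestWorkReplicaSet is renamed from m to f so every method on finalizeReconciler uses the same receiver name, the consistency that receiver-name checks enforce (e.g. stylecheck ST1016; the specific rule is an assumption). A trivial sketch of the convention:

```go
package main

import "fmt"

type finalizer struct{ name string }

// Both methods share the receiver name f, matching the rename in this hunk.
func (f *finalizer) start() { fmt.Println("start", f.name) }
func (f *finalizer) stop()  { fmt.Println("stop", f.name) }

func main() {
	f := &finalizer{name: "example"}
	f.start()
	f.stop()
}
```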

View File

@@ -27,7 +27,7 @@ func TestCreateOnlyApply(t *testing.T) {
}{
{
name: "create a non exist object",
owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: "testowner"},
owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: defaultOwner},
existing: nil,
required: spoketesting.NewUnstructured("v1", "Secret", "ns1", "test"),
gvr: schema.GroupVersionResource{Version: "v1", Resource: "secrets"},
@@ -40,14 +40,14 @@ func TestCreateOnlyApply(t *testing.T) {
t.Errorf("Expect 1 owners, but have %d", len(owners))
}
if owners[0].UID != "testowner" {
if owners[0].UID != defaultOwner {
t.Errorf("Owner UId is not correct, got %s", owners[0].UID)
}
},
},
{
name: "create an already existing object",
owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: "testowner"},
owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: defaultOwner},
existing: spoketesting.NewUnstructured("v1", "Secret", "ns1", "test"),
required: spoketesting.NewUnstructured("v1", "Secret", "ns1", "test"),
gvr: schema.GroupVersionResource{Version: "v1", Resource: "secrets"},
@@ -64,7 +64,7 @@ func TestCreateOnlyApply(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
objects := []runtime.Object{}
var objects []runtime.Object
if c.existing != nil {
objects = append(objects, c.existing)
}

View File

@@ -39,7 +39,7 @@ func (c *ServerSideApply) Apply(
ctx context.Context,
gvr schema.GroupVersionResource,
required *unstructured.Unstructured,
owner metav1.OwnerReference,
_ metav1.OwnerReference,
applyOption *workapiv1.ManifestConfigOption,
recorder events.Recorder) (runtime.Object, error) {
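ServerSideApply ignores the owner argument, so the parameter is renamed to the blank identifier: the method keeps the shared Apply signature while the non-use becomes explicit instead of tripping an unused-parameter warning. A stand-alone sketch with an invented Applier interface, not the real work-agent types:

```go
package main

import "fmt"

// Applier mimics the shape of the apply strategies: every implementation
// shares one signature even when it ignores some arguments.
type Applier interface {
	Apply(name, owner string) string
}

type serverSide struct{}

// Renaming the unused owner parameter to _ keeps the interface satisfied
// while documenting that the value is intentionally ignored.
func (serverSide) Apply(name string, _ string) string {
	return "applied " + name
}

func main() {
	var a Applier = serverSide{}
	fmt.Println(a.Apply("ns1/secret-test", "ignored-owner"))
}
```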

View File

@@ -22,6 +22,8 @@ import (
"open-cluster-management.io/ocm/pkg/work/spoke/spoketesting"
)
const defaultOwner = "test-owner"
func TestServerSideApply(t *testing.T) {
cases := []struct {
name string
@@ -34,7 +36,7 @@ func TestServerSideApply(t *testing.T) {
}{
{
name: "server side apply successfully",
owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: "testowner"},
owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: defaultOwner},
existing: nil,
required: spoketesting.NewUnstructured("v1", "Namespace", "", "test"),
gvr: schema.GroupVersionResource{Version: "v1", Resource: "namespaces"},
@@ -42,7 +44,7 @@ func TestServerSideApply(t *testing.T) {
},
{
name: "server side apply successfully conflict",
owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: "testowner"},
owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: defaultOwner},
existing: spoketesting.NewUnstructured("v1", "Secret", "ns1", "test"),
required: spoketesting.NewUnstructured("v1", "Secret", "ns1", "test"),
gvr: schema.GroupVersionResource{Version: "v1", Resource: "secrets"},
@@ -55,7 +57,7 @@ func TestServerSideApply(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
objects := []runtime.Object{}
var objects []runtime.Object
if c.existing != nil {
objects = append(objects, c.existing)
}

View File

@@ -47,15 +47,19 @@ func TestIsSameUnstructured(t *testing.T) {
expected: false,
},
{
name: "different spec",
obj1: spoketesting.NewUnstructuredWithContent("v1", "Kind1", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}}),
obj2: spoketesting.NewUnstructuredWithContent("v1", "Kind1", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val2"}}),
name: "different spec",
obj1: spoketesting.NewUnstructuredWithContent(
"v1", "Kind1", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}}),
obj2: spoketesting.NewUnstructuredWithContent(
"v1", "Kind1", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val2"}}),
expected: false,
},
{
name: "same spec, different status",
obj1: spoketesting.NewUnstructuredWithContent("v1", "Kind1", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}, "status": "status1"}),
obj2: spoketesting.NewUnstructuredWithContent("v1", "Kind1", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}, "status": "status2"}),
name: "same spec, different status",
obj1: spoketesting.NewUnstructuredWithContent(
"v1", "Kind1", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}, "status": "status1"}),
obj2: spoketesting.NewUnstructuredWithContent(
"v1", "Kind1", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}, "status": "status2"}),
expected: true,
},
}
@@ -81,7 +85,7 @@ func TestApplyUnstructred(t *testing.T) {
}{
{
name: "create a new object with owner",
owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: "testowner"},
owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: defaultOwner},
required: spoketesting.NewUnstructured("v1", "Secret", "ns1", "test"),
gvr: schema.GroupVersionResource{Version: "v1", Resource: "secrets"},
validateActions: func(t *testing.T, actions []clienttesting.Action) {
@@ -92,7 +96,7 @@ func TestApplyUnstructred(t *testing.T) {
t.Errorf("Expect 1 owners, but have %d", len(owners))
}
if owners[0].UID != "testowner" {
if owners[0].UID != defaultOwner {
t.Errorf("Owner UId is not correct, got %s", owners[0].UID)
}
},
@@ -120,7 +124,7 @@ func TestApplyUnstructred(t *testing.T) {
name: "update an object owner",
existing: spoketesting.NewUnstructured(
"v1", "Secret", "ns1", "test", metav1.OwnerReference{APIVersion: "v1", Name: "test1", UID: "testowner1"}),
owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: "testowner"},
owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: defaultOwner},
required: spoketesting.NewUnstructured("v1", "Secret", "ns1", "test"),
gvr: schema.GroupVersionResource{Version: "v1", Resource: "secrets"},
validateActions: func(t *testing.T, actions []clienttesting.Action) {
@@ -139,7 +143,7 @@ func TestApplyUnstructred(t *testing.T) {
if owners[0].UID != "testowner1" {
t.Errorf("Owner UId is not correct, got %s", owners[0].UID)
}
if owners[1].UID != "testowner" {
if owners[1].UID != defaultOwner {
t.Errorf("Owner UId is not correct, got %s", owners[1].UID)
}
},
@@ -160,7 +164,7 @@ func TestApplyUnstructred(t *testing.T) {
{
name: "remove an object owner",
existing: spoketesting.NewUnstructured(
"v1", "Secret", "ns1", "test", metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: "testowner"}),
"v1", "Secret", "ns1", "test", metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: defaultOwner}),
owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: "testowner-"},
required: spoketesting.NewUnstructured("v1", "Secret", "ns1", "test"),
gvr: schema.GroupVersionResource{Version: "v1", Resource: "secrets"},
@@ -285,7 +289,7 @@ func TestApplyUnstructred(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
objects := []runtime.Object{}
var objects []runtime.Object
if c.existing != nil {
objects = append(objects, c.existing)
}
@@ -318,7 +322,7 @@ func TestUpdateApplyKube(t *testing.T) {
}{
{
name: "apply non exist object using kube client",
owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: "testowner"},
owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: defaultOwner},
required: spoketesting.NewUnstructured("v1", "Secret", "ns1", "test"),
gvr: schema.GroupVersionResource{Version: "v1", Resource: "secrets"},
validateActions: func(t *testing.T, actions []clienttesting.Action) {
@@ -331,7 +335,7 @@ func TestUpdateApplyKube(t *testing.T) {
},
{
name: "apply existing object using kube client",
owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: "testowner"},
owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: defaultOwner},
existing: spoketesting.NewSecretWithType("test", "ns1", "foo", corev1.SecretTypeOpaque),
required: spoketesting.NewUnstructured("v1", "Secret", "ns1", "test"),
gvr: schema.GroupVersionResource{Version: "v1", Resource: "secrets"},
@@ -353,7 +357,7 @@ func TestUpdateApplyKube(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
objects := []runtime.Object{}
var objects []runtime.Object
if c.existing != nil {
objects = append(objects, c.existing)
}
@@ -404,14 +408,14 @@ func TestUpdateApplyDynamic(t *testing.T) {
}{
{
name: "apply non exist object using dynamic client",
owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: "testowner"},
owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: defaultOwner},
required: spoketesting.NewUnstructured("monitoring.coreos.com/v1", "ServiceMonitor", "ns1", "test"),
gvr: schema.GroupVersionResource{Group: "monitoring.coreos.com", Version: "v1", Resource: "servicemonitors"},
ownerApplied: true,
},
{
name: "apply existing object using dynamic client",
owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: "testowner"},
owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: defaultOwner},
existing: spoketesting.NewUnstructured("monitoring.coreos.com/v1", "ServiceMonitor", "ns1", "test"),
required: spoketesting.NewUnstructured("monitoring.coreos.com/v1", "ServiceMonitor", "ns1", "test"),
gvr: schema.GroupVersionResource{Group: "monitoring.coreos.com", Version: "v1", Resource: "servicemonitors"},
@@ -421,7 +425,7 @@ func TestUpdateApplyDynamic(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
objects := []runtime.Object{}
var objects []runtime.Object
if c.existing != nil {
objects = append(objects, c.existing)
}
@@ -473,7 +477,7 @@ func TestUpdateApplyApiExtension(t *testing.T) {
}{
{
name: "apply non exist object using api extension client",
owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: "testowner"},
owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: defaultOwner},
required: spoketesting.NewUnstructured("apiextensions.k8s.io/v1", "CustomResourceDefinition", "", "testcrd"),
gvr: schema.GroupVersionResource{Group: "apiextensions.k8s.io", Version: "v1", Resource: "customresourcedefinition"},
validateActions: func(t *testing.T, actions []clienttesting.Action) {
@@ -486,7 +490,7 @@ func TestUpdateApplyApiExtension(t *testing.T) {
},
{
name: "apply existing object using api extension client",
owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: "testowner"},
owner: metav1.OwnerReference{APIVersion: "v1", Name: "test", UID: defaultOwner},
existing: newCRD("testcrd"),
required: spoketesting.NewUnstructured("apiextensions.k8s.io/v1", "CustomResourceDefinition", "", "testcrd"),
gvr: schema.GroupVersionResource{Group: "apiextensions.k8s.io", Version: "v1", Resource: "customresourcedefinition"},
@@ -502,7 +506,7 @@ func TestUpdateApplyApiExtension(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
objects := []runtime.Object{}
var objects []runtime.Object
if c.existing != nil {
objects = append(objects, c.existing)
}

View File

@@ -140,7 +140,9 @@ func TestValidateEscalation(t *testing.T) {
namespace: "test-deny",
name: "test",
obj: spoketesting.NewUnstructured("v1", "ClusterRole", "", "test"),
expect: fmt.Errorf("not allowed to apply the resource rbac.authorization.k8s.io roles, test-deny test, error: permission escalation, will try again in 1m0s"),
expect: fmt.Errorf(
"not allowed to apply the resource rbac.authorization.k8s.io roles, " +
"test-deny test, error: permission escalation, will try again in 1m0s"),
},
"allow": {
executor: &workapiv1.ManifestWorkExecutor{

View File

@@ -23,6 +23,12 @@ import (
"open-cluster-management.io/ocm/pkg/work/spoke/spoketesting"
)
const (
denyNS = "test-deny"
allowNS = "test-allow"
clusterName = "cluster1"
)
func newExecutorCacheValidator(t *testing.T, ctx context.Context, clusterName string,
kubeClient kubernetes.Interface, manifestWorkObjects ...runtime.Object) *sarCacheValidator {
@@ -84,7 +90,7 @@ func TestValidate(t *testing.T) {
},
},
},
namespace: "test-deny",
namespace: denyNS,
name: "test",
expect: fmt.Errorf("not allowed to apply the resource secrets, test-deny test, will try again in 1m0s"),
},
@@ -98,7 +104,7 @@ func TestValidate(t *testing.T) {
},
},
},
namespace: "test-allow",
namespace: allowNS,
name: "test",
expect: nil,
},
@@ -110,7 +116,7 @@ func TestValidate(t *testing.T) {
func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) {
obj := action.(clienttesting.CreateActionImpl).Object.(*v1.SubjectAccessReview)
if obj.Spec.ResourceAttributes.Namespace == "test-allow" {
if obj.Spec.ResourceAttributes.Namespace == allowNS {
return true, &v1.SubjectAccessReview{
Status: v1.SubjectAccessReviewStatus{
Allowed: true,
@@ -118,7 +124,7 @@ func TestValidate(t *testing.T) {
}, nil
}
if obj.Spec.ResourceAttributes.Namespace == "test-deny" {
if obj.Spec.ResourceAttributes.Namespace == denyNS {
return true, &v1.SubjectAccessReview{
Status: v1.SubjectAccessReviewStatus{
Denied: true,
@@ -129,7 +135,6 @@ func TestValidate(t *testing.T) {
},
)
clusterName := "cluster1"
ctx := context.TODO()
cacheValidator := newExecutorCacheValidator(t, ctx, clusterName, kubeClient)
for testName, test := range tests {
@@ -165,13 +170,13 @@ func TestCacheWorks(t *testing.T) {
}{
"forbidden": {
executor: executor,
namespace: "test-deny",
namespace: denyNS,
name: "test",
expect: fmt.Errorf("not allowed to apply the resource secrets, test-deny test, will try again in 1m0s"),
},
"allow": {
executor: executor,
namespace: "test-allow",
namespace: allowNS,
name: "test",
expect: nil,
},
@@ -183,7 +188,7 @@ func TestCacheWorks(t *testing.T) {
func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) {
obj := action.(clienttesting.CreateActionImpl).Object.(*v1.SubjectAccessReview)
if obj.Spec.ResourceAttributes.Namespace == "test-allow" {
if obj.Spec.ResourceAttributes.Namespace == allowNS {
return true, &v1.SubjectAccessReview{
Status: v1.SubjectAccessReviewStatus{
Allowed: true,
@@ -191,7 +196,7 @@ func TestCacheWorks(t *testing.T) {
}, nil
}
if obj.Spec.ResourceAttributes.Namespace == "test-deny" {
if obj.Spec.ResourceAttributes.Namespace == denyNS {
return true, &v1.SubjectAccessReview{
Status: v1.SubjectAccessReviewStatus{
Denied: true,
@@ -202,12 +207,11 @@ func TestCacheWorks(t *testing.T) {
},
)
clusterName := "cluster1"
ctx := context.TODO()
work, _ := spoketesting.NewManifestWork(0,
spoketesting.NewUnstructured("v1", "Secret", "test-allow", "test"),
spoketesting.NewUnstructured("v1", "Secret", "test-deny", "test"),
spoketesting.NewUnstructured("v1", "Secret", allowNS, "test"),
spoketesting.NewUnstructured("v1", "Secret", denyNS, "test"),
)
work.Spec.Executor = executor

View File

@@ -156,7 +156,7 @@ func TestCacheController(t *testing.T) {
func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) {
obj := action.(clienttesting.CreateActionImpl).Object.(*v1.SubjectAccessReview)
if obj.Spec.ResourceAttributes.Namespace == "test-allow" {
if obj.Spec.ResourceAttributes.Namespace == allowNS {
return true, &v1.SubjectAccessReview{
Status: v1.SubjectAccessReviewStatus{
Allowed: true,
@@ -164,7 +164,7 @@ func TestCacheController(t *testing.T) {
}, nil
}
if obj.Spec.ResourceAttributes.Namespace == "test-deny" {
if obj.Spec.ResourceAttributes.Namespace == denyNS {
return true, &v1.SubjectAccessReview{
Status: v1.SubjectAccessReviewStatus{
Denied: true,
@@ -175,12 +175,11 @@ func TestCacheController(t *testing.T) {
},
)
clusterName := "cluster1"
ctx := context.TODO()
work, _ := spoketesting.NewManifestWork(0,
spoketesting.NewUnstructured("v1", "Secret", "test-allow", "test"),
spoketesting.NewUnstructured("v1", "Secret", "test-deny", "test"),
spoketesting.NewUnstructured("v1", "Secret", allowNS, "test"),
spoketesting.NewUnstructured("v1", "Secret", denyNS, "test"),
)
work.Spec.Executor = executor
work.Spec.DeleteOption = &workapiv1.DeleteOption{
@@ -190,7 +189,7 @@ func TestCacheController(t *testing.T) {
{
Group: "",
Resource: "secrets",
Namespace: "test-allow",
Namespace: allowNS,
Name: "test",
},
},

View File

@@ -71,7 +71,7 @@ func (m *AppliedManifestWorkFinalizeController) sync(ctx context.Context, contro
return m.syncAppliedManifestWork(ctx, controllerContext, appliedManifestWork)
}
// syncAppliedManifestWork ensures that when a appliedmanifestwork has been deleted, everything it created is also deleted.
// syncAppliedManifestWork ensures that when an appliedmanifestwork has been deleted, everything it created is also deleted.
// Foreground deletion is implemented, which means all resources created will be deleted and finalized
// before removing finalizer from appliedmanifestwork
func (m *AppliedManifestWorkFinalizeController) syncAppliedManifestWork(ctx context.Context,

View File

@@ -190,8 +190,7 @@ func TestSyncManifestWorkController(t *testing.T) {
t.Errorf("Expect no sync error, but got %v", err)
}
workAction := []clienttesting.Action{}
appliedWorkAction := []clienttesting.Action{}
var workAction, appliedWorkAction []clienttesting.Action
for _, action := range fakeClient.Actions() {
if action.GetResource().Resource == "manifestworks" {
workAction = append(workAction, action)

View File

@@ -142,7 +142,7 @@ func (m *ManifestWorkController) sync(ctx context.Context, controllerContext fac
// We create an ownerref instead of a controller ref since multiple controllers can declare the ownership of a manifest
owner := helper.NewAppliedManifestWorkOwner(appliedManifestWork)
errs := []error{}
var errs []error
// Apply resources on spoke cluster.
resourceResults := make([]applyResult, len(manifestWork.Spec.Workload.Manifests))
err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
@@ -161,7 +161,7 @@ func (m *ManifestWorkController) sync(ctx context.Context, controllerContext fac
klog.Errorf("failed to apply resource with error %v", err)
}
newManifestConditions := []workapiv1.ManifestCondition{}
var newManifestConditions []workapiv1.ManifestCondition
var requeueTime = MaxRequeueDuration
for _, result := range resourceResults {
manifestCondition := workapiv1.ManifestCondition{
@@ -238,7 +238,7 @@ func (m *ManifestWorkController) applyAppliedManifestWork(ctx context.Context, w
Finalizers: []string{controllers.AppliedManifestWorkFinalizer},
},
Spec: workapiv1.AppliedManifestWorkSpec{
HubHash: m.hubHash,
HubHash: hubHash,
ManifestWorkName: workName,
AgentID: agentID,
},

View File

@@ -7,6 +7,7 @@ import (
"testing"
"time"
"github.com/google/go-cmp/cmp"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
@@ -15,7 +16,6 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/diff"
fakedynamic "k8s.io/client-go/dynamic/fake"
fakekube "k8s.io/client-go/kubernetes/fake"
clienttesting "k8s.io/client-go/testing"
@@ -33,6 +33,8 @@ import (
"open-cluster-management.io/ocm/pkg/work/spoke/spoketesting"
)
const defaultOwner = "testowner"
type testController struct {
controller *ManifestWorkController
dynamicClient *fakedynamic.FakeDynamicClient
@@ -201,8 +203,7 @@ func (t *testCase) validate(
dynamicClient *fakedynamic.FakeDynamicClient,
workClient *fakeworkclient.Clientset,
kubeClient *fakekube.Clientset) {
actualWorkActions := []clienttesting.Action{}
actualAppliedWorkActions := []clienttesting.Action{}
var actualWorkActions, actualAppliedWorkActions []clienttesting.Action
for _, workAction := range workClient.Actions() {
if workAction.GetResource().Resource == "manifestworks" {
actualWorkActions = append(actualWorkActions, workAction)
@@ -280,14 +281,14 @@ func TestSync(t *testing.T) {
withAppliedWorkAction("create").
withExpectedKubeAction("get", "create").
withExpectedManifestCondition(expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue}).
withExpectedWorkCondition(expectedCondition{string(workapiv1.WorkApplied), metav1.ConditionTrue}),
withExpectedWorkCondition(expectedCondition{workapiv1.WorkApplied, metav1.ConditionTrue}),
newTestCase("create single deployment resource").
withWorkManifest(spoketesting.NewUnstructured("apps/v1", "Deployment", "ns1", "test")).
withExpectedWorkAction("patch").
withAppliedWorkAction("create").
withExpectedDynamicAction("get", "create").
withExpectedManifestCondition(expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue}).
withExpectedWorkCondition(expectedCondition{string(workapiv1.WorkApplied), metav1.ConditionTrue}),
withExpectedWorkCondition(expectedCondition{workapiv1.WorkApplied, metav1.ConditionTrue}),
newTestCase("update single resource").
withWorkManifest(spoketesting.NewUnstructured("v1", "Secret", "ns1", "test")).
withSpokeObject(spoketesting.NewSecret("test", "ns1", "value2")).
@@ -295,30 +296,38 @@ func TestSync(t *testing.T) {
withAppliedWorkAction("create").
withExpectedKubeAction("get", "delete", "create").
withExpectedManifestCondition(expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue}).
withExpectedWorkCondition(expectedCondition{string(workapiv1.WorkApplied), metav1.ConditionTrue}),
withExpectedWorkCondition(expectedCondition{workapiv1.WorkApplied, metav1.ConditionTrue}),
newTestCase("create single unstructured resource").
withWorkManifest(spoketesting.NewUnstructured("v1", "NewObject", "ns1", "test")).
withExpectedWorkAction("patch").
withAppliedWorkAction("create").
withExpectedDynamicAction("get", "create").
withExpectedManifestCondition(expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue}).
withExpectedWorkCondition(expectedCondition{string(workapiv1.WorkApplied), metav1.ConditionTrue}),
withExpectedWorkCondition(expectedCondition{workapiv1.WorkApplied, metav1.ConditionTrue}),
newTestCase("update single unstructured resource").
withWorkManifest(spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}})).
withSpokeDynamicObject(spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val2"}})).
withWorkManifest(spoketesting.NewUnstructuredWithContent(
"v1", "NewObject", "ns1", "n1",
map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}})).
withSpokeDynamicObject(spoketesting.NewUnstructuredWithContent(
"v1", "NewObject", "ns1", "n1",
map[string]interface{}{"spec": map[string]interface{}{"key1": "val2"}})).
withExpectedWorkAction("patch").
withAppliedWorkAction("create").
withExpectedDynamicAction("get", "update").
withExpectedManifestCondition(expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue}).
withExpectedWorkCondition(expectedCondition{string(workapiv1.WorkApplied), metav1.ConditionTrue}),
withExpectedWorkCondition(expectedCondition{workapiv1.WorkApplied, metav1.ConditionTrue}),
newTestCase("multiple create&update resource").
withWorkManifest(spoketesting.NewUnstructured("v1", "Secret", "ns1", "test"), spoketesting.NewUnstructured("v1", "Secret", "ns2", "test")).
withWorkManifest(spoketesting.NewUnstructured(
"v1", "Secret", "ns1", "test"),
spoketesting.NewUnstructured("v1", "Secret", "ns2", "test")).
withSpokeObject(spoketesting.NewSecret("test", "ns1", "value2")).
withExpectedWorkAction("patch").
withAppliedWorkAction("create").
withExpectedKubeAction("get", "delete", "create", "get", "create").
withExpectedManifestCondition(expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue}, expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue}).
withExpectedWorkCondition(expectedCondition{string(workapiv1.WorkApplied), metav1.ConditionTrue}),
withExpectedManifestCondition(
expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue},
expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue}).
withExpectedWorkCondition(expectedCondition{workapiv1.WorkApplied, metav1.ConditionTrue}),
}
for _, c := range cases {
@@ -342,13 +351,17 @@ func TestSync(t *testing.T) {
// Test applying resource failed
func TestFailedToApplyResource(t *testing.T) {
tc := newTestCase("multiple create&update resource").
withWorkManifest(spoketesting.NewUnstructured("v1", "Secret", "ns1", "test"), spoketesting.NewUnstructured("v1", "Secret", "ns2", "test")).
withWorkManifest(spoketesting.NewUnstructured(
"v1", "Secret", "ns1", "test"),
spoketesting.NewUnstructured("v1", "Secret", "ns2", "test")).
withSpokeObject(spoketesting.NewSecret("test", "ns1", "value2")).
withExpectedWorkAction("patch").
withAppliedWorkAction("create").
withExpectedKubeAction("get", "delete", "create", "get", "create").
withExpectedManifestCondition(expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue}, expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionFalse}).
withExpectedWorkCondition(expectedCondition{string(workapiv1.WorkApplied), metav1.ConditionFalse})
withExpectedManifestCondition(
expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue},
expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionFalse}).
withExpectedWorkCondition(expectedCondition{workapiv1.WorkApplied, metav1.ConditionFalse})
work, workKey := spoketesting.NewManifestWork(0, tc.workManifest...)
work.Finalizers = []string{controllers.ManifestWorkFinalizer}
@@ -366,7 +379,7 @@ func TestFailedToApplyResource(t *testing.T) {
return false, createObject, nil
}
return true, &corev1.Secret{}, fmt.Errorf("Fake error")
return true, &corev1.Secret{}, fmt.Errorf("fake error")
})
syncContext := testingcommon.NewFakeSyncContext(t, workKey)
err := controller.toController().sync(context.TODO(), syncContext)
@@ -380,58 +393,91 @@ func TestFailedToApplyResource(t *testing.T) {
func TestUpdateStrategy(t *testing.T) {
cases := []*testCase{
newTestCase("update single resource with nil updateStrategy").
withWorkManifest(spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}})).
withSpokeDynamicObject(spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val2"}})).
withManifestConfig(newManifestConfigOption("", "newobjects", "ns1", "n1", nil)).
withWorkManifest(spoketesting.NewUnstructuredWithContent(
"v1", "NewObject", "ns1", "n1",
map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}})).
withSpokeDynamicObject(spoketesting.NewUnstructuredWithContent(
"v1", "NewObject", "ns1", "n1",
map[string]interface{}{"spec": map[string]interface{}{"key1": "val2"}})).
withManifestConfig(newManifestConfigOption(
"", "newobjects", "ns1", "n1", nil)).
withExpectedWorkAction("patch").
withAppliedWorkAction("create").
withExpectedDynamicAction("get", "update").
withExpectedManifestCondition(expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue}).
withExpectedWorkCondition(expectedCondition{string(workapiv1.WorkApplied), metav1.ConditionTrue}),
withExpectedWorkCondition(expectedCondition{workapiv1.WorkApplied, metav1.ConditionTrue}),
newTestCase("update single resource with update updateStrategy").
withWorkManifest(spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}})).
withSpokeDynamicObject(spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val2"}})).
withManifestConfig(newManifestConfigOption("", "newobjects", "ns1", "n1", &workapiv1.UpdateStrategy{Type: workapiv1.UpdateStrategyTypeUpdate})).
withWorkManifest(spoketesting.NewUnstructuredWithContent(
"v1", "NewObject", "ns1", "n1",
map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}})).
withSpokeDynamicObject(spoketesting.NewUnstructuredWithContent(
"v1", "NewObject", "ns1", "n1",
map[string]interface{}{"spec": map[string]interface{}{"key1": "val2"}})).
withManifestConfig(newManifestConfigOption(
"", "newobjects", "ns1", "n1",
&workapiv1.UpdateStrategy{Type: workapiv1.UpdateStrategyTypeUpdate})).
withExpectedWorkAction("patch").
withAppliedWorkAction("create").
withExpectedDynamicAction("get", "update").
withExpectedManifestCondition(expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue}).
withExpectedWorkCondition(expectedCondition{string(workapiv1.WorkApplied), metav1.ConditionTrue}),
withExpectedWorkCondition(expectedCondition{workapiv1.WorkApplied, metav1.ConditionTrue}),
newTestCase("create single resource with updateStrategy not found").
withWorkManifest(spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}})).
withSpokeDynamicObject(spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val2"}})).
withManifestConfig(newManifestConfigOption("", "newobjects", "ns1", "n2", &workapiv1.UpdateStrategy{Type: workapiv1.UpdateStrategyTypeServerSideApply})).
withWorkManifest(spoketesting.NewUnstructuredWithContent(
"v1", "NewObject", "ns1", "n1",
map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}})).
withSpokeDynamicObject(spoketesting.NewUnstructuredWithContent(
"v1", "NewObject", "ns1", "n1",
map[string]interface{}{"spec": map[string]interface{}{"key1": "val2"}})).
withManifestConfig(newManifestConfigOption(
"", "newobjects", "ns1", "n2",
&workapiv1.UpdateStrategy{Type: workapiv1.UpdateStrategyTypeServerSideApply})).
withExpectedWorkAction("patch").
withAppliedWorkAction("create").
withExpectedDynamicAction("get", "update").
withExpectedManifestCondition(expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue}).
withExpectedWorkCondition(expectedCondition{string(workapiv1.WorkApplied), metav1.ConditionTrue}),
withExpectedWorkCondition(expectedCondition{workapiv1.WorkApplied, metav1.ConditionTrue}),
newTestCase("create single resource with server side apply updateStrategy").
withWorkManifest(spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}})).
withManifestConfig(newManifestConfigOption("", "newobjects", "ns1", "n1", &workapiv1.UpdateStrategy{Type: workapiv1.UpdateStrategyTypeServerSideApply})).
withWorkManifest(spoketesting.NewUnstructuredWithContent(
"v1", "NewObject", "ns1", "n1",
map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}})).
withManifestConfig(newManifestConfigOption(
"", "newobjects", "ns1", "n1",
&workapiv1.UpdateStrategy{Type: workapiv1.UpdateStrategyTypeServerSideApply})).
withExpectedWorkAction("patch").
withAppliedWorkAction("create").
withExpectedDynamicAction("patch", "patch").
withExpectedManifestCondition(expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue}).
withExpectedWorkCondition(expectedCondition{string(workapiv1.WorkApplied), metav1.ConditionTrue}),
withExpectedWorkCondition(expectedCondition{workapiv1.WorkApplied, metav1.ConditionTrue}),
newTestCase("update single resource with server side apply updateStrategy").
withWorkManifest(spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}})).
withSpokeDynamicObject(spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val2"}})).
withManifestConfig(newManifestConfigOption("", "newobjects", "ns1", "n1", &workapiv1.UpdateStrategy{Type: workapiv1.UpdateStrategyTypeServerSideApply})).
withWorkManifest(spoketesting.NewUnstructuredWithContent(
"v1", "NewObject", "ns1", "n1",
map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}})).
withSpokeDynamicObject(spoketesting.NewUnstructuredWithContent(
"v1", "NewObject", "ns1", "n1",
map[string]interface{}{"spec": map[string]interface{}{"key1": "val2"}})).
withManifestConfig(newManifestConfigOption(
"", "newobjects", "ns1", "n1",
&workapiv1.UpdateStrategy{Type: workapiv1.UpdateStrategyTypeServerSideApply})).
withExpectedWorkAction("patch").
withAppliedWorkAction("create").
withExpectedDynamicAction("patch", "patch").
withExpectedManifestCondition(expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue}).
withExpectedWorkCondition(expectedCondition{string(workapiv1.WorkApplied), metav1.ConditionTrue}),
withExpectedWorkCondition(expectedCondition{workapiv1.WorkApplied, metav1.ConditionTrue}),
newTestCase("update single resource with create only updateStrategy").
withWorkManifest(spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}})).
withSpokeDynamicObject(spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val2"}})).
withManifestConfig(newManifestConfigOption("", "newobjects", "ns1", "n1", &workapiv1.UpdateStrategy{Type: workapiv1.UpdateStrategyTypeCreateOnly})).
withWorkManifest(spoketesting.NewUnstructuredWithContent(
"v1", "NewObject", "ns1", "n1",
map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}})).
withSpokeDynamicObject(spoketesting.NewUnstructuredWithContent(
"v1", "NewObject", "ns1", "n1",
map[string]interface{}{"spec": map[string]interface{}{"key1": "val2"}})).
withManifestConfig(newManifestConfigOption(
"", "newobjects", "ns1", "n1",
&workapiv1.UpdateStrategy{Type: workapiv1.UpdateStrategyTypeCreateOnly})).
withExpectedWorkAction("patch").
withAppliedWorkAction("create").
withExpectedDynamicAction("get", "patch").
withExpectedManifestCondition(expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionTrue}).
withExpectedWorkCondition(expectedCondition{string(workapiv1.WorkApplied), metav1.ConditionTrue}),
withExpectedWorkCondition(expectedCondition{workapiv1.WorkApplied, metav1.ConditionTrue}),
}
for _, c := range cases {
@@ -444,9 +490,13 @@ func TestUpdateStrategy(t *testing.T) {
withUnstructuredObject(c.spokeDynamicObject...)
// The default reactor doesn't support apply, so we need our own (trivial) reactor
controller.dynamicClient.PrependReactor("patch", "newobjects", func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) {
return true, spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}}), nil // clusterroleaggregator drops returned objects so no point in constructing them
})
controller.dynamicClient.PrependReactor("patch", "newobjects",
func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) {
// clusterroleaggregator drops returned objects so no point in constructing them
return true, spoketesting.NewUnstructuredWithContent(
"v1", "NewObject", "ns1", "n1",
map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}}), nil
})
syncContext := testingcommon.NewFakeSyncContext(t, workKey)
err := controller.toController().sync(context.TODO(), syncContext)
if err != nil {
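The comment in the hunk above notes that the default reactor of the fake dynamic client has no apply support, so the test registers its own patch reactor. A minimal standalone sketch of that pattern, using only client-go's fake and testing packages (the helper name, scheme, and fabricated object are assumptions for illustration, not code from this commit):

```go
package example

import (
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	fakedynamic "k8s.io/client-go/dynamic/fake"
	clienttesting "k8s.io/client-go/testing"
)

// newFakeDynamicWithApply returns a fake dynamic client whose "patch" verb on
// newobjects is short-circuited, standing in for server-side apply in tests.
func newFakeDynamicWithApply(scheme *runtime.Scheme) *fakedynamic.FakeDynamicClient {
	client := fakedynamic.NewSimpleDynamicClient(scheme)
	client.PrependReactor("patch", "newobjects",
		func(_ clienttesting.Action) (bool, runtime.Object, error) {
			// handled=true stops the default reactor chain; the returned object is
			// what the caller sees instead of a real apply result.
			return true, &unstructured.Unstructured{Object: map[string]interface{}{
				"apiVersion": "v1",
				"kind":       "NewObject",
				"metadata":   map[string]interface{}{"namespace": "ns1", "name": "n1"},
			}}, nil
		})
	return client
}
```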
@@ -460,14 +510,20 @@ func TestUpdateStrategy(t *testing.T) {
func TestServerSideApplyConflict(t *testing.T) {
testCase := newTestCase("update single resource with server side apply updateStrategy").
withWorkManifest(spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}})).
withSpokeDynamicObject(spoketesting.NewUnstructuredWithContent("v1", "NewObject", "ns1", "n1", map[string]interface{}{"spec": map[string]interface{}{"key1": "val2"}})).
withManifestConfig(newManifestConfigOption("", "newobjects", "ns1", "n1", &workapiv1.UpdateStrategy{Type: workapiv1.UpdateStrategyTypeServerSideApply})).
withWorkManifest(spoketesting.NewUnstructuredWithContent(
"v1", "NewObject", "ns1", "n1",
map[string]interface{}{"spec": map[string]interface{}{"key1": "val1"}})).
withSpokeDynamicObject(spoketesting.NewUnstructuredWithContent(
"v1", "NewObject", "ns1", "n1",
map[string]interface{}{"spec": map[string]interface{}{"key1": "val2"}})).
withManifestConfig(newManifestConfigOption(
"", "newobjects", "ns1", "n1",
&workapiv1.UpdateStrategy{Type: workapiv1.UpdateStrategyTypeServerSideApply})).
withExpectedWorkAction("patch").
withAppliedWorkAction("create").
withExpectedDynamicAction("patch").
withExpectedManifestCondition(expectedCondition{string(workapiv1.ManifestApplied), metav1.ConditionFalse}).
withExpectedWorkCondition(expectedCondition{string(workapiv1.WorkApplied), metav1.ConditionFalse})
withExpectedWorkCondition(expectedCondition{workapiv1.WorkApplied, metav1.ConditionFalse})
work, workKey := spoketesting.NewManifestWork(0, testCase.workManifest...)
work.Spec.ManifestConfigs = testCase.workManifestConfig
@@ -613,7 +669,7 @@ func TestBuildResourceMeta(t *testing.T) {
actual.Ordinal = c.expected.Ordinal
if !equality.Semantic.DeepEqual(actual, c.expected) {
t.Errorf(diff.ObjectDiff(actual, c.expected))
t.Errorf(cmp.Diff(actual, c.expected))
}
})
}
@@ -644,7 +700,7 @@ func TestBuildManifestResourceMeta(t *testing.T) {
actual.Ordinal = c.expected.Ordinal
if !equality.Semantic.DeepEqual(actual, c.expected) {
t.Errorf(diff.ObjectDiff(actual, c.expected))
t.Errorf(cmp.Diff(actual, c.expected))
}
})
}
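The two hunks above replace the deprecated apimachinery diff.ObjectDiff helper with github.com/google/go-cmp. A minimal sketch of the usual go-cmp assertion pattern (illustrative only; assertDeepEqual is an assumed helper, not part of this repo):

```go
package example

import (
	"testing"

	"github.com/google/go-cmp/cmp"
)

// assertDeepEqual fails the test with a readable -want/+got diff when the two
// values differ; cmp.Diff returns the empty string for equal values.
func assertDeepEqual(t *testing.T, want, got interface{}) {
	t.Helper()
	if d := cmp.Diff(want, got); d != "" {
		t.Errorf("unexpected object (-want +got):\n%s", d)
	}
}
```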
@@ -665,24 +721,24 @@ func TestManageOwner(t *testing.T) {
}{
{
name: "foreground by default",
owner: metav1.OwnerReference{UID: "testowner"},
expectOwner: metav1.OwnerReference{UID: "testowner"},
owner: metav1.OwnerReference{UID: defaultOwner},
expectOwner: metav1.OwnerReference{UID: defaultOwner},
},
{
name: "orphan the resource",
owner: metav1.OwnerReference{UID: "testowner"},
owner: metav1.OwnerReference{UID: defaultOwner},
deleteOption: &workapiv1.DeleteOption{PropagationPolicy: workapiv1.DeletePropagationPolicyTypeOrphan},
expectOwner: metav1.OwnerReference{UID: "testowner-"},
},
{
name: "add owner if no orphan rule with selectively orphan",
owner: metav1.OwnerReference{UID: "testowner"},
owner: metav1.OwnerReference{UID: defaultOwner},
deleteOption: &workapiv1.DeleteOption{PropagationPolicy: workapiv1.DeletePropagationPolicyTypeSelectivelyOrphan},
expectOwner: metav1.OwnerReference{UID: "testowner"},
expectOwner: metav1.OwnerReference{UID: defaultOwner},
},
{
name: "orphan the resource with selectively orphan",
owner: metav1.OwnerReference{UID: "testowner"},
owner: metav1.OwnerReference{UID: defaultOwner},
deleteOption: &workapiv1.DeleteOption{
PropagationPolicy: workapiv1.DeletePropagationPolicyTypeSelectivelyOrphan,
SelectivelyOrphan: &workapiv1.SelectivelyOrphan{
@@ -700,7 +756,7 @@ func TestManageOwner(t *testing.T) {
},
{
name: "add owner if resource is not matched in orphan rule with selectively orphan",
owner: metav1.OwnerReference{UID: "testowner"},
owner: metav1.OwnerReference{UID: defaultOwner},
deleteOption: &workapiv1.DeleteOption{
PropagationPolicy: workapiv1.DeletePropagationPolicyTypeSelectivelyOrphan,
SelectivelyOrphan: &workapiv1.SelectivelyOrphan{
@@ -714,7 +770,7 @@ func TestManageOwner(t *testing.T) {
},
},
},
expectOwner: metav1.OwnerReference{UID: "testowner"},
expectOwner: metav1.OwnerReference{UID: defaultOwner},
},
}
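The TestManageOwner cases above swap the repeated "testowner" literal for a defaultOwner constant, presumably to quiet repeated-string-literal lint warnings. The constant's declaration is not shown in this diff; a minimal sketch of the assumed form:

```go
package example

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// Assumed declaration: hoisting the repeated literal into one constant gives the
// value a single point of definition and keeps string-duplication linters quiet.
const defaultOwner = "testowner"

func newTestOwner() metav1.OwnerReference {
	// An untyped string constant converts implicitly to the types.UID field.
	return metav1.OwnerReference{UID: defaultOwner}
}
```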

View File

@@ -205,8 +205,8 @@ func aggregateManifestConditions(generation int64, manifests []workapiv1.Manifes
func (c *AvailableStatusController) getFeedbackValues(
resourceMeta workapiv1.ManifestResourceMeta, obj *unstructured.Unstructured,
manifestOptions []workapiv1.ManifestConfigOption) ([]workapiv1.FeedbackValue, metav1.Condition) {
errs := []error{}
values := []workapiv1.FeedbackValue{}
var errs []error
var values []workapiv1.FeedbackValue
option := helper.FindManifestConiguration(resourceMeta, manifestOptions)
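This hunk, like several others in the commit, rewrites errs := []error{} style declarations as var errs []error. A minimal sketch of the practical difference, illustrative only: both forms append identically, but the composite literal yields a non-nil empty slice while the var form yields nil, which is visible in places such as JSON encoding.

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var a []string  // nil slice: len 0, JSON-encodes as null
	b := []string{} // empty, non-nil slice: len 0, JSON-encodes as []

	a = append(a, "x") // append behaves identically on both forms
	b = append(b, "x")

	aj, _ := json.Marshal([]string(nil))
	bj, _ := json.Marshal([]string{})
	fmt.Println(string(aj), string(bj)) // null []
	fmt.Println(len(a), len(b))         // 1 1
}
```

For slices that are only appended to and then ranged over or length-checked, as in these controllers, the two forms behave the same.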

View File

@@ -115,7 +115,7 @@ func TestSyncManifestWork(t *testing.T) {
t.Fatal(spew.Sdump(work.Status.ResourceStatus.Manifests[0].Conditions))
}
if !hasStatusCondition(work.Status.Conditions, string(workapiv1.WorkAvailable), metav1.ConditionTrue) {
if !hasStatusCondition(work.Status.Conditions, workapiv1.WorkAvailable, metav1.ConditionTrue) {
t.Fatal(spew.Sdump(work.Status.Conditions))
}
},
@@ -184,7 +184,7 @@ func TestSyncManifestWork(t *testing.T) {
t.Fatal(spew.Sdump(work.Status.ResourceStatus.Manifests[1].Conditions))
}
if !hasStatusCondition(work.Status.Conditions, string(workapiv1.WorkAvailable), metav1.ConditionUnknown) {
if !hasStatusCondition(work.Status.Conditions, workapiv1.WorkAvailable, metav1.ConditionUnknown) {
t.Fatal(spew.Sdump(work.Status.Conditions))
}
},
@@ -230,7 +230,7 @@ func TestStatusFeedback(t *testing.T) {
validateActions func(t *testing.T, actions []clienttesting.Action)
}{
{
name: "resource identifer is not matched",
name: "resource identifier is not matched",
existingResources: []runtime.Object{
spoketesting.NewUnstructuredSecret("ns1", "n1", false, "ns1-n1"),
},

View File

@@ -30,8 +30,8 @@ func NewStatusReader() *StatusReader {
}
func (s *StatusReader) GetValuesByRule(obj *unstructured.Unstructured, rule workapiv1.FeedbackRule) ([]workapiv1.FeedbackValue, error) {
errs := []error{}
values := []workapiv1.FeedbackValue{}
var errs []error
var values []workapiv1.FeedbackValue
switch rule.Type {
case workapiv1.WellKnownStatusType:

View File

@@ -1,6 +1,8 @@
package webhook
import (
"crypto/tls"
"k8s.io/apimachinery/pkg/runtime"
// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
// to ensure that exec-entrypoint and run can make use of them.
@@ -30,10 +32,16 @@ func init() {
func (c *Options) RunWebhookServer() error {
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
Scheme: scheme,
Port: c.Port,
HealthProbeBindAddress: ":8000",
CertDir: c.CertDir,
WebhookServer: webhook.NewServer(webhook.Options{TLSMinVersion: "1.3"}),
WebhookServer: webhook.NewServer(webhook.Options{
TLSOpts: []func(config *tls.Config){
func(config *tls.Config) {
config.MinVersion = tls.VersionTLS13
},
},
Port: c.Port,
CertDir: c.CertDir,
}),
})
if err != nil {
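The webhook hunk above moves the minimum-TLS-version setting from the removed TLSMinVersion field to a TLSOpts callback on controller-runtime's webhook server. A minimal standalone sketch of the same pattern (newManager and its parameters are assumptions for illustration, not code from this commit):

```go
package example

import (
	"crypto/tls"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/webhook"
)

// newManager builds a manager whose webhook server only accepts TLS 1.3,
// mirroring the TLSOpts callback used in the diff above.
func newManager(port int, certDir string) (ctrl.Manager, error) {
	server := webhook.NewServer(webhook.Options{
		Port:    port,
		CertDir: certDir,
		TLSOpts: []func(*tls.Config){
			func(cfg *tls.Config) { cfg.MinVersion = tls.VersionTLS13 },
		},
	})
	return ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		WebhookServer: server,
	})
}
```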

View File

@@ -49,7 +49,7 @@ func (r *ManifestWorkWebhook) ValidateUpdate(ctx context.Context, oldObj, newObj
}
// ValidateDelete implements webhook.Validator so a webhook will be registered for the type
func (r *ManifestWorkWebhook) ValidateDelete(_ context.Context, obj runtime.Object) (admission.Warnings, error) {
func (r *ManifestWorkWebhook) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) {
return nil, nil
}

View File

@@ -100,7 +100,8 @@ func TestManifestWorkExecutorValidate(t *testing.T) {
},
},
},
expectErr: apierrors.NewBadRequest(fmt.Sprintf("user test2 cannot manipulate the Manifestwork with executor /klusterlet-work-sa in namespace cluster1")),
expectErr: apierrors.NewBadRequest(
"user test2 cannot manipulate the Manifestwork with executor /klusterlet-work-sa in namespace cluster1"),
},
{
name: "validate executor not nil success",
@@ -164,7 +165,8 @@ func TestManifestWorkExecutorValidate(t *testing.T) {
},
},
},
expectErr: apierrors.NewBadRequest(fmt.Sprintf("user test1 cannot manipulate the Manifestwork with executor ns1/executor2 in namespace cluster1")),
expectErr: apierrors.NewBadRequest(
"user test1 cannot manipulate the Manifestwork with executor ns1/executor2 in namespace cluster1"),
},
{
name: "validate executor not changed success",
@@ -246,7 +248,8 @@ func TestManifestWorkExecutorValidate(t *testing.T) {
},
},
},
expectErr: apierrors.NewBadRequest(fmt.Sprintf("user test1 cannot manipulate the Manifestwork with executor ns1/executor2 in namespace cluster1")),
expectErr: apierrors.NewBadRequest(
"user test1 cannot manipulate the Manifestwork with executor ns1/executor2 in namespace cluster1"),
},
}

View File

@@ -45,7 +45,7 @@ func (r *ManifestWorkReplicaSetWebhook) ValidateUpdate(ctx context.Context, oldO
}
// ValidateDelete implements webhook.Validator so a webhook will be registered for the type
func (r *ManifestWorkReplicaSetWebhook) ValidateDelete(_ context.Context, obj runtime.Object) (
func (r *ManifestWorkReplicaSetWebhook) ValidateDelete(_ context.Context, _ runtime.Object) (
admission.Warnings, error) {
if err := checkFeatureEnabled(); err != nil {
return nil, err
@@ -55,7 +55,7 @@ func (r *ManifestWorkReplicaSetWebhook) ValidateDelete(_ context.Context, obj ru
}
func (r *ManifestWorkReplicaSetWebhook) validateRequest(
newmwrSet *workv1alpha1.ManifestWorkReplicaSet, oldmwrSet *workv1alpha1.ManifestWorkReplicaSet,
newmwrSet *workv1alpha1.ManifestWorkReplicaSet, _ *workv1alpha1.ManifestWorkReplicaSet,
ctx context.Context) error {
if err := checkFeatureEnabled(); err != nil {
return err

View File

@@ -4,7 +4,7 @@ import (
"k8s.io/client-go/kubernetes"
ctrl "sigs.k8s.io/controller-runtime"
v1alpha1 "open-cluster-management.io/api/work/v1alpha1"
"open-cluster-management.io/api/work/v1alpha1"
)
type ManifestWorkReplicaSetWebhook struct {

Some files were not shown because too many files have changed in this diff.