Build common options for agent (#163)
Signed-off-by: Jian Qiu <jqiu@redhat.com>
.github/workflows/e2e.yml | 12 (vendored)
@@ -33,15 +33,15 @@ jobs:
      - name: install imagebuilder
        run: go install github.com/openshift/imagebuilder/cmd/imagebuilder@v1.2.3
      - name: Build images
-       run: make images
+       run: IMAGE_TAG=e2e make images
      - name: Load images
        run: |
-         kind load docker-image --name=kind quay.io/open-cluster-management/registration-operator:latest
-         kind load docker-image --name=kind quay.io/open-cluster-management/registration:latest
-         kind load docker-image --name=kind quay.io/open-cluster-management/work:latest
-         kind load docker-image --name=kind quay.io/open-cluster-management/placement:latest
+         kind load docker-image --name=kind quay.io/open-cluster-management/registration-operator:e2e
+         kind load docker-image --name=kind quay.io/open-cluster-management/registration:e2e
+         kind load docker-image --name=kind quay.io/open-cluster-management/work:e2e
+         kind load docker-image --name=kind quay.io/open-cluster-management/placement:e2e
      - name: Test E2E
        run: |
-         make test-e2e
+         IMAGE_TAG=e2e make test-e2e
        env:
          KUBECONFIG: /home/runner/.kube/config
Makefile | 2
@@ -38,7 +38,7 @@ REGISTRATION_IMAGE ?= $(IMAGE_REGISTRY)/registration:$(IMAGE_TAG)
# PLACEMENT_IMAGE can be set in the env to override calculated value
PLACEMENT_IMAGE ?= $(IMAGE_REGISTRY)/placement:$(IMAGE_TAG)
# ADDON_MANAGER_IMAGE can be set in the env to override calculated value
-ADDON_MANAGER_IMAGE ?= $(IMAGE_REGISTRY)/addon-manager:$(IMAGE_TAG)
+ADDON_MANAGER_IMAGE ?= $(IMAGE_REGISTRY)/addon-manager:latest

$(call build-image,registration,$(REGISTRATION_IMAGE),./build/Dockerfile.registration,.)
$(call build-image,work,$(WORK_IMAGE),./build/Dockerfile.work,.)
@@ -56,7 +56,6 @@ spec:
        {{ if .HostedMode }}
-         - "--kubeconfig=/var/run/secrets/hub/kubeconfig"
        {{ end }}
        imagePullPolicy: Always
        resources:
          requests:
            cpu: 2m
@@ -51,7 +51,7 @@ spec:
        args:
          - "/registration"
          - "agent"
-         - "--cluster-name={{ .ClusterName }}"
+         - "--spoke-cluster-name={{ .ClusterName }}"
          - "--bootstrap-kubeconfig=/spoke/bootstrap/kubeconfig"
        {{ if gt (len .RegistrationFeatureGates) 0 }}
        {{range .RegistrationFeatureGates}}
@@ -5,7 +5,7 @@ import (

    "github.com/openshift/library-go/pkg/controller/controllercmd"

-   "open-cluster-management.io/ocm/pkg/registration-operator/operators/clustermanager"
+   "open-cluster-management.io/ocm/pkg/operator/operators/clustermanager"
    "open-cluster-management.io/ocm/pkg/version"
)
@@ -5,7 +5,7 @@ import (

    "github.com/openshift/library-go/pkg/controller/controllercmd"

-   "open-cluster-management.io/ocm/pkg/registration-operator/operators/klusterlet"
+   "open-cluster-management.io/ocm/pkg/operator/operators/klusterlet"
    "open-cluster-management.io/ocm/pkg/version"
)
pkg/common/options/options.go | 64 (new file)
@@ -0,0 +1,64 @@
+package options
+
+import (
+   "fmt"
+   "strings"
+   "github.com/openshift/library-go/pkg/controller/controllercmd"
+   "github.com/spf13/pflag"
+   apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
+   "k8s.io/client-go/rest"
+   "k8s.io/client-go/tools/clientcmd"
+)
+
+// AgentOptions is the common agent options
+type AgentOptions struct {
+   SpokeKubeconfigFile string
+   SpokeClusterName    string
+   Burst               int
+   QPS                 float32
+}
+
+// NewAgentOptions returns the flags with default value set
+func NewAgentOptions() *AgentOptions {
+   return &AgentOptions{
+       QPS:   50,
+       Burst: 100,
+   }
+}
+
+func (o *AgentOptions) AddFlags(flags *pflag.FlagSet) {
+   flags.StringVar(&o.SpokeKubeconfigFile, "spoke-kubeconfig", o.SpokeKubeconfigFile,
+       "Location of kubeconfig file to connect to spoke cluster. If this is not set, will use '--kubeconfig' to build client to connect to the managed cluster.")
+   flags.StringVar(&o.SpokeClusterName, "spoke-cluster-name", o.SpokeClusterName, "Name of the spoke cluster.")
+   flags.StringVar(&o.SpokeClusterName, "cluster-name", o.SpokeClusterName,
+       "Name of the spoke cluster.")
+   flags.MarkDeprecated("cluster-name", "use spoke-cluster-name flag")
+   flags.Float32Var(&o.QPS, "spoke-kube-api-qps", o.QPS, "QPS to use while talking with apiserver on spoke cluster.")
+   flags.IntVar(&o.Burst, "spoke-kube-api-burst", o.Burst, "Burst to use while talking with apiserver on spoke cluster.")
+}
+
+// SpokeKubeConfig builds kubeconfig for the spoke/managed cluster
+func (o *AgentOptions) SpokeKubeConfig(controllerContext *controllercmd.ControllerContext) (*rest.Config, error) {
+   if o.SpokeKubeconfigFile == "" {
+       return controllerContext.KubeConfig, nil
+   }
+
+   spokeRestConfig, err := clientcmd.BuildConfigFromFlags("" /* leave masterurl as empty */, o.SpokeKubeconfigFile)
+   if err != nil {
+       return nil, fmt.Errorf("unable to load spoke kubeconfig from file %q: %w", o.SpokeKubeconfigFile, err)
+   }
+   spokeRestConfig.QPS = o.QPS
+   spokeRestConfig.Burst = o.Burst
+   return spokeRestConfig, nil
+}
+
+func (o *AgentOptions) Validate() error {
+   if o.SpokeClusterName == "" {
+       return fmt.Errorf("cluster name is empty")
+   }
+   if errMsgs := apimachineryvalidation.ValidateNamespaceName(o.SpokeClusterName, false); len(errMsgs) > 0 {
+       return fmt.Errorf("metadata.name format is not correct: %s", strings.Join(errMsgs, ","))
+   }
+
+   return nil
+}
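For orientation, a minimal sketch of how an agent command consumes this new package: construct the defaults, register the flags, validate before running. Only NewAgentOptions, AddFlags, and Validate come from the file above; the main wrapper is hypothetical:

package main

import (
    "fmt"
    "os"

    "github.com/spf13/pflag"

    commonoptions "open-cluster-management.io/ocm/pkg/common/options"
)

func main() {
    // Defaults: QPS 50, Burst 100, empty cluster name and kubeconfig path.
    opts := commonoptions.NewAgentOptions()

    // Registers --spoke-kubeconfig, --spoke-cluster-name, the deprecated
    // --cluster-name alias, --spoke-kube-api-qps and --spoke-kube-api-burst.
    opts.AddFlags(pflag.CommandLine)
    pflag.Parse()

    // Rejects an empty cluster name or one that is not a valid namespace name.
    if err := opts.Validate(); err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
}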
@@ -9,7 +9,7 @@ import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    opratorapiv1 "open-cluster-management.io/api/operator/v1"
-   "open-cluster-management.io/ocm/pkg/registration-operator/helpers"
+   "open-cluster-management.io/ocm/pkg/operator/helpers"
)

func NamedCondition(name, reason string, status metav1.ConditionStatus) metav1.Condition {

@@ -21,8 +21,8 @@ import (
    operatorinformer "open-cluster-management.io/api/client/operator/informers/externalversions/operator/v1"
    operatorlister "open-cluster-management.io/api/client/operator/listers/operator/v1"
    operatorv1 "open-cluster-management.io/api/operator/v1"
-   "open-cluster-management.io/ocm/pkg/registration-operator/certrotation"
-   "open-cluster-management.io/ocm/pkg/registration-operator/helpers"
+   "open-cluster-management.io/ocm/pkg/operator/certrotation"
+   "open-cluster-management.io/ocm/pkg/operator/helpers"
)

const (

@@ -22,7 +22,7 @@ import (
    operatorinformers "open-cluster-management.io/api/client/operator/informers/externalversions"
    operatorapiv1 "open-cluster-management.io/api/operator/v1"
    testingcommon "open-cluster-management.io/ocm/pkg/common/testing"
-   "open-cluster-management.io/ocm/pkg/registration-operator/helpers"
+   "open-cluster-management.io/ocm/pkg/operator/helpers"
)

const (

@@ -33,7 +33,7 @@ import (
    ocmfeature "open-cluster-management.io/api/feature"
    operatorapiv1 "open-cluster-management.io/api/operator/v1"
    "open-cluster-management.io/ocm/manifests"
-   "open-cluster-management.io/ocm/pkg/registration-operator/helpers"
+   "open-cluster-management.io/ocm/pkg/operator/helpers"
    migrationclient "sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1"
)

@@ -30,7 +30,7 @@ import (
    fakemigrationclient "sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/fake"
    migrationclient "sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1"

-   "open-cluster-management.io/ocm/pkg/registration-operator/helpers"
+   "open-cluster-management.io/ocm/pkg/operator/helpers"
)

var (

@@ -17,8 +17,8 @@ import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    operatorapiv1 "open-cluster-management.io/api/operator/v1"
    "open-cluster-management.io/ocm/manifests"
-   "open-cluster-management.io/ocm/pkg/registration-operator/helpers"
-   "open-cluster-management.io/ocm/pkg/registration-operator/operators/crdmanager"
+   "open-cluster-management.io/ocm/pkg/operator/helpers"
+   "open-cluster-management.io/ocm/pkg/operator/operators/crdmanager"
    migrationclient "sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1"
)

@@ -17,7 +17,7 @@ import (
    "k8s.io/client-go/kubernetes"
    operatorapiv1 "open-cluster-management.io/api/operator/v1"
    "open-cluster-management.io/ocm/manifests"
-   "open-cluster-management.io/ocm/pkg/registration-operator/helpers"
+   "open-cluster-management.io/ocm/pkg/operator/helpers"
)

var (

@@ -19,7 +19,7 @@ import (
    "k8s.io/client-go/rest"
    operatorapiv1 "open-cluster-management.io/api/operator/v1"
    "open-cluster-management.io/ocm/manifests"
-   "open-cluster-management.io/ocm/pkg/registration-operator/helpers"
+   "open-cluster-management.io/ocm/pkg/operator/helpers"
)

var (

@@ -17,7 +17,7 @@ import (
    "k8s.io/client-go/kubernetes"
    operatorapiv1 "open-cluster-management.io/api/operator/v1"
    "open-cluster-management.io/ocm/manifests"
-   "open-cluster-management.io/ocm/pkg/registration-operator/helpers"
+   "open-cluster-management.io/ocm/pkg/operator/helpers"
)

var (

@@ -22,8 +22,8 @@ import (
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
    "k8s.io/klog/v2"
-   "open-cluster-management.io/ocm/pkg/registration-operator/helpers"
-   "open-cluster-management.io/ocm/pkg/registration-operator/operators/clustermanager/controllers/migrationcontroller"
+   "open-cluster-management.io/ocm/pkg/operator/helpers"
+   "open-cluster-management.io/ocm/pkg/operator/operators/clustermanager/controllers/migrationcontroller"
)

var (

@@ -19,7 +19,7 @@ import (
    operatorinformers "open-cluster-management.io/api/client/operator/informers/externalversions"
    operatorapiv1 "open-cluster-management.io/api/operator/v1"
    testingcommon "open-cluster-management.io/ocm/pkg/common/testing"
-   "open-cluster-management.io/ocm/pkg/registration-operator/operators/clustermanager/controllers/migrationcontroller"
+   "open-cluster-management.io/ocm/pkg/operator/operators/clustermanager/controllers/migrationcontroller"
)

func TestSync(t *testing.T) {
@@ -27,7 +27,7 @@ import (
    operatorinformer "open-cluster-management.io/api/client/operator/informers/externalversions/operator/v1"
    operatorlister "open-cluster-management.io/api/client/operator/listers/operator/v1"
    "open-cluster-management.io/ocm/manifests"
-   "open-cluster-management.io/ocm/pkg/registration-operator/helpers"
+   "open-cluster-management.io/ocm/pkg/operator/helpers"
    migrationv1alpha1 "sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1"
    migrationv1alpha1client "sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1"
)

@@ -15,7 +15,7 @@ import (
    operatorv1client "open-cluster-management.io/api/client/operator/clientset/versioned/typed/operator/v1"
    operatorinformer "open-cluster-management.io/api/client/operator/informers/externalversions/operator/v1"
    operatorlister "open-cluster-management.io/api/client/operator/listers/operator/v1"
-   "open-cluster-management.io/ocm/pkg/registration-operator/helpers"
+   "open-cluster-management.io/ocm/pkg/operator/helpers"

    "github.com/openshift/library-go/pkg/controller/factory"
    "github.com/openshift/library-go/pkg/operator/events"

@@ -17,7 +17,7 @@ import (
    operatorinformers "open-cluster-management.io/api/client/operator/informers/externalversions"
    operatorapiv1 "open-cluster-management.io/api/operator/v1"
    testingcommon "open-cluster-management.io/ocm/pkg/common/testing"
-   testinghelper "open-cluster-management.io/ocm/pkg/registration-operator/helpers/testing"
+   testinghelper "open-cluster-management.io/ocm/pkg/operator/helpers/testing"
)

const testClusterManagerName = "testclustermanager"

@@ -7,18 +7,18 @@ import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/fields"
    corev1informers "k8s.io/client-go/informers/core/v1"
-   "open-cluster-management.io/ocm/pkg/registration-operator/helpers"
+   "open-cluster-management.io/ocm/pkg/operator/helpers"

    "github.com/openshift/library-go/pkg/controller/controllercmd"
    "k8s.io/client-go/informers"
    "k8s.io/client-go/kubernetes"
    operatorclient "open-cluster-management.io/api/client/operator/clientset/versioned"
    operatorinformer "open-cluster-management.io/api/client/operator/informers/externalversions"
-   "open-cluster-management.io/ocm/pkg/registration-operator/operators/clustermanager/controllers/certrotationcontroller"
-   "open-cluster-management.io/ocm/pkg/registration-operator/operators/clustermanager/controllers/clustermanagercontroller"
-   "open-cluster-management.io/ocm/pkg/registration-operator/operators/clustermanager/controllers/crdstatuccontroller"
-   "open-cluster-management.io/ocm/pkg/registration-operator/operators/clustermanager/controllers/migrationcontroller"
-   clustermanagerstatuscontroller "open-cluster-management.io/ocm/pkg/registration-operator/operators/clustermanager/controllers/statuscontroller"
+   "open-cluster-management.io/ocm/pkg/operator/operators/clustermanager/controllers/certrotationcontroller"
+   "open-cluster-management.io/ocm/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller"
+   "open-cluster-management.io/ocm/pkg/operator/operators/clustermanager/controllers/crdstatuccontroller"
+   "open-cluster-management.io/ocm/pkg/operator/operators/clustermanager/controllers/migrationcontroller"
+   clustermanagerstatuscontroller "open-cluster-management.io/ocm/pkg/operator/operators/clustermanager/controllers/statuscontroller"
)

type Options struct {

@@ -11,7 +11,7 @@ import (
    "k8s.io/apimachinery/pkg/runtime"
    coreinformer "k8s.io/client-go/informers/core/v1"
    "k8s.io/client-go/kubernetes"
-   "open-cluster-management.io/ocm/pkg/registration-operator/helpers"
+   "open-cluster-management.io/ocm/pkg/operator/helpers"
)

const (

@@ -8,7 +8,7 @@ import (

    operatorinformer "open-cluster-management.io/api/client/operator/informers/externalversions/operator/v1"
    operatorlister "open-cluster-management.io/api/client/operator/listers/operator/v1"
-   "open-cluster-management.io/ocm/pkg/registration-operator/helpers"
+   "open-cluster-management.io/ocm/pkg/operator/helpers"

    "github.com/openshift/library-go/pkg/controller/factory"
    "github.com/openshift/library-go/pkg/operator/events"

@@ -27,7 +27,7 @@ import (
    operatorapiv1 "open-cluster-management.io/api/operator/v1"

    "open-cluster-management.io/ocm/manifests"
-   "open-cluster-management.io/ocm/pkg/registration-operator/helpers"
+   "open-cluster-management.io/ocm/pkg/operator/helpers"
)

type klusterletCleanupController struct {

@@ -11,7 +11,7 @@ import (
    clienttesting "k8s.io/client-go/testing"
    "k8s.io/klog/v2"
    testingcommon "open-cluster-management.io/ocm/pkg/common/testing"
-   "open-cluster-management.io/ocm/pkg/registration-operator/helpers"
+   "open-cluster-management.io/ocm/pkg/operator/helpers"
)

// TestSyncDelete test cleanup hub deploy

@@ -29,7 +29,7 @@ import (
    workv1client "open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1"
    operatorapiv1 "open-cluster-management.io/api/operator/v1"

-   "open-cluster-management.io/ocm/pkg/registration-operator/helpers"
+   "open-cluster-management.io/ocm/pkg/operator/helpers"
)

const (

@@ -34,8 +34,8 @@ import (
    operatorapiv1 "open-cluster-management.io/api/operator/v1"
    workapiv1 "open-cluster-management.io/api/work/v1"
    testingcommon "open-cluster-management.io/ocm/pkg/common/testing"
-   "open-cluster-management.io/ocm/pkg/registration-operator/helpers"
-   testinghelper "open-cluster-management.io/ocm/pkg/registration-operator/helpers/testing"
+   "open-cluster-management.io/ocm/pkg/operator/helpers"
+   testinghelper "open-cluster-management.io/ocm/pkg/operator/helpers/testing"
)

type testController struct {

@@ -350,7 +350,7 @@ func assertRegistrationDeployment(t *testing.T, actions []clienttesting.Action,
    expectedArgs := []string{
        "/registration",
        "agent",
-       fmt.Sprintf("--cluster-name=%s", clusterName),
+       fmt.Sprintf("--spoke-cluster-name=%s", clusterName),
        "--bootstrap-kubeconfig=/spoke/bootstrap/kubeconfig",
        "--feature-gates=AddonManagement=true",
    }
@@ -17,8 +17,8 @@ import (
    "k8s.io/apimachinery/pkg/util/version"
    operatorapiv1 "open-cluster-management.io/api/operator/v1"
    "open-cluster-management.io/ocm/manifests"
-   "open-cluster-management.io/ocm/pkg/registration-operator/helpers"
-   "open-cluster-management.io/ocm/pkg/registration-operator/operators/crdmanager"
+   "open-cluster-management.io/ocm/pkg/operator/helpers"
+   "open-cluster-management.io/ocm/pkg/operator/operators/crdmanager"
)

var (

@@ -20,7 +20,7 @@ import (
    operatorapiv1 "open-cluster-management.io/api/operator/v1"

    "open-cluster-management.io/ocm/manifests"
-   "open-cluster-management.io/ocm/pkg/registration-operator/helpers"
+   "open-cluster-management.io/ocm/pkg/operator/helpers"
)

var (

@@ -18,7 +18,7 @@ import (
    "k8s.io/client-go/kubernetes"
    operatorapiv1 "open-cluster-management.io/api/operator/v1"
    "open-cluster-management.io/ocm/manifests"
-   "open-cluster-management.io/ocm/pkg/registration-operator/helpers"
+   "open-cluster-management.io/ocm/pkg/operator/helpers"
)

var (

@@ -18,7 +18,7 @@ import (
    "k8s.io/client-go/kubernetes"
    operatorapiv1 "open-cluster-management.io/api/operator/v1"
    "open-cluster-management.io/ocm/manifests"
-   "open-cluster-management.io/ocm/pkg/registration-operator/helpers"
+   "open-cluster-management.io/ocm/pkg/operator/helpers"
)

// runtimeReconcile ensure all runtime of klusterlet is applied

@@ -23,7 +23,7 @@ import (
    operatorv1client "open-cluster-management.io/api/client/operator/clientset/versioned/typed/operator/v1"
    operatorinformer "open-cluster-management.io/api/client/operator/informers/externalversions/operator/v1"
    operatorlister "open-cluster-management.io/api/client/operator/listers/operator/v1"
-   "open-cluster-management.io/ocm/pkg/registration-operator/helpers"
+   "open-cluster-management.io/ocm/pkg/operator/helpers"
)

// SSARReSyncTime is exposed so that integration tests can crank up the controller sync speed.

@@ -23,8 +23,8 @@ import (
    operatorinformers "open-cluster-management.io/api/client/operator/informers/externalversions"
    operatorapiv1 "open-cluster-management.io/api/operator/v1"
    testingcommon "open-cluster-management.io/ocm/pkg/common/testing"
-   "open-cluster-management.io/ocm/pkg/registration-operator/helpers"
-   testinghelper "open-cluster-management.io/ocm/pkg/registration-operator/helpers/testing"
+   "open-cluster-management.io/ocm/pkg/operator/helpers"
+   testinghelper "open-cluster-management.io/ocm/pkg/operator/helpers/testing"
)

type testController struct {

@@ -18,7 +18,7 @@ import (
    operatorv1client "open-cluster-management.io/api/client/operator/clientset/versioned/typed/operator/v1"
    operatorinformer "open-cluster-management.io/api/client/operator/informers/externalversions/operator/v1"
    operatorlister "open-cluster-management.io/api/client/operator/listers/operator/v1"
-   "open-cluster-management.io/ocm/pkg/registration-operator/helpers"
+   "open-cluster-management.io/ocm/pkg/operator/helpers"
)

type klusterletStatusController struct {

@@ -16,7 +16,7 @@ import (
    operatorinformers "open-cluster-management.io/api/client/operator/informers/externalversions"
    operatorapiv1 "open-cluster-management.io/api/operator/v1"
    testingcommon "open-cluster-management.io/ocm/pkg/common/testing"
-   testinghelper "open-cluster-management.io/ocm/pkg/registration-operator/helpers/testing"
+   testinghelper "open-cluster-management.io/ocm/pkg/operator/helpers/testing"
)

type testController struct {

@@ -14,11 +14,11 @@ import (
    operatorclient "open-cluster-management.io/api/client/operator/clientset/versioned"
    operatorinformer "open-cluster-management.io/api/client/operator/informers/externalversions"
    workclientset "open-cluster-management.io/api/client/work/clientset/versioned"
-   "open-cluster-management.io/ocm/pkg/registration-operator/operators/klusterlet/controllers/addonsecretcontroller"
-   "open-cluster-management.io/ocm/pkg/registration-operator/operators/klusterlet/controllers/bootstrapcontroller"
-   "open-cluster-management.io/ocm/pkg/registration-operator/operators/klusterlet/controllers/klusterletcontroller"
-   "open-cluster-management.io/ocm/pkg/registration-operator/operators/klusterlet/controllers/ssarcontroller"
-   "open-cluster-management.io/ocm/pkg/registration-operator/operators/klusterlet/controllers/statuscontroller"
+   "open-cluster-management.io/ocm/pkg/operator/operators/klusterlet/controllers/addonsecretcontroller"
+   "open-cluster-management.io/ocm/pkg/operator/operators/klusterlet/controllers/bootstrapcontroller"
+   "open-cluster-management.io/ocm/pkg/operator/operators/klusterlet/controllers/klusterletcontroller"
+   "open-cluster-management.io/ocm/pkg/operator/operators/klusterlet/controllers/ssarcontroller"
+   "open-cluster-management.io/ocm/pkg/operator/operators/klusterlet/controllers/statuscontroller"
)

// defaultSpokeComponentNamespace is the default namespace in which the operator is deployed
@@ -4,7 +4,6 @@ import (
    "context"
    "errors"
    "fmt"
-   "io/ioutil"
    "os"
    "path"
    "time"

@@ -16,6 +15,7 @@ import (
    addoninformers "open-cluster-management.io/api/client/addon/informers/externalversions"
    clusterv1client "open-cluster-management.io/api/client/cluster/clientset/versioned"
    clusterv1informers "open-cluster-management.io/api/client/cluster/informers/externalversions"
+   commonoptions "open-cluster-management.io/ocm/pkg/common/options"
    "open-cluster-management.io/ocm/pkg/features"
    "open-cluster-management.io/ocm/pkg/registration/clientcert"
    "open-cluster-management.io/ocm/pkg/registration/helpers"

@@ -54,8 +54,8 @@ var AddOnLeaseControllerSyncInterval = 30 * time.Second

// SpokeAgentOptions holds configuration for spoke cluster agent
type SpokeAgentOptions struct {
+   AgentOptions        *commonoptions.AgentOptions
    ComponentNamespace  string
-   ClusterName         string
    AgentName           string
    BootstrapKubeconfig string
    HubKubeconfigSecret string

@@ -63,13 +63,13 @@ type SpokeAgentOptions struct {
    SpokeExternalServerURLs     []string
    ClusterHealthCheckPeriod    time.Duration
    MaxCustomClusterClaims      int
-   SpokeKubeconfig             string
    ClientCertExpirationSeconds int32
}

// NewSpokeAgentOptions returns a SpokeAgentOptions
func NewSpokeAgentOptions() *SpokeAgentOptions {
    return &SpokeAgentOptions{
+       AgentOptions:             commonoptions.NewAgentOptions(),
        HubKubeconfigSecret:      "hub-kubeconfig-secret",
        HubKubeconfigDir:         "/spoke/hub-kubeconfig",
        ClusterHealthCheckPeriod: 1 * time.Minute,
@@ -116,7 +116,7 @@ func (o *SpokeAgentOptions) RunSpokeAgent(ctx context.Context, controllerContext

    // load spoke client config and create spoke clients,
    // the registration agent may not running in the spoke/managed cluster.
-   spokeClientConfig, err := o.spokeKubeConfig(controllerContext)
+   spokeClientConfig, err := o.AgentOptions.SpokeKubeConfig(controllerContext)
    if err != nil {
        return err
    }

@@ -135,7 +135,7 @@ func (o *SpokeAgentOptions) RunSpokeAgent(ctx context.Context, controllerContext
        klog.Fatal(err)
    }

-   klog.Infof("Cluster name is %q and agent name is %q", o.ClusterName, o.AgentName)
+   klog.Infof("Cluster name is %q and agent name is %q", o.AgentOptions.SpokeClusterName, o.AgentName)

    // create shared informer factory for spoke cluster
    spokeKubeInformerFactory := informers.NewSharedInformerFactory(spokeKubeClient, 10*time.Minute)

@@ -165,7 +165,7 @@ func (o *SpokeAgentOptions) RunSpokeAgent(ctx context.Context, controllerContext

    // start a SpokeClusterCreatingController to make sure there is a spoke cluster on hub cluster
    spokeClusterCreatingController := managedcluster.NewManagedClusterCreatingController(
-       o.ClusterName, o.SpokeExternalServerURLs,
+       o.AgentOptions.SpokeClusterName, o.SpokeExternalServerURLs,
        spokeClusterCABundle,
        bootstrapClusterClient,
        controllerContext.EventRecorder,

@@ -211,9 +211,9 @@ func (o *SpokeAgentOptions) RunSpokeAgent(ctx context.Context, controllerContext
        return err
    }

-   controllerName := fmt.Sprintf("BootstrapClientCertController@cluster:%s", o.ClusterName)
+   controllerName := fmt.Sprintf("BootstrapClientCertController@cluster:%s", o.AgentOptions.SpokeClusterName)
    clientCertForHubController := managedcluster.NewClientCertForHubController(
-       o.ClusterName, o.AgentName, o.ComponentNamespace, o.HubKubeconfigSecret,
+       o.AgentOptions.SpokeClusterName, o.AgentName, o.ComponentNamespace, o.HubKubeconfigSecret,
        kubeconfigData,
        // store the secret in the cluster where the agent pod runs
        bootstrapNamespacedManagementKubeInformerFactory.Core().V1().Secrets(),

@@ -269,17 +269,17 @@ func (o *SpokeAgentOptions) RunSpokeAgent(ctx context.Context, controllerContext
        hubKubeClient,
        10*time.Minute,
        informers.WithTweakListOptions(func(listOptions *metav1.ListOptions) {
-           listOptions.LabelSelector = fmt.Sprintf("%s=%s", clusterv1.ClusterNameLabelKey, o.ClusterName)
+           listOptions.LabelSelector = fmt.Sprintf("%s=%s", clusterv1.ClusterNameLabelKey, o.AgentOptions.SpokeClusterName)
        }),
    )
    addOnInformerFactory := addoninformers.NewSharedInformerFactoryWithOptions(
-       addOnClient, 10*time.Minute, addoninformers.WithNamespace(o.ClusterName))
+       addOnClient, 10*time.Minute, addoninformers.WithNamespace(o.AgentOptions.SpokeClusterName))
    // create a cluster informer factory with name field selector because we just need to handle the current spoke cluster
    hubClusterInformerFactory := clusterv1informers.NewSharedInformerFactoryWithOptions(
        hubClusterClient,
        10*time.Minute,
        clusterv1informers.WithTweakListOptions(func(listOptions *metav1.ListOptions) {
-           listOptions.FieldSelector = fields.OneTermEqualSelector("metadata.name", o.ClusterName).String()
+           listOptions.FieldSelector = fields.OneTermEqualSelector("metadata.name", o.AgentOptions.SpokeClusterName).String()
        }),
    )

@@ -298,15 +298,15 @@ func (o *SpokeAgentOptions) RunSpokeAgent(ctx context.Context, controllerContext
    }

    // create another ClientCertForHubController for client certificate rotation
-   controllerName := fmt.Sprintf("ClientCertController@cluster:%s", o.ClusterName)
+   controllerName := fmt.Sprintf("ClientCertController@cluster:%s", o.AgentOptions.SpokeClusterName)
    clientCertForHubController := managedcluster.NewClientCertForHubController(
-       o.ClusterName, o.AgentName, o.ComponentNamespace, o.HubKubeconfigSecret,
+       o.AgentOptions.SpokeClusterName, o.AgentName, o.ComponentNamespace, o.HubKubeconfigSecret,
        kubeconfigData,
        namespacedManagementKubeInformerFactory.Core().V1().Secrets(),
        csrControl,
        o.ClientCertExpirationSeconds,
        managementKubeClient,
-       managedcluster.GenerateStatusUpdater(hubClusterClient, o.ClusterName),
+       managedcluster.GenerateStatusUpdater(hubClusterClient, o.AgentOptions.SpokeClusterName),
        controllerContext.EventRecorder,
        controllerName,
    )

@@ -316,7 +316,7 @@ func (o *SpokeAgentOptions) RunSpokeAgent(ctx context.Context, controllerContext

    // create ManagedClusterJoiningController to reconcile instances of ManagedCluster on the managed cluster
    managedClusterJoiningController := managedcluster.NewManagedClusterJoiningController(
-       o.ClusterName,
+       o.AgentOptions.SpokeClusterName,
        hubClusterClient,
        hubClusterInformerFactory.Cluster().V1().ManagedClusters(),
        controllerContext.EventRecorder,

@@ -324,7 +324,7 @@ func (o *SpokeAgentOptions) RunSpokeAgent(ctx context.Context, controllerContext

    // create ManagedClusterLeaseController to keep the spoke cluster heartbeat
    managedClusterLeaseController := managedcluster.NewManagedClusterLeaseController(
-       o.ClusterName,
+       o.AgentOptions.SpokeClusterName,
        hubKubeClient,
        hubClusterInformerFactory.Cluster().V1().ManagedClusters(),
        controllerContext.EventRecorder,

@@ -332,7 +332,7 @@ func (o *SpokeAgentOptions) RunSpokeAgent(ctx context.Context, controllerContext

    // create NewManagedClusterStatusController to update the spoke cluster status
    managedClusterHealthCheckController := managedcluster.NewManagedClusterStatusController(
-       o.ClusterName,
+       o.AgentOptions.SpokeClusterName,
        hubClusterClient,
        hubClusterInformerFactory.Cluster().V1().ManagedClusters(),
        spokeKubeClient.Discovery(),

@@ -350,7 +350,7 @@ func (o *SpokeAgentOptions) RunSpokeAgent(ctx context.Context, controllerContext
    if features.DefaultSpokeRegistrationMutableFeatureGate.Enabled(ocmfeature.ClusterClaim) {
        // create managedClusterClaimController to sync cluster claims
        managedClusterClaimController = managedcluster.NewManagedClusterClaimController(
-           o.ClusterName,
+           o.AgentOptions.SpokeClusterName,
            o.MaxCustomClusterClaims,
            hubClusterClient,
            hubClusterInformerFactory.Cluster().V1().ManagedClusters(),

@@ -363,7 +363,7 @@ func (o *SpokeAgentOptions) RunSpokeAgent(ctx context.Context, controllerContext
    var addOnRegistrationController factory.Controller
    if features.DefaultSpokeRegistrationMutableFeatureGate.Enabled(ocmfeature.AddonManagement) {
        addOnLeaseController = addon.NewManagedClusterAddOnLeaseController(
-           o.ClusterName,
+           o.AgentOptions.SpokeClusterName,
            addOnClient,
            addOnInformerFactory.Addon().V1alpha1().ManagedClusterAddOns(),
            hubKubeClient.CoordinationV1(),

@@ -374,7 +374,7 @@ func (o *SpokeAgentOptions) RunSpokeAgent(ctx context.Context, controllerContext
        )

        addOnRegistrationController = addon.NewAddOnRegistrationController(
-           o.ClusterName,
+           o.AgentOptions.SpokeClusterName,
            o.AgentName,
            kubeconfigData,
            addOnClient,
@@ -412,16 +412,13 @@ func (o *SpokeAgentOptions) RunSpokeAgent(ctx context.Context, controllerContext
// AddFlags registers flags for Agent
func (o *SpokeAgentOptions) AddFlags(fs *pflag.FlagSet) {
    features.DefaultSpokeRegistrationMutableFeatureGate.AddFlag(fs)
-   fs.StringVar(&o.ClusterName, "cluster-name", o.ClusterName,
-       "If non-empty, will use as cluster name instead of generated random name.")
+   o.AgentOptions.AddFlags(fs)
    fs.StringVar(&o.BootstrapKubeconfig, "bootstrap-kubeconfig", o.BootstrapKubeconfig,
        "The path of the kubeconfig file for agent bootstrap.")
    fs.StringVar(&o.HubKubeconfigSecret, "hub-kubeconfig-secret", o.HubKubeconfigSecret,
        "The name of secret in component namespace storing kubeconfig for hub.")
    fs.StringVar(&o.HubKubeconfigDir, "hub-kubeconfig-dir", o.HubKubeconfigDir,
        "The mount path of hub-kubeconfig-secret in the container.")
-   fs.StringVar(&o.SpokeKubeconfig, "spoke-kubeconfig", o.SpokeKubeconfig,
-       "The path of the kubeconfig file for managed/spoke cluster. If this is not set, will use '--kubeconfig' to build client to connect to the managed cluster.")
    fs.StringArrayVar(&o.SpokeExternalServerURLs, "spoke-external-server-urls", o.SpokeExternalServerURLs,
        "A list of reachable spoke cluster api server URLs for hub cluster.")
    fs.DurationVar(&o.ClusterHealthCheckPeriod, "cluster-healthcheck-period", o.ClusterHealthCheckPeriod,
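Note that --cluster-name does not disappear for users: the common options register it as a deprecated alias of --spoke-cluster-name, with both flags bound to the same field. A rough test sketch of that behavior (the test file is hypothetical; the API is the new options package):

package options_test

import (
    "testing"

    "github.com/spf13/pflag"

    commonoptions "open-cluster-management.io/ocm/pkg/common/options"
)

// Either spelling of the flag should populate SpokeClusterName; the
// deprecated one additionally emits a warning when it is parsed.
func TestClusterNameAlias(t *testing.T) {
    for _, arg := range []string{"--spoke-cluster-name=c1", "--cluster-name=c1"} {
        opts := commonoptions.NewAgentOptions()
        fs := pflag.NewFlagSet("agent", pflag.ContinueOnError)
        opts.AddFlags(fs)
        if err := fs.Parse([]string{arg}); err != nil {
            t.Fatal(err)
        }
        if opts.SpokeClusterName != "c1" {
            t.Errorf("%s did not set SpokeClusterName", arg)
        }
    }
}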
@@ -438,8 +435,8 @@ func (o *SpokeAgentOptions) Validate() error {
        return errors.New("bootstrap-kubeconfig is required")
    }

-   if o.ClusterName == "" {
-       return errors.New("cluster name is empty")
+   if err := o.AgentOptions.Validate(); err != nil {
+       return err
    }

    if o.AgentName == "" {

@@ -469,7 +466,7 @@ func (o *SpokeAgentOptions) Validate() error {
// Complete fills in missing values.
func (o *SpokeAgentOptions) Complete(coreV1Client corev1client.CoreV1Interface, ctx context.Context, recorder events.Recorder) error {
    // get component namespace of spoke agent
-   nsBytes, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
+   nsBytes, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
    if err != nil {
        o.ComponentNamespace = defaultSpokeComponentNamespace
    } else {

@@ -484,7 +481,7 @@ func (o *SpokeAgentOptions) Complete(coreV1Client corev1client.CoreV1Interface,
    }

    // load or generate cluster/agent names
-   o.ClusterName, o.AgentName = o.getOrGenerateClusterAgentNames()
+   o.AgentOptions.SpokeClusterName, o.AgentName = o.getOrGenerateClusterAgentNames()

    return nil
}

@@ -523,7 +520,7 @@ func (o *SpokeAgentOptions) hasValidHubClientConfig() (bool, error) {
    }

    certPath := path.Join(o.HubKubeconfigDir, clientcert.TLSCertFile)
-   certData, err := ioutil.ReadFile(path.Clean(certPath))
+   certData, err := os.ReadFile(path.Clean(certPath))
    if err != nil {
        klog.V(4).Infof("Unable to load TLS cert file %q", certPath)
        return false, nil

@@ -534,10 +531,10 @@ func (o *SpokeAgentOptions) hasValidHubClientConfig() (bool, error) {
    if err != nil {
        return false, nil
    }
-   if clusterName != o.ClusterName || agentName != o.AgentName {
+   if clusterName != o.AgentOptions.SpokeClusterName || agentName != o.AgentName {
        klog.V(4).Infof("Certificate in file %q is issued for agent %q instead of %q",
            certPath, fmt.Sprintf("%s:%s", clusterName, agentName),
-           fmt.Sprintf("%s:%s", o.ClusterName, o.AgentName))
+           fmt.Sprintf("%s:%s", o.AgentOptions.SpokeClusterName, o.AgentName))
        return false, nil
    }
@@ -560,19 +557,19 @@ func (o *SpokeAgentOptions) getOrGenerateClusterAgentNames() (string, string) {
    // try to load cluster/agent name from tls certification
    var clusterNameInCert, agentNameInCert string
    certPath := path.Join(o.HubKubeconfigDir, clientcert.TLSCertFile)
-   certData, certErr := ioutil.ReadFile(path.Clean(certPath))
+   certData, certErr := os.ReadFile(path.Clean(certPath))
    if certErr == nil {
        clusterNameInCert, agentNameInCert, _ = managedcluster.GetClusterAgentNamesFromCertificate(certData)
    }

-   clusterName := o.ClusterName
+   clusterName := o.AgentOptions.SpokeClusterName
    // if cluster name is not specified with input argument, try to load it from file
    if clusterName == "" {
        // TODO, read cluster name from openshift struct if the spoke agent is running in an openshift cluster

        // and then load the cluster name from the mounted secret
        clusterNameFilePath := path.Join(o.HubKubeconfigDir, clientcert.ClusterNameFile)
-       clusterNameBytes, err := ioutil.ReadFile(path.Clean(clusterNameFilePath))
+       clusterNameBytes, err := os.ReadFile(path.Clean(clusterNameFilePath))
        switch {
        case len(clusterNameInCert) > 0:
            // use cluster name loaded from the tls certification

@@ -591,7 +588,7 @@ func (o *SpokeAgentOptions) getOrGenerateClusterAgentNames() (string, string) {

    // try to load agent name from the mounted secret
    agentNameFilePath := path.Join(o.HubKubeconfigDir, clientcert.AgentNameFile)
-   agentNameBytes, err := ioutil.ReadFile(path.Clean(agentNameFilePath))
+   agentNameBytes, err := os.ReadFile(path.Clean(agentNameFilePath))
    var agentName string
    switch {
    case len(agentNameInCert) > 0:

@@ -619,22 +616,9 @@ func (o *SpokeAgentOptions) getSpokeClusterCABundle(kubeConfig *rest.Config) ([]
    if kubeConfig.CAData != nil {
        return kubeConfig.CAData, nil
    }
-   data, err := ioutil.ReadFile(kubeConfig.CAFile)
+   data, err := os.ReadFile(kubeConfig.CAFile)
    if err != nil {
        return nil, err
    }
    return data, nil
}
-
-// spokeKubeConfig builds kubeconfig for the spoke/managed cluster
-func (o *SpokeAgentOptions) spokeKubeConfig(controllerContext *controllercmd.ControllerContext) (*rest.Config, error) {
-   if o.SpokeKubeconfig == "" {
-       return controllerContext.KubeConfig, nil
-   }
-
-   config, err := clientcmd.BuildConfigFromFlags("" /* leave masterurl as empty */, o.SpokeKubeconfig)
-   if err != nil {
-       return nil, fmt.Errorf("unable to load spoke kubeconfig from file %q: %w", o.SpokeKubeconfig, err)
-   }
-   return config, nil
-}
@@ -3,7 +3,7 @@ package spoke
import (
    "bytes"
    "context"
-   "io/ioutil"
+   commonoptions "open-cluster-management.io/ocm/pkg/common/options"
    "os"
    "path"
    "testing"

@@ -23,7 +23,7 @@ import (
func TestComplete(t *testing.T) {
    // get component namespace
    var componentNamespace string
-   nsBytes, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
+   nsBytes, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
    if err != nil {
        componentNamespace = defaultSpokeComponentNamespace
    } else {

@@ -102,14 +102,16 @@ func TestComplete(t *testing.T) {
    kubeClient := kubefake.NewSimpleClientset(objects...)

    // create a tmp dir to dump hub kubeconfig
-   dir, err := ioutil.TempDir("", "hub-kubeconfig")
+   dir, err := os.MkdirTemp("", "hub-kubeconfig")
    if err != nil {
        t.Error("unable to create a tmp dir")
    }
    defer os.RemoveAll(dir)

    options := &SpokeAgentOptions{
-       ClusterName:         c.clusterName,
+       AgentOptions: &commonoptions.AgentOptions{
+           SpokeClusterName: c.clusterName,
+       },
        HubKubeconfigSecret: "hub-kubeconfig-secret",
        HubKubeconfigDir:    dir,
    }

@@ -120,14 +122,14 @@ func TestComplete(t *testing.T) {
    if options.ComponentNamespace == "" {
        t.Error("component namespace should not be empty")
    }
-   if options.ClusterName == "" {
+   if options.AgentOptions.SpokeClusterName == "" {
        t.Error("cluster name should not be empty")
    }
    if options.AgentName == "" {
        t.Error("agent name should not be empty")
    }
-   if len(c.expectedClusterName) > 0 && options.ClusterName != c.expectedClusterName {
-       t.Errorf("expect cluster name %q but got %q", c.expectedClusterName, options.ClusterName)
+   if len(c.expectedClusterName) > 0 && options.AgentOptions.SpokeClusterName != c.expectedClusterName {
+       t.Errorf("expect cluster name %q but got %q", c.expectedClusterName, options.AgentOptions.SpokeClusterName)
    }
    if len(c.expectedAgentName) > 0 && options.AgentName != c.expectedAgentName {
        t.Errorf("expect agent name %q but got %q", c.expectedAgentName, options.AgentName)
@@ -139,7 +141,7 @@ func TestComplete(t *testing.T) {
func TestValidate(t *testing.T) {
    defaultCompletedOptions := NewSpokeAgentOptions()
    defaultCompletedOptions.BootstrapKubeconfig = "/spoke/bootstrap/kubeconfig"
-   defaultCompletedOptions.ClusterName = "testcluster"
+   defaultCompletedOptions.AgentOptions.SpokeClusterName = "testcluster"
    defaultCompletedOptions.AgentName = "testagent"

    cases := []struct {

@@ -154,19 +156,21 @@ func TestValidate(t *testing.T) {
        },
        {
            name:        "no cluster name",
-           options:     &SpokeAgentOptions{BootstrapKubeconfig: "/spoke/bootstrap/kubeconfig"},
+           options:     &SpokeAgentOptions{BootstrapKubeconfig: "/spoke/bootstrap/kubeconfig", AgentOptions: &commonoptions.AgentOptions{}},
            expectedErr: "cluster name is empty",
        },
        {
            name:        "no agent name",
-           options:     &SpokeAgentOptions{BootstrapKubeconfig: "/spoke/bootstrap/kubeconfig", ClusterName: "testcluster"},
+           options:     &SpokeAgentOptions{BootstrapKubeconfig: "/spoke/bootstrap/kubeconfig", AgentOptions: &commonoptions.AgentOptions{SpokeClusterName: "testcluster"}},
            expectedErr: "agent name is empty",
        },
        {
            name: "invalid external server URLs",
            options: &SpokeAgentOptions{
-               BootstrapKubeconfig: "/spoke/bootstrap/kubeconfig",
-               ClusterName:         "testcluster",
+               BootstrapKubeconfig: "/spoke/bootstrap/kubeconfig",
+               AgentOptions: &commonoptions.AgentOptions{
+                   SpokeClusterName: "testcluster",
+               },
                AgentName:               "testagent",
                SpokeExternalServerURLs: []string{"https://127.0.0.1:64433", "http://127.0.0.1:8080"},
            },
@@ -175,8 +179,10 @@ func TestValidate(t *testing.T) {
        {
            name: "invalid cluster healthcheck period",
            options: &SpokeAgentOptions{
-               BootstrapKubeconfig: "/spoke/bootstrap/kubeconfig",
-               ClusterName:         "testcluster",
+               BootstrapKubeconfig: "/spoke/bootstrap/kubeconfig",
+               AgentOptions: &commonoptions.AgentOptions{
+                   SpokeClusterName: "testcluster",
+               },
                AgentName:                "testagent",
                ClusterHealthCheckPeriod: 0,
            },

@@ -190,12 +196,14 @@ func TestValidate(t *testing.T) {
        {
            name: "default completed options",
            options: &SpokeAgentOptions{
-               HubKubeconfigSecret:      "hub-kubeconfig-secret",
-               HubKubeconfigDir:         "/spoke/hub-kubeconfig",
-               ClusterHealthCheckPeriod: 1 * time.Minute,
-               MaxCustomClusterClaims:   20,
-               BootstrapKubeconfig:      "/spoke/bootstrap/kubeconfig",
-               ClusterName:              "testcluster",
+               HubKubeconfigSecret:      "hub-kubeconfig-secret",
+               HubKubeconfigDir:         "/spoke/hub-kubeconfig",
+               ClusterHealthCheckPeriod: 1 * time.Minute,
+               MaxCustomClusterClaims:   20,
+               BootstrapKubeconfig:      "/spoke/bootstrap/kubeconfig",
+               AgentOptions: &commonoptions.AgentOptions{
+                   SpokeClusterName: "testcluster",
+               },
                AgentName:                   "testagent",
                ClientCertExpirationSeconds: 3599,
            },
@@ -204,12 +212,14 @@ func TestValidate(t *testing.T) {
        {
            name: "default completed options",
            options: &SpokeAgentOptions{
-               HubKubeconfigSecret:      "hub-kubeconfig-secret",
-               HubKubeconfigDir:         "/spoke/hub-kubeconfig",
-               ClusterHealthCheckPeriod: 1 * time.Minute,
-               MaxCustomClusterClaims:   20,
-               BootstrapKubeconfig:      "/spoke/bootstrap/kubeconfig",
-               ClusterName:              "testcluster",
+               HubKubeconfigSecret:      "hub-kubeconfig-secret",
+               HubKubeconfigDir:         "/spoke/hub-kubeconfig",
+               ClusterHealthCheckPeriod: 1 * time.Minute,
+               MaxCustomClusterClaims:   20,
+               BootstrapKubeconfig:      "/spoke/bootstrap/kubeconfig",
+               AgentOptions: &commonoptions.AgentOptions{
+                   SpokeClusterName: "testcluster",
+               },
                AgentName:                   "testagent",
                ClientCertExpirationSeconds: 3600,
            },

@@ -225,7 +235,7 @@ func TestValidate(t *testing.T) {
}

func TestHasValidHubClientConfig(t *testing.T) {
-   tempDir, err := ioutil.TempDir("", "testvalidhubclientconfig")
+   tempDir, err := os.MkdirTemp("", "testvalidhubclientconfig")
    if err != nil {
        t.Errorf("unexpected error: %v", err)
    }
@@ -292,7 +302,9 @@ func TestHasValidHubClientConfig(t *testing.T) {
    }

    options := &SpokeAgentOptions{
-       ClusterName:      c.clusterName,
+       AgentOptions: &commonoptions.AgentOptions{
+           SpokeClusterName: c.clusterName,
+       },
        AgentName:        c.agentName,
        HubKubeconfigDir: tempDir,
    }

@@ -308,7 +320,7 @@ func TestHasValidHubClientConfig(t *testing.T) {
}

func TestGetOrGenerateClusterAgentNames(t *testing.T) {
-   tempDir, err := ioutil.TempDir("", "testgetorgenerateclusteragentnames")
+   tempDir, err := os.MkdirTemp("", "testgetorgenerateclusteragentnames")
    if err != nil {
        t.Errorf("unexpected error: %v", err)
    }
@@ -322,12 +334,12 @@ func TestGetOrGenerateClusterAgentNames(t *testing.T) {
    }{
        {
            name:                "cluster name is specified",
-           options:             &SpokeAgentOptions{ClusterName: "cluster0"},
+           options:             &SpokeAgentOptions{AgentOptions: &commonoptions.AgentOptions{SpokeClusterName: "cluster0"}},
            expectedClusterName: "cluster0",
        },
        {
            name:                "cluster name and agent name are in file",
-           options:             &SpokeAgentOptions{HubKubeconfigDir: tempDir},
+           options:             &SpokeAgentOptions{HubKubeconfigDir: tempDir, AgentOptions: &commonoptions.AgentOptions{}},
            expectedClusterName: "cluster1",
            expectedAgentName:   "agent1",
        },

@@ -356,7 +368,7 @@ func TestGetOrGenerateClusterAgentNames(t *testing.T) {
}

func TestGetSpokeClusterCABundle(t *testing.T) {
-   tempDir, err := ioutil.TempDir("", "testgetspokeclustercabundle")
+   tempDir, err := os.MkdirTemp("", "testgetspokeclustercabundle")
    if err != nil {
        t.Errorf("unexpected error: %v", err)
    }
@@ -2,21 +2,6 @@ package spoke

import (
    "context"
-   "fmt"
-   "time"
-
-   "open-cluster-management.io/ocm/pkg/features"
-
-   workclientset "open-cluster-management.io/api/client/work/clientset/versioned"
-   workinformers "open-cluster-management.io/api/client/work/informers/externalversions"
-   ocmfeature "open-cluster-management.io/api/feature"
-   "open-cluster-management.io/ocm/pkg/work/helper"
-   "open-cluster-management.io/ocm/pkg/work/spoke/auth"
-   "open-cluster-management.io/ocm/pkg/work/spoke/controllers/appliedmanifestcontroller"
-   "open-cluster-management.io/ocm/pkg/work/spoke/controllers/finalizercontroller"
-   "open-cluster-management.io/ocm/pkg/work/spoke/controllers/manifestcontroller"
-   "open-cluster-management.io/ocm/pkg/work/spoke/controllers/statuscontroller"

    "github.com/openshift/library-go/pkg/controller/controllercmd"
    "github.com/spf13/cobra"
    apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"

@@ -24,7 +9,19 @@
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/clientcmd"
+   workclientset "open-cluster-management.io/api/client/work/clientset/versioned"
+   workinformers "open-cluster-management.io/api/client/work/informers/externalversions"
+   ocmfeature "open-cluster-management.io/api/feature"
+   commonoptions "open-cluster-management.io/ocm/pkg/common/options"
+   "open-cluster-management.io/ocm/pkg/features"
+   "open-cluster-management.io/ocm/pkg/work/helper"
+   "open-cluster-management.io/ocm/pkg/work/spoke/auth"
+   "open-cluster-management.io/ocm/pkg/work/spoke/controllers/appliedmanifestcontroller"
+   "open-cluster-management.io/ocm/pkg/work/spoke/controllers/finalizercontroller"
+   "open-cluster-management.io/ocm/pkg/work/spoke/controllers/manifestcontroller"
+   "open-cluster-management.io/ocm/pkg/work/spoke/controllers/statuscontroller"
    "sigs.k8s.io/controller-runtime/pkg/client/apiutil"
+   "time"
)

const (
@@ -41,21 +38,17 @@ const (
|
||||
|
||||
// WorkloadAgentOptions defines the flags for workload agent
|
||||
type WorkloadAgentOptions struct {
|
||||
AgentOptions *commonoptions.AgentOptions
|
||||
HubKubeconfigFile string
|
||||
SpokeKubeconfigFile string
|
||||
SpokeClusterName string
|
||||
AgentID string
|
||||
Burst int
|
||||
StatusSyncInterval time.Duration
|
||||
AppliedManifestWorkEvictionGracePeriod time.Duration
|
||||
QPS float32
|
||||
}
|
||||
|
||||
// NewWorkloadAgentOptions returns the flags with default value set
|
||||
func NewWorkloadAgentOptions() *WorkloadAgentOptions {
|
||||
return &WorkloadAgentOptions{
|
||||
QPS: 50,
|
||||
Burst: 100,
|
||||
AgentOptions: commonoptions.NewAgentOptions(),
|
||||
StatusSyncInterval: 10 * time.Second,
|
||||
AppliedManifestWorkEvictionGracePeriod: 10 * time.Minute,
|
||||
}
|
||||
@@ -64,15 +57,11 @@ func NewWorkloadAgentOptions() *WorkloadAgentOptions {
|
||||
// AddFlags register and binds the default flags
|
||||
func (o *WorkloadAgentOptions) AddFlags(cmd *cobra.Command) {
|
||||
flags := cmd.Flags()
|
||||
o.AgentOptions.AddFlags(flags)
|
||||
features.DefaultSpokeWorkMutableFeatureGate.AddFlag(flags)
|
||||
// This command only supports reading from config
|
||||
flags.StringVar(&o.HubKubeconfigFile, "hub-kubeconfig", o.HubKubeconfigFile, "Location of kubeconfig file to connect to hub cluster.")
|
||||
flags.StringVar(&o.SpokeKubeconfigFile, "spoke-kubeconfig", o.SpokeKubeconfigFile,
|
||||
"Location of kubeconfig file to connect to spoke cluster. If this is not set, will use '--kubeconfig' to build client to connect to the managed cluster.")
|
||||
flags.StringVar(&o.SpokeClusterName, "spoke-cluster-name", o.SpokeClusterName, "Name of spoke cluster.")
|
||||
flags.StringVar(&o.AgentID, "agent-id", o.AgentID, "ID of the work agent to identify the work this agent should handle after restart/recovery.")
|
||||
flags.Float32Var(&o.QPS, "spoke-kube-api-qps", o.QPS, "QPS to use while talking with apiserver on spoke cluster.")
|
||||
flags.IntVar(&o.Burst, "spoke-kube-api-burst", o.Burst, "Burst to use while talking with apiserver on spoke cluster.")
|
||||
flags.DurationVar(&o.StatusSyncInterval, "status-sync-interval", o.StatusSyncInterval, "Interval to sync resource status to hub.")
|
||||
flags.DurationVar(&o.AppliedManifestWorkEvictionGracePeriod, "appliedmanifestwork-eviction-grace-period", o.AppliedManifestWorkEvictionGracePeriod, "Grace period for appliedmanifestwork eviction")
|
||||
}
|
||||
@@ -96,17 +85,15 @@ func (o *WorkloadAgentOptions) RunWorkloadAgent(ctx context.Context, controllerC
|
||||
return err
|
||||
}
|
||||
// Only watch the cluster namespace on hub
|
||||
workInformerFactory := workinformers.NewSharedInformerFactoryWithOptions(hubWorkClient, 5*time.Minute, workinformers.WithNamespace(o.SpokeClusterName))
|
||||
workInformerFactory := workinformers.NewSharedInformerFactoryWithOptions(hubWorkClient, 5*time.Minute, workinformers.WithNamespace(o.AgentOptions.SpokeClusterName))
|
||||
|
||||
// load spoke client config and create spoke clients,
|
||||
// the work agent may not running in the spoke/managed cluster.
|
||||
spokeRestConfig, err := o.spokeKubeConfig(controllerContext)
|
||||
spokeRestConfig, err := o.AgentOptions.SpokeKubeConfig(controllerContext)
|
||||
if err != nil {
|
||||
return err
}

spokeRestConfig.QPS = o.QPS
spokeRestConfig.Burst = o.Burst
spokeDynamicClient, err := dynamic.NewForConfig(spokeRestConfig)
if err != nil {
return err
@@ -138,7 +125,7 @@ func (o *WorkloadAgentOptions) RunWorkloadAgent(ctx context.Context, controllerC
spokeRestConfig,
spokeKubeClient,
workInformerFactory.Work().V1().ManifestWorks(),
o.SpokeClusterName,
o.AgentOptions.SpokeClusterName,
controllerContext.EventRecorder,
restMapper,
).NewExecutorValidator(ctx, features.DefaultSpokeWorkMutableFeatureGate.Enabled(ocmfeature.ExecutorValidatingCaches))
@@ -148,9 +135,9 @@ func (o *WorkloadAgentOptions) RunWorkloadAgent(ctx context.Context, controllerC
spokeDynamicClient,
spokeKubeClient,
spokeAPIExtensionClient,
hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName),
hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName),
workInformerFactory.Work().V1().ManifestWorks(),
workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.SpokeClusterName),
workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.AgentOptions.SpokeClusterName),
spokeWorkClient.WorkV1().AppliedManifestWorks(),
spokeWorkInformerFactory.Work().V1().AppliedManifestWorks(),
hubhash, agentID,
@@ -159,9 +146,9 @@ func (o *WorkloadAgentOptions) RunWorkloadAgent(ctx context.Context, controllerC
)
addFinalizerController := finalizercontroller.NewAddFinalizerController(
controllerContext.EventRecorder,
hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName),
hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName),
workInformerFactory.Work().V1().ManifestWorks(),
workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.SpokeClusterName),
workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.AgentOptions.SpokeClusterName),
)
appliedManifestWorkFinalizeController := finalizercontroller.NewAppliedManifestWorkFinalizeController(
controllerContext.EventRecorder,
@@ -172,9 +159,9 @@ func (o *WorkloadAgentOptions) RunWorkloadAgent(ctx context.Context, controllerC
)
manifestWorkFinalizeController := finalizercontroller.NewManifestWorkFinalizeController(
controllerContext.EventRecorder,
hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName),
hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName),
workInformerFactory.Work().V1().ManifestWorks(),
workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.SpokeClusterName),
workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.AgentOptions.SpokeClusterName),
spokeWorkClient.WorkV1().AppliedManifestWorks(),
spokeWorkInformerFactory.Work().V1().AppliedManifestWorks(),
hubhash,
@@ -182,7 +169,7 @@ func (o *WorkloadAgentOptions) RunWorkloadAgent(ctx context.Context, controllerC
unmanagedAppliedManifestWorkController := finalizercontroller.NewUnManagedAppliedWorkController(
controllerContext.EventRecorder,
workInformerFactory.Work().V1().ManifestWorks(),
workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.SpokeClusterName),
workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.AgentOptions.SpokeClusterName),
spokeWorkClient.WorkV1().AppliedManifestWorks(),
spokeWorkInformerFactory.Work().V1().AppliedManifestWorks(),
o.AppliedManifestWorkEvictionGracePeriod,
@@ -191,9 +178,9 @@ func (o *WorkloadAgentOptions) RunWorkloadAgent(ctx context.Context, controllerC
appliedManifestWorkController := appliedmanifestcontroller.NewAppliedManifestWorkController(
controllerContext.EventRecorder,
spokeDynamicClient,
hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName),
hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName),
workInformerFactory.Work().V1().ManifestWorks(),
workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.SpokeClusterName),
workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.AgentOptions.SpokeClusterName),
spokeWorkClient.WorkV1().AppliedManifestWorks(),
spokeWorkInformerFactory.Work().V1().AppliedManifestWorks(),
hubhash,
@@ -201,9 +188,9 @@ func (o *WorkloadAgentOptions) RunWorkloadAgent(ctx context.Context, controllerC
availableStatusController := statuscontroller.NewAvailableStatusController(
controllerContext.EventRecorder,
spokeDynamicClient,
hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName),
hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName),
workInformerFactory.Work().V1().ManifestWorks(),
workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.SpokeClusterName),
workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.AgentOptions.SpokeClusterName),
o.StatusSyncInterval,
)

@@ -219,16 +206,3 @@ func (o *WorkloadAgentOptions) RunWorkloadAgent(ctx context.Context, controllerC
<-ctx.Done()
return nil
}

// spokeKubeConfig builds kubeconfig for the spoke/managed cluster
func (o *WorkloadAgentOptions) spokeKubeConfig(controllerContext *controllercmd.ControllerContext) (*rest.Config, error) {
if o.SpokeKubeconfigFile == "" {
return controllerContext.KubeConfig, nil
}

spokeRestConfig, err := clientcmd.BuildConfigFromFlags("" /* leave masterurl as empty */, o.SpokeKubeconfigFile)
if err != nil {
return nil, fmt.Errorf("unable to load spoke kubeconfig from file %q: %w", o.SpokeKubeconfigFile, err)
}
return spokeRestConfig, nil
}

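The hunks above follow one pattern: resolve the spoke kubeconfig (falling back to the controller's own config when SpokeKubeconfigFile is empty), apply the QPS/Burst overrides, then build clients from the result. A minimal, self-contained sketch of that flow, assuming only client-go; the function name and parameters here are illustrative, not part of the repository:

package spokeclient

import (
    "fmt"

    "k8s.io/client-go/dynamic"
    "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/clientcmd"
)

// buildSpokeDynamicClient mirrors the flow above: pick the spoke kubeconfig,
// tune client-side throttling, then create a dynamic client from it.
// Note: like the original code, it mutates the chosen config in place.
func buildSpokeDynamicClient(base *rest.Config, spokeKubeconfigFile string, qps float32, burst int) (dynamic.Interface, error) {
    cfg := base
    if spokeKubeconfigFile != "" {
        var err error
        // Leave the master URL empty, exactly as spokeKubeConfig does above.
        cfg, err = clientcmd.BuildConfigFromFlags("", spokeKubeconfigFile)
        if err != nil {
            return nil, fmt.Errorf("unable to load spoke kubeconfig from file %q: %w", spokeKubeconfigFile, err)
        }
    }
    cfg.QPS = qps
    cfg.Burst = burst
    return dynamic.NewForConfig(cfg)
}
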
@@ -32,7 +32,7 @@ test-e2e: deploy-hub deploy-spoke-operator run-e2e

run-e2e: cluster-ip bootstrap-secret
go test -c ./test/e2e
./e2e.test -test.v -ginkgo.v -deploy-klusterlet=true -nil-executor-validating=true
./e2e.test -test.v -ginkgo.v -deploy-klusterlet=true -nil-executor-validating=true -image-tag=e2e

clean-hub: clean-hub-cr clean-hub-operator

@@ -40,7 +40,7 @@ import (
clusterv1 "open-cluster-management.io/api/cluster/v1"
operatorapiv1 "open-cluster-management.io/api/operator/v1"
workapiv1 "open-cluster-management.io/api/work/v1"
"open-cluster-management.io/ocm/pkg/registration-operator/helpers"
"open-cluster-management.io/ocm/pkg/operator/helpers"
)

type Tester struct {
@@ -64,12 +64,13 @@ type Tester struct {
clusterManagerNamespace string
operatorNamespace string
klusterletOperator string
imageTag string
}

// kubeconfigPath is the path of the kubeconfig file; it is read from the "KUBECONFIG" env var by default.
// bootstrapHubSecret is the bootstrap hub kubeconfig secret, and the format is "namespace/secretName".
// Default of bootstrapHubSecret is helpers.KlusterletDefaultNamespace/helpers.BootstrapHubKubeConfig.
func NewTester(hubKubeConfigPath, spokeKubeConfigPath string, timeout time.Duration) *Tester {
func NewTester(hubKubeConfigPath, spokeKubeConfigPath, imageTag string, timeout time.Duration) *Tester {
var tester = Tester{
hubKubeConfigPath: hubKubeConfigPath,
spokeKubeConfigPath: spokeKubeConfigPath,
@@ -79,6 +80,7 @@ func NewTester(hubKubeConfigPath, spokeKubeConfigPath string, timeout time.Durat
clusterManagerNamespace: helpers.ClusterManagerDefaultNamespace,
operatorNamespace: "open-cluster-management",
klusterletOperator: "klusterlet",
imageTag: imageTag,
}

return &tester
@@ -195,8 +197,8 @@ func (t *Tester) CreateKlusterlet(name, clusterName, klusterletNamespace string,
Name: name,
},
Spec: operatorapiv1.KlusterletSpec{
RegistrationImagePullSpec: "quay.io/open-cluster-management/registration:latest",
WorkImagePullSpec: "quay.io/open-cluster-management/work:latest",
RegistrationImagePullSpec: "quay.io/open-cluster-management/registration:" + t.imageTag,
WorkImagePullSpec: "quay.io/open-cluster-management/work:" + t.imageTag,
ExternalServerURLs: []operatorapiv1.ServerURL{
{
URL: "https://localhost",

@@ -14,6 +14,7 @@ var t *Tester
var (
clusterName string
hubKubeconfig string
imageTag string
nilExecutorValidating bool
deployKlusterlet bool
managedKubeconfig string
@@ -27,10 +28,11 @@ func init() {
flag.BoolVar(&deployKlusterlet, "deploy-klusterlet", false, "Whether to deploy the klusterlet on the managed cluster or not (default false)")
flag.StringVar(&managedKubeconfig, "managed-kubeconfig", "", "The kubeconfig of the managed cluster")
flag.DurationVar(&eventuallyTimeout, "eventually-timeout", 60*time.Second, "The timeout of Gomega's Eventually (default 60 seconds)")
flag.StringVar(&imageTag, "image-tag", "latest", "Image tag to run the klusterlet, only used when deploy-klusterlet is enabled")
}

func TestE2E(tt *testing.T) {
t = NewTester(hubKubeconfig, managedKubeconfig, eventuallyTimeout)
t = NewTester(hubKubeconfig, managedKubeconfig, imageTag, eventuallyTimeout)

OutputFail := func(message string, callerSkip ...int) {
t.OutputDebugLogs()

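Together, the Makefile, Tester, and suite hunks above thread a single image tag from the -image-tag flag down to the klusterlet's image pull specs. Collapsed into a runnable sketch (illustrative only; it stands in for the Tester plumbing):

package main

import (
    "flag"
    "fmt"
)

// imageTag matches the new flag registered in the e2e suite above.
var imageTag = flag.String("image-tag", "latest", "image tag used for the klusterlet images")

// klusterletImages fans one tag out to the two pull specs, the way
// CreateKlusterlet now composes them from t.imageTag.
func klusterletImages(tag string) (registration, work string) {
    return "quay.io/open-cluster-management/registration:" + tag,
        "quay.io/open-cluster-management/work:" + tag
}

func main() {
    flag.Parse()
    reg, work := klusterletImages(*imageTag)
    fmt.Println(reg, work)
}
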
@@ -17,7 +17,7 @@ import (
"k8s.io/client-go/util/cert"
operatorapiv1 "open-cluster-management.io/api/operator/v1"
v1 "open-cluster-management.io/api/operator/v1"
"open-cluster-management.io/ocm/pkg/registration-operator/helpers"
"open-cluster-management.io/ocm/pkg/operator/helpers"
)

func updateDeploymentStatus(kubeClient kubernetes.Interface, namespace, deploymentName string) {

@@ -20,9 +20,9 @@ import (
"github.com/openshift/library-go/pkg/controller/controllercmd"

operatorapiv1 "open-cluster-management.io/api/operator/v1"
"open-cluster-management.io/ocm/pkg/registration-operator/helpers"
"open-cluster-management.io/ocm/pkg/registration-operator/operators/clustermanager"
certrotation "open-cluster-management.io/ocm/pkg/registration-operator/operators/clustermanager/controllers/certrotationcontroller"
"open-cluster-management.io/ocm/pkg/operator/helpers"
"open-cluster-management.io/ocm/pkg/operator/operators/clustermanager"
certrotation "open-cluster-management.io/ocm/pkg/operator/operators/clustermanager/controllers/certrotationcontroller"
)

func startHubOperator(ctx context.Context, mode v1.InstallMode) {

@@ -1,4 +1,4 @@
// Package integration provides integration tests for open-cluster-management registration-operator, the test cases include
// Package integration provides integration tests for open-cluster-management operator, the test cases include
// - deploy/update/remove the cluster manager
// - deploy/update/remove the klusterlet
package operator

@@ -24,8 +24,8 @@ import (

operatorclient "open-cluster-management.io/api/client/operator/clientset/versioned"
operatorapiv1 "open-cluster-management.io/api/operator/v1"
"open-cluster-management.io/ocm/pkg/registration-operator/operators/klusterlet/controllers/bootstrapcontroller"
"open-cluster-management.io/ocm/pkg/registration-operator/operators/klusterlet/controllers/ssarcontroller"
"open-cluster-management.io/ocm/pkg/operator/operators/klusterlet/controllers/bootstrapcontroller"
"open-cluster-management.io/ocm/pkg/operator/operators/klusterlet/controllers/ssarcontroller"
)

func TestIntegration(t *testing.T) {
@@ -73,7 +73,7 @@ var _ = ginkgo.BeforeSuite(func() {

var err error

// install registration-operator CRDs and start a local kube-apiserver
// install operator CRDs and start a local kube-apiserver
testEnv = &envtest.Environment{
ErrorIfCRDPathMissing: true,
CRDDirectoryPaths: []string{

@@ -9,7 +9,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
operatorapiv1 "open-cluster-management.io/api/operator/v1"
"open-cluster-management.io/ocm/pkg/registration-operator/helpers"
"open-cluster-management.io/ocm/pkg/operator/helpers"
"open-cluster-management.io/ocm/test/integration/util"
)

@@ -18,8 +18,8 @@ import (
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/client-go/rest"
operatorapiv1 "open-cluster-management.io/api/operator/v1"
"open-cluster-management.io/ocm/pkg/registration-operator/helpers"
"open-cluster-management.io/ocm/pkg/registration-operator/operators/klusterlet"
"open-cluster-management.io/ocm/pkg/operator/helpers"
"open-cluster-management.io/ocm/pkg/operator/operators/klusterlet"
"open-cluster-management.io/ocm/test/integration/util"
)

@@ -506,7 +506,7 @@ var _ = ginkgo.Describe("Klusterlet", func() {
}
gomega.Expect(len(actual.Spec.Template.Spec.Containers)).Should(gomega.Equal(1))
gomega.Expect(len(actual.Spec.Template.Spec.Containers[0].Args)).Should(gomega.Equal(8))
return actual.Spec.Template.Spec.Containers[0].Args[2] == "--cluster-name=cluster2"
return actual.Spec.Template.Spec.Containers[0].Args[2] == "--spoke-cluster-name=cluster2"
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())

// Check if generations are correct

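The assertion above tracks the agent flag rename from --cluster-name to --spoke-cluster-name, which now lives in the new pkg/common/options package. That package is only partially visible in this commit, so the following is a sketch of how the shared options could register the flag. The AgentOptions type, NewAgentOptions constructor, and SpokeClusterName field do appear in this diff, but the AddFlags method and the use of pflag are assumptions:

package options

import "github.com/spf13/pflag"

// AgentOptions holds settings shared by every spoke agent.
type AgentOptions struct {
    SpokeClusterName string
}

func NewAgentOptions() *AgentOptions {
    return &AgentOptions{}
}

// AddFlags registers the renamed flag (previously --cluster-name).
// Hypothetical: the real registration code is not shown in this diff.
func (o *AgentOptions) AddFlags(fs *pflag.FlagSet) {
    fs.StringVar(&o.SpokeClusterName, "spoke-cluster-name", o.SpokeClusterName,
        "Name of the spoke (managed) cluster.")
}
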
@@ -3,6 +3,7 @@ package registration_test
import (
"context"
"fmt"
commonoptions "open-cluster-management.io/ocm/pkg/common/options"
"open-cluster-management.io/ocm/test/integration/util"
"path"
"time"
@@ -167,13 +168,15 @@ var _ = ginkgo.Describe("Addon Lease Resync", func() {
gomega.Expect(err).NotTo(gomega.HaveOccurred())

agentOptions := spoke.SpokeAgentOptions{
ClusterName: managedClusterName,
AgentOptions: commonoptions.NewAgentOptions(),
BootstrapKubeconfig: bootstrapKubeConfigFile,
HubKubeconfigSecret: hubKubeconfigSecret,
HubKubeconfigDir: hubKubeconfigDir,
ClusterHealthCheckPeriod: 1 * time.Minute,
}

agentOptions.AgentOptions.SpokeClusterName = managedClusterName

cancel = runAgent("addontest", agentOptions, spokeCfg)
})

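The addon test above and every registration test below make the same substitution: ClusterName moves off spoke.SpokeAgentOptions and onto the shared AgentOptions, set after construction. Shown once as a helper-style sketch (the helper itself is illustrative; the types, fields, and import paths are those visible in this commit):

package registration_test

import (
    "time"

    commonoptions "open-cluster-management.io/ocm/pkg/common/options"
    "open-cluster-management.io/ocm/pkg/registration/spoke"
)

// newTestAgentOptions builds options the way the tests below now do: create
// the shared AgentOptions first, then set the cluster name on it.
func newTestAgentOptions(clusterName, bootstrapKubeconfig, hubSecret, hubDir string) spoke.SpokeAgentOptions {
    o := spoke.SpokeAgentOptions{
        AgentOptions:             commonoptions.NewAgentOptions(),
        BootstrapKubeconfig:      bootstrapKubeconfig,
        HubKubeconfigSecret:      hubSecret,
        HubKubeconfigDir:         hubDir,
        ClusterHealthCheckPeriod: 1 * time.Minute,
    }
    o.AgentOptions.SpokeClusterName = clusterName
    return o
}
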
@@ -3,6 +3,7 @@ package registration_test
import (
"context"
"fmt"
commonoptions "open-cluster-management.io/ocm/pkg/common/options"
"open-cluster-management.io/ocm/test/integration/util"
"path"
"reflect"
@@ -41,13 +42,15 @@ var _ = ginkgo.Describe("Addon Registration", func() {
gomega.Expect(err).NotTo(gomega.HaveOccurred())

agentOptions := spoke.SpokeAgentOptions{
ClusterName: managedClusterName,
AgentOptions: commonoptions.NewAgentOptions(),
BootstrapKubeconfig: bootstrapKubeConfigFile,
HubKubeconfigSecret: hubKubeconfigSecret,
HubKubeconfigDir: hubKubeconfigDir,
ClusterHealthCheckPeriod: 1 * time.Minute,
}

agentOptions.AgentOptions.SpokeClusterName = managedClusterName

// run registration agent
cancel = runAgent("addontest", agentOptions, spokeCfg)
})

@@ -1,6 +1,7 @@
package registration_test

import (
commonoptions "open-cluster-management.io/ocm/pkg/common/options"
"open-cluster-management.io/ocm/test/integration/util"
"path"
"time"
@@ -20,13 +21,15 @@ var _ = ginkgo.Describe("Certificate Rotation", func() {
hubKubeconfigDir := path.Join(util.TestDir, "rotationtest", "hub-kubeconfig")

agentOptions := spoke.SpokeAgentOptions{
ClusterName: managedClusterName,
AgentOptions: commonoptions.NewAgentOptions(),
BootstrapKubeconfig: bootstrapKubeConfigFile,
HubKubeconfigSecret: hubKubeconfigSecret,
HubKubeconfigDir: hubKubeconfigDir,
ClusterHealthCheckPeriod: 1 * time.Minute,
}

agentOptions.AgentOptions.SpokeClusterName = managedClusterName

// run registration agent
cancel := runAgent("rotationtest", agentOptions, spokeCfg)
defer cancel()

@@ -3,6 +3,7 @@ package registration_test
import (
"context"
"fmt"
commonoptions "open-cluster-management.io/ocm/pkg/common/options"
"open-cluster-management.io/ocm/test/integration/util"
"os"
"path"
@@ -91,12 +92,13 @@ var _ = ginkgo.Describe("Disaster Recovery", func() {
gomega.Expect(err).NotTo(gomega.HaveOccurred())

agentOptions := spoke.SpokeAgentOptions{
ClusterName: managedClusterName,
AgentOptions: commonoptions.NewAgentOptions(),
BootstrapKubeconfig: bootstrapKubeConfigFile,
HubKubeconfigSecret: hubKubeconfigSecret,
HubKubeconfigDir: hubKubeconfigDir,
ClusterHealthCheckPeriod: 1 * time.Minute,
}
agentOptions.AgentOptions.SpokeClusterName = managedClusterName
return runAgent("addontest", agentOptions, spokeCfg)
}

@@ -3,6 +3,7 @@ package registration_test
import (
"context"
"fmt"
commonoptions "open-cluster-management.io/ocm/pkg/common/options"
"open-cluster-management.io/ocm/test/integration/util"
"path"
"time"
@@ -32,12 +33,13 @@ var _ = ginkgo.Describe("Cluster Lease Update", func() {
ginkgo.It("managed cluster lease should be updated constantly", func() {
// run registration agent
agentOptions := spoke.SpokeAgentOptions{
ClusterName: managedClusterName,
AgentOptions: commonoptions.NewAgentOptions(),
BootstrapKubeconfig: bootstrapKubeConfigFile,
HubKubeconfigSecret: hubKubeconfigSecret,
HubKubeconfigDir: hubKubeconfigDir,
ClusterHealthCheckPeriod: 1 * time.Minute,
}
agentOptions.AgentOptions.SpokeClusterName = managedClusterName
cancel := runAgent("cluster-leasetest", agentOptions, spokeCfg)
defer cancel()

@@ -50,12 +52,13 @@ var _ = ginkgo.Describe("Cluster Lease Update", func() {
ginkgo.It("managed cluster available condition should be recovered after its lease update is recovered", func() {
// run registration agent
agentOptions := spoke.SpokeAgentOptions{
ClusterName: managedClusterName,
AgentOptions: commonoptions.NewAgentOptions(),
BootstrapKubeconfig: bootstrapKubeConfigFile,
HubKubeconfigSecret: hubKubeconfigSecret,
HubKubeconfigDir: hubKubeconfigDir,
ClusterHealthCheckPeriod: 1 * time.Minute,
}
agentOptions.AgentOptions.SpokeClusterName = managedClusterName
stop := runAgent("cluster-availabletest", agentOptions, spokeCfg)

bootstrapManagedCluster(managedClusterName, hubKubeconfigSecret, util.TestLeaseDurationSeconds)
@@ -69,12 +72,13 @@ var _ = ginkgo.Describe("Cluster Lease Update", func() {
assertAvailableCondition(managedClusterName, metav1.ConditionUnknown, gracePeriod)

agentOptions = spoke.SpokeAgentOptions{
ClusterName: managedClusterName,
AgentOptions: commonoptions.NewAgentOptions(),
BootstrapKubeconfig: bootstrapKubeConfigFile,
HubKubeconfigSecret: hubKubeconfigSecret,
HubKubeconfigDir: hubKubeconfigDir,
ClusterHealthCheckPeriod: 1 * time.Minute,
}
agentOptions.AgentOptions.SpokeClusterName = managedClusterName
stop = runAgent("cluster-availabletest", agentOptions, spokeCfg)
defer stop()

@@ -86,12 +90,13 @@ var _ = ginkgo.Describe("Cluster Lease Update", func() {
ginkgo.It("managed cluster available condition should be recovered after the cluster is restored", func() {
// run registration agent
agentOptions := spoke.SpokeAgentOptions{
ClusterName: managedClusterName,
AgentOptions: commonoptions.NewAgentOptions(),
BootstrapKubeconfig: bootstrapKubeConfigFile,
HubKubeconfigSecret: hubKubeconfigSecret,
HubKubeconfigDir: hubKubeconfigDir,
ClusterHealthCheckPeriod: 1 * time.Minute,
}
agentOptions.AgentOptions.SpokeClusterName = managedClusterName
cancel := runAgent("cluster-leasetest", agentOptions, spokeCfg)
defer cancel()

@@ -136,12 +141,13 @@ var _ = ginkgo.Describe("Cluster Lease Update", func() {
ginkgo.It("should use a short lease duration", func() {
// run registration agent
agentOptions := spoke.SpokeAgentOptions{
ClusterName: managedClusterName,
AgentOptions: commonoptions.NewAgentOptions(),
BootstrapKubeconfig: bootstrapKubeConfigFile,
HubKubeconfigSecret: hubKubeconfigSecret,
HubKubeconfigDir: hubKubeconfigDir,
ClusterHealthCheckPeriod: 1 * time.Minute,
}
agentOptions.AgentOptions.SpokeClusterName = managedClusterName
stop := runAgent("cluster-leasetest", agentOptions, spokeCfg)

bootstrapManagedCluster(managedClusterName, hubKubeconfigSecret, 60)

@@ -2,6 +2,7 @@ package registration_test

import (
"fmt"
commonoptions "open-cluster-management.io/ocm/pkg/common/options"
"open-cluster-management.io/ocm/test/integration/util"
"path"
"reflect"
@@ -34,12 +35,13 @@ var _ = ginkgo.Describe("Agent Recovery", func() {

// run registration agent with an invalid bootstrap kubeconfig
agentOptions := spoke.SpokeAgentOptions{
ClusterName: managedClusterName,
AgentOptions: commonoptions.NewAgentOptions(),
BootstrapKubeconfig: bootstrapFile,
HubKubeconfigSecret: hubKubeconfigSecret,
HubKubeconfigDir: hubKubeconfigDir,
ClusterHealthCheckPeriod: 1 * time.Minute,
}
agentOptions.AgentOptions.SpokeClusterName = managedClusterName

cancel := runAgent("bootstrap-recoverytest", agentOptions, spokeCfg)
defer cancel()
@@ -121,12 +123,13 @@ var _ = ginkgo.Describe("Agent Recovery", func() {

// run registration agent
agentOptions := spoke.SpokeAgentOptions{
ClusterName: spokeClusterName,
AgentOptions: commonoptions.NewAgentOptions(),
BootstrapKubeconfig: bootstrapKubeConfigFile,
HubKubeconfigSecret: hubKubeconfigSecret,
HubKubeconfigDir: hubKubeconfigDir,
ClusterHealthCheckPeriod: 1 * time.Minute,
}
agentOptions.AgentOptions.SpokeClusterName = spokeClusterName

cancel := runAgent("hubkubeconfig-recoverytest", agentOptions, spokeCfg)
defer cancel()

@@ -3,6 +3,7 @@ package registration_test
import (
"context"
"fmt"
commonoptions "open-cluster-management.io/ocm/pkg/common/options"
"open-cluster-management.io/ocm/test/integration/util"
"path"
"time"
@@ -33,12 +34,13 @@ var _ = ginkgo.Describe("Agent Restart", func() {

ginkgo.By("run registration agent")
agentOptions := spoke.SpokeAgentOptions{
ClusterName: managedClusterName,
AgentOptions: commonoptions.NewAgentOptions(),
BootstrapKubeconfig: bootstrapFile,
HubKubeconfigSecret: hubKubeconfigSecret,
HubKubeconfigDir: hubKubeconfigDir,
ClusterHealthCheckPeriod: 1 * time.Minute,
}
agentOptions.AgentOptions.SpokeClusterName = managedClusterName

stopAgent := runAgent("restart-test", agentOptions, spokeCfg)

@@ -109,12 +111,13 @@ var _ = ginkgo.Describe("Agent Restart", func() {

ginkgo.By("Restart registration agent")
agentOptions = spoke.SpokeAgentOptions{
ClusterName: managedClusterName,
AgentOptions: commonoptions.NewAgentOptions(),
BootstrapKubeconfig: bootstrapFile,
HubKubeconfigSecret: hubKubeconfigSecret,
HubKubeconfigDir: hubKubeconfigDir,
ClusterHealthCheckPeriod: 1 * time.Minute,
}
agentOptions.AgentOptions.SpokeClusterName = managedClusterName
stopAgent = runAgent("restart-test", agentOptions, spokeCfg)
defer stopAgent()

@@ -161,13 +164,13 @@ var _ = ginkgo.Describe("Agent Restart", func() {

ginkgo.By("run registration agent")
agentOptions := spoke.SpokeAgentOptions{
ClusterName: managedClusterName,
AgentOptions: commonoptions.NewAgentOptions(),
BootstrapKubeconfig: bootstrapFile,
HubKubeconfigSecret: hubKubeconfigSecret,
HubKubeconfigDir: hubKubeconfigDir,
ClusterHealthCheckPeriod: 1 * time.Minute,
}

agentOptions.AgentOptions.SpokeClusterName = managedClusterName
stopAgent := runAgent("restart-test", agentOptions, spokeCfg)

ginkgo.By("Check existence of csr and ManagedCluster")
@@ -223,12 +226,13 @@ var _ = ginkgo.Describe("Agent Restart", func() {
ginkgo.By("Restart registration agent with a new cluster name")
managedClusterName = "restart-test-cluster3"
agentOptions = spoke.SpokeAgentOptions{
ClusterName: managedClusterName,
AgentOptions: commonoptions.NewAgentOptions(),
BootstrapKubeconfig: bootstrapFile,
HubKubeconfigSecret: hubKubeconfigSecret,
HubKubeconfigDir: hubKubeconfigDir,
ClusterHealthCheckPeriod: 1 * time.Minute,
}
agentOptions.AgentOptions.SpokeClusterName = managedClusterName
stopAgent = runAgent("restart-test", agentOptions, spokeCfg)
defer stopAgent()

@@ -2,6 +2,7 @@ package registration_test

import (
"fmt"
commonoptions "open-cluster-management.io/ocm/pkg/common/options"
"open-cluster-management.io/ocm/test/integration/util"
"path"
"time"
@@ -27,12 +28,13 @@ var _ = ginkgo.Describe("Cluster Auto Approval", func() {
gomega.Expect(err).NotTo(gomega.HaveOccurred())

agentOptions := spoke.SpokeAgentOptions{
ClusterName: managedClusterName,
AgentOptions: commonoptions.NewAgentOptions(),
BootstrapKubeconfig: bootstrapFile,
HubKubeconfigSecret: hubKubeconfigSecret,
HubKubeconfigDir: hubKubeconfigDir,
ClusterHealthCheckPeriod: 1 * time.Minute,
}
agentOptions.AgentOptions.SpokeClusterName = managedClusterName

// run registration agent
cancel := runAgent("autoapprovaltest", agentOptions, spokeCfg)

@@ -3,6 +3,7 @@ package registration_test
import (
"context"
"fmt"
commonoptions "open-cluster-management.io/ocm/pkg/common/options"
"open-cluster-management.io/ocm/test/integration/util"
"path"
"reflect"
@@ -48,13 +49,14 @@ var _ = ginkgo.Describe("Cluster Claim", func() {

// run registration agent
agentOptions := spoke.SpokeAgentOptions{
ClusterName: managedClusterName,
AgentOptions: commonoptions.NewAgentOptions(),
BootstrapKubeconfig: bootstrapKubeConfigFile,
HubKubeconfigSecret: hubKubeconfigSecret,
HubKubeconfigDir: hubKubeconfigDir,
ClusterHealthCheckPeriod: 1 * time.Minute,
MaxCustomClusterClaims: maxCustomClusterClaims,
}
agentOptions.AgentOptions.SpokeClusterName = managedClusterName
cancel = runAgent("claimtest", agentOptions, spokeCfg)
})

@@ -2,6 +2,7 @@ package registration_test

import (
"fmt"
commonoptions "open-cluster-management.io/ocm/pkg/common/options"
"open-cluster-management.io/ocm/test/integration/util"
"path"
"time"
@@ -24,12 +25,13 @@ var _ = ginkgo.Describe("Joining Process", func() {

// run registration agent
agentOptions := spoke.SpokeAgentOptions{
ClusterName: managedClusterName,
AgentOptions: commonoptions.NewAgentOptions(),
BootstrapKubeconfig: bootstrapKubeConfigFile,
HubKubeconfigSecret: hubKubeconfigSecret,
HubKubeconfigDir: hubKubeconfigDir,
ClusterHealthCheckPeriod: 1 * time.Minute,
}
agentOptions.AgentOptions.SpokeClusterName = managedClusterName

cancel := runAgent("joiningtest", agentOptions, spokeCfg)
defer cancel()

@@ -2,6 +2,7 @@ package registration_test

import (
"fmt"
commonoptions "open-cluster-management.io/ocm/pkg/common/options"
"open-cluster-management.io/ocm/test/integration/util"
"path"
"time"
@@ -30,12 +31,13 @@ var _ = ginkgo.Describe("Collecting Node Resource", func() {

// run registration agent
agentOptions := spoke.SpokeAgentOptions{
ClusterName: managedClusterName,
AgentOptions: commonoptions.NewAgentOptions(),
BootstrapKubeconfig: bootstrapKubeConfigFile,
HubKubeconfigSecret: hubKubeconfigSecret,
HubKubeconfigDir: hubKubeconfigDir,
ClusterHealthCheckPeriod: 1 * time.Minute,
}
agentOptions.AgentOptions.SpokeClusterName = managedClusterName

cancel := runAgent("resorucetest", agentOptions, spokeCfg)
defer cancel()

@@ -10,6 +10,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
v1 "open-cluster-management.io/api/cluster/v1"
commonoptions "open-cluster-management.io/ocm/pkg/common/options"
"open-cluster-management.io/ocm/pkg/registration/helpers"
"open-cluster-management.io/ocm/pkg/registration/hub/taint"
"open-cluster-management.io/ocm/pkg/registration/spoke"
@@ -34,12 +35,13 @@ var _ = ginkgo.Describe("ManagedCluster Taints Update", func() {
// run registration agent
go func() {
agentOptions := spoke.SpokeAgentOptions{
ClusterName: managedClusterName,
AgentOptions: commonoptions.NewAgentOptions(),
BootstrapKubeconfig: bootstrapKubeConfigFile,
HubKubeconfigSecret: hubKubeconfigSecret,
HubKubeconfigDir: hubKubeconfigDir,
ClusterHealthCheckPeriod: 1 * time.Minute,
}
agentOptions.AgentOptions.SpokeClusterName = managedClusterName
err := agentOptions.RunSpokeAgent(ctx, &controllercmd.ControllerContext{
KubeConfig: spokeCfg,
EventRecorder: util.NewIntegrationTestEventRecorder("cluster-tainttest"),

@@ -3,6 +3,7 @@ package work
import (
"context"
"fmt"
commonoptions "open-cluster-management.io/ocm/pkg/common/options"
"open-cluster-management.io/ocm/test/integration/util"
"time"

@@ -29,11 +30,12 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
ginkgo.BeforeEach(func() {
o = spoke.NewWorkloadAgentOptions()
o.HubKubeconfigFile = hubKubeconfigFileName
o.SpokeClusterName = utilrand.String(5)
o.AgentOptions = commonoptions.NewAgentOptions()
o.AgentOptions.SpokeClusterName = utilrand.String(5)
o.StatusSyncInterval = 3 * time.Second

ns := &corev1.Namespace{}
ns.Name = o.SpokeClusterName
ns.Name = o.AgentOptions.SpokeClusterName
_, err := spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

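The work tests adopt the same convention: the random cluster name is generated once, stored on the shared AgentOptions, and everything downstream (namespaces, ManifestWork clients) reads o.AgentOptions.SpokeClusterName. Condensed into a sketch; the import path of the work spoke package is not visible in this diff and is an assumption:

package work

import (
    "time"

    utilrand "k8s.io/apimachinery/pkg/util/rand"

    commonoptions "open-cluster-management.io/ocm/pkg/common/options"
    "open-cluster-management.io/ocm/pkg/work/spoke" // assumed import path
)

// newWorkloadAgentOptions mirrors the BeforeEach above: generate the cluster
// name once and set it on the shared options.
func newWorkloadAgentOptions(hubKubeconfigFile string) *spoke.WorkloadAgentOptions {
    o := spoke.NewWorkloadAgentOptions()
    o.HubKubeconfigFile = hubKubeconfigFile
    o.AgentOptions = commonoptions.NewAgentOptions()
    o.AgentOptions.SpokeClusterName = utilrand.String(5)
    o.StatusSyncInterval = 3 * time.Second
    return o
}
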
@@ -46,7 +48,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
})

ginkgo.JustBeforeEach(func() {
work = util.NewManifestWork(o.SpokeClusterName, "", manifests)
work = util.NewManifestWork(o.AgentOptions.SpokeClusterName, "", manifests)
gomega.Expect(err).ToNot(gomega.HaveOccurred())
})

@@ -54,7 +56,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
if cancel != nil {
cancel()
}
err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), o.SpokeClusterName, metav1.DeleteOptions{})
err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), o.AgentOptions.SpokeClusterName, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
})

@@ -63,15 +65,15 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
var anotherAppliedManifestWorkName string
ginkgo.BeforeEach(func() {
manifests = []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{})),
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{})),
}
// Create another manifestwork with one shared resource.
anotherWork = util.NewManifestWork(o.SpokeClusterName, "sharing-resource-work", []workapiv1.Manifest{manifests[0]})
anotherWork = util.NewManifestWork(o.AgentOptions.SpokeClusterName, "sharing-resource-work", []workapiv1.Manifest{manifests[0]})
})

ginkgo.JustBeforeEach(func() {
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

appliedManifestWorkName = fmt.Sprintf("%s-%s", hubHash, work.Name)
@@ -81,7 +83,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)

anotherWork, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), anotherWork, metav1.CreateOptions{})
anotherWork, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), anotherWork, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(anotherWork.Namespace, anotherWork.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
@@ -94,7 +96,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
ginkgo.It("shared resource between the manifestwork should be kept when one manifestwork is deleted", func() {
// ensure configmap exists and get its uid
util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
curentConfigMap, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
curentConfigMap, err := spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
currentUID := curentConfigMap.UID

@@ -130,7 +132,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())

// Delete one manifestwork
err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

// Ensure the appliedmanifestwork of the deleted manifestwork is removed so it won't try to delete the shared resource
@@ -147,7 +149,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {

// Ensure the configmap is kept and tracked by anotherappliedmanifestwork.
gomega.Eventually(func() error {
configMap, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
configMap, err := spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
if err != nil {
return err
}
@@ -178,7 +180,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
ginkgo.It("shared resource between the manifestwork should be kept when the shared resource is removed from one manifestwork", func() {
// ensure configmap exists and get its uid
util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
curentConfigMap, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
curentConfigMap, err := spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
currentUID := curentConfigMap.UID

@@ -214,10 +216,10 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())

// Update one manifestwork to remove the shared resource
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
work.Spec.Workload.Manifests = []workapiv1.Manifest{manifests[1]}
_, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
_, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

// Ensure the resource is not tracked by the appliedmanifestwork.
@@ -238,7 +240,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {

// Ensure the configmap is kept and tracked by anotherappliedmanifestwork
gomega.Eventually(func() error {
configMap, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
configMap, err := spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
if err != nil {
return err
}
@@ -271,8 +273,8 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
ginkgo.Context("Delete options", func() {
ginkgo.BeforeEach(func() {
manifests = []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{})),
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{})),
}
})

@@ -281,7 +283,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
PropagationPolicy: workapiv1.DeletePropagationPolicyTypeOrphan,
}

work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

appliedManifestWorkName = fmt.Sprintf("%s-%s", hubHash, work.Name)
@@ -296,7 +298,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {

// Ensure ownership of configmap is updated
gomega.Eventually(func() error {
cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
if err != nil {
return err
}
@@ -309,7 +311,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())

gomega.Eventually(func() error {
cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm2", metav1.GetOptions{})
cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm2", metav1.GetOptions{})
if err != nil {
return err
}
@@ -322,12 +324,12 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())

// Delete the work
err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

// Wait for deletion of manifest work
gomega.Eventually(func() bool {
_, err := hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
_, err := hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
return errors.IsNotFound(err)
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())

@@ -343,14 +345,14 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
{
Group: "",
Resource: "configmaps",
Namespace: o.SpokeClusterName,
Namespace: o.AgentOptions.SpokeClusterName,
Name: "cm1",
},
},
},
}

work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

appliedManifestWorkName = fmt.Sprintf("%s-%s", hubHash, work.Name)
@@ -365,7 +367,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {

// Ensure ownership of configmap is updated
gomega.Eventually(func() error {
cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
if err != nil {
return err
}
@@ -378,21 +380,21 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())

// Delete the work
err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

// Wait for deletion of manifest work
gomega.Eventually(func() bool {
_, err := hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
_, err := hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
return errors.IsNotFound(err)
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())

// One of the resources should be deleted.
_, err = spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm2", metav1.GetOptions{})
_, err = spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm2", metav1.GetOptions{})
gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue())

// One of the resources should be kept
_, err = spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
_, err = spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
})

@@ -404,14 +406,14 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
{
Group: "",
Resource: "configmaps",
Namespace: o.SpokeClusterName,
Namespace: o.AgentOptions.SpokeClusterName,
Name: "cm1",
},
},
},
}

work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

appliedManifestWorkName = fmt.Sprintf("%s-%s", hubHash, work.Name)
@@ -426,7 +428,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {

// Ensure ownership of configmap is updated
gomega.Eventually(func() error {
cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
if err != nil {
return err
}
@@ -440,15 +442,15 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {

// Remove the resource from the manifests
gomega.Eventually(func() error {
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}

work.Spec.Workload.Manifests = []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{})),
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{})),
}
_, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
_, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())

@@ -459,7 +461,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {

// Sleep 5 seconds and check that the resource is kept
time.Sleep(5 * time.Second)
_, err = spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
_, err = spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
})

@@ -471,14 +473,14 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
{
Group: "",
Resource: "configmaps",
Namespace: o.SpokeClusterName,
Namespace: o.AgentOptions.SpokeClusterName,
Name: "cm1",
},
},
},
}

work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

appliedManifestWorkName = fmt.Sprintf("%s-%s", hubHash, work.Name)
@@ -493,7 +495,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {

// Ensure ownership of configmap is updated
gomega.Eventually(func() error {
cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
if err != nil {
return err
}
@@ -507,19 +509,19 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {

// Remove the delete option
gomega.Eventually(func() error {
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}

work.Spec.DeleteOption = nil
_, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
_, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())

// Ensure ownership of configmap is updated
gomega.Eventually(func() error {
cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
if err != nil {
return err
}
@@ -532,19 +534,19 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())

// Delete the work
err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

// Wait for deletion of manifest work
gomega.Eventually(func() bool {
_, err := hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
_, err := hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
return errors.IsNotFound(err)
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())

// All of the resources should be deleted.
_, err = spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm2", metav1.GetOptions{})
_, err = spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm2", metav1.GetOptions{})
gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue())
_, err = spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
_, err = spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue())
})
})

@@ -3,6 +3,7 @@ package work
import (
"context"
"encoding/json"
commonoptions "open-cluster-management.io/ocm/pkg/common/options"
"open-cluster-management.io/ocm/pkg/features"
"open-cluster-management.io/ocm/test/integration/util"
"time"
@@ -33,13 +34,14 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
ginkgo.BeforeEach(func() {
o = spoke.NewWorkloadAgentOptions()
o.HubKubeconfigFile = hubKubeconfigFileName
o.SpokeClusterName = utilrand.String(5)
o.AgentOptions = commonoptions.NewAgentOptions()
o.AgentOptions.SpokeClusterName = utilrand.String(5)
o.StatusSyncInterval = 3 * time.Second
err := features.DefaultSpokeWorkMutableFeatureGate.Set("ExecutorValidatingCaches=true")
gomega.Expect(err).NotTo(gomega.HaveOccurred())

ns := &corev1.Namespace{}
ns.Name = o.SpokeClusterName
ns.Name = o.AgentOptions.SpokeClusterName
_, err = spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

@@ -53,7 +55,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
})

ginkgo.JustBeforeEach(func() {
work = util.NewManifestWork(o.SpokeClusterName, "", manifests)
work = util.NewManifestWork(o.AgentOptions.SpokeClusterName, "", manifests)
gomega.Expect(err).ToNot(gomega.HaveOccurred())
work.Spec.Executor = executor
})
@@ -63,7 +65,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
cancel()
}
err := spokeKubeClient.CoreV1().Namespaces().Delete(
context.Background(), o.SpokeClusterName, metav1.DeleteOptions{})
context.Background(), o.AgentOptions.SpokeClusterName, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
})

@@ -71,14 +73,14 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
executorName := "test-executor"
ginkgo.BeforeEach(func() {
manifests = []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{})),
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{})),
}
executor = &workapiv1.ManifestWorkExecutor{
Subject: workapiv1.ManifestWorkExecutorSubject{
Type: workapiv1.ExecutorSubjectTypeServiceAccount,
ServiceAccount: &workapiv1.ManifestWorkSubjectServiceAccount{
Namespace: o.SpokeClusterName,
Namespace: o.AgentOptions.SpokeClusterName,
Name: executorName,
},
},
@@ -86,7 +88,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
})

ginkgo.It("Executor does not have permission", func() {
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(
context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

@@ -103,10 +105,10 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {

ginkgo.It("Executor does not have permission to partial resources", func() {
roleName := "role1"
_, err = spokeKubeClient.RbacV1().Roles(o.SpokeClusterName).Create(
_, err = spokeKubeClient.RbacV1().Roles(o.AgentOptions.SpokeClusterName).Create(
context.TODO(), &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
Namespace: o.SpokeClusterName,
Namespace: o.AgentOptions.SpokeClusterName,
Name: roleName,
},
Rules: []rbacv1.PolicyRule{
@@ -119,16 +121,16 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
},
}, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
_, err = spokeKubeClient.RbacV1().RoleBindings(o.SpokeClusterName).Create(
_, err = spokeKubeClient.RbacV1().RoleBindings(o.AgentOptions.SpokeClusterName).Create(
context.TODO(), &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Namespace: o.SpokeClusterName,
Namespace: o.AgentOptions.SpokeClusterName,
Name: roleName,
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Namespace: o.SpokeClusterName,
Namespace: o.AgentOptions.SpokeClusterName,
Name: executorName,
},
},
@@ -140,7 +142,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
}, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(
context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

@@ -154,20 +156,20 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
// ensure configmap cm1 exists and cm2 does not exist
util.AssertExistenceOfConfigMaps(
[]workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
}, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
util.AssertNonexistenceOfConfigMaps(
[]workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm2", map[string]string{"a": "b"}, []string{})),
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm2", map[string]string{"a": "b"}, []string{})),
}, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
})

ginkgo.It("Executor has permission for all resources", func() {
|
||||
roleName := "role1"
|
||||
_, err = spokeKubeClient.RbacV1().Roles(o.SpokeClusterName).Create(
|
||||
_, err = spokeKubeClient.RbacV1().Roles(o.AgentOptions.SpokeClusterName).Create(
|
||||
context.TODO(), &rbacv1.Role{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: o.SpokeClusterName,
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Name: roleName,
|
||||
},
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
@@ -180,16 +182,16 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
},
|
||||
}, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
_, err = spokeKubeClient.RbacV1().RoleBindings(o.SpokeClusterName).Create(
|
||||
_, err = spokeKubeClient.RbacV1().RoleBindings(o.AgentOptions.SpokeClusterName).Create(
|
||||
context.TODO(), &rbacv1.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: o.SpokeClusterName,
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Name: roleName,
|
||||
},
|
||||
Subjects: []rbacv1.Subject{
|
||||
{
|
||||
Kind: "ServiceAccount",
|
||||
Namespace: o.SpokeClusterName,
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Name: executorName,
|
||||
},
|
||||
},
|
||||
@@ -201,7 +203,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
}, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(
|
||||
context.Background(), work, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
@@ -221,14 +223,14 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
executorName := "test-executor"
|
||||
ginkgo.BeforeEach(func() {
|
||||
manifests = []workapiv1.Manifest{
|
||||
util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
|
||||
util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{})),
|
||||
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
|
||||
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{})),
|
||||
}
|
||||
executor = &workapiv1.ManifestWorkExecutor{
|
||||
Subject: workapiv1.ManifestWorkExecutorSubject{
|
||||
Type: workapiv1.ExecutorSubjectTypeServiceAccount,
|
||||
ServiceAccount: &workapiv1.ManifestWorkSubjectServiceAccount{
|
||||
Namespace: o.SpokeClusterName,
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Name: executorName,
|
||||
},
|
||||
},
|
||||
@@ -237,10 +239,10 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
|
||||
ginkgo.It("Executor does not have delete permission and delete option is foreground", func() {
|
||||
roleName := "role1"
|
||||
_, err = spokeKubeClient.RbacV1().Roles(o.SpokeClusterName).Create(
|
||||
_, err = spokeKubeClient.RbacV1().Roles(o.AgentOptions.SpokeClusterName).Create(
|
||||
context.TODO(), &rbacv1.Role{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: o.SpokeClusterName,
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Name: roleName,
|
||||
},
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
@@ -253,16 +255,16 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
},
|
||||
}, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
_, err = spokeKubeClient.RbacV1().RoleBindings(o.SpokeClusterName).Create(
|
||||
_, err = spokeKubeClient.RbacV1().RoleBindings(o.AgentOptions.SpokeClusterName).Create(
|
||||
context.TODO(), &rbacv1.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: o.SpokeClusterName,
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Name: roleName,
|
||||
},
|
||||
Subjects: []rbacv1.Subject{
|
||||
{
|
||||
Kind: "ServiceAccount",
|
||||
Namespace: o.SpokeClusterName,
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Name: executorName,
|
||||
},
|
||||
},
|
||||
@@ -274,7 +276,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
}, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(
|
||||
context.Background(), work, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
@@ -291,10 +293,10 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
|
||||
ginkgo.It("Executor does not have delete permission and delete option is orphan", func() {
|
||||
roleName := "role1"
|
||||
_, err = spokeKubeClient.RbacV1().Roles(o.SpokeClusterName).Create(
|
||||
_, err = spokeKubeClient.RbacV1().Roles(o.AgentOptions.SpokeClusterName).Create(
|
||||
context.TODO(), &rbacv1.Role{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: o.SpokeClusterName,
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Name: roleName,
|
||||
},
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
@@ -307,16 +309,16 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
},
|
||||
}, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
_, err = spokeKubeClient.RbacV1().RoleBindings(o.SpokeClusterName).Create(
|
||||
_, err = spokeKubeClient.RbacV1().RoleBindings(o.AgentOptions.SpokeClusterName).Create(
|
||||
context.TODO(), &rbacv1.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: o.SpokeClusterName,
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Name: roleName,
|
||||
},
|
||||
Subjects: []rbacv1.Subject{
|
||||
{
|
||||
Kind: "ServiceAccount",
|
||||
Namespace: o.SpokeClusterName,
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Name: executorName,
|
||||
},
|
||||
},
|
||||
@@ -331,7 +333,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
work.Spec.DeleteOption = &workapiv1.DeleteOption{
|
||||
PropagationPolicy: workapiv1.DeletePropagationPolicyTypeOrphan,
|
||||
}
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(
|
||||
context.Background(), work, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
@@ -348,10 +350,10 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
|
||||
ginkgo.It("Executor does not have delete permission and delete option is selectively orphan", func() {
|
||||
roleName := "role1"
|
||||
_, err = spokeKubeClient.RbacV1().Roles(o.SpokeClusterName).Create(
|
||||
_, err = spokeKubeClient.RbacV1().Roles(o.AgentOptions.SpokeClusterName).Create(
|
||||
context.TODO(), &rbacv1.Role{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: o.SpokeClusterName,
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Name: roleName,
|
||||
},
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
@@ -364,16 +366,16 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
},
|
||||
}, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
_, err = spokeKubeClient.RbacV1().RoleBindings(o.SpokeClusterName).Create(
|
||||
_, err = spokeKubeClient.RbacV1().RoleBindings(o.AgentOptions.SpokeClusterName).Create(
|
||||
context.TODO(), &rbacv1.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: o.SpokeClusterName,
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Name: roleName,
|
||||
},
|
||||
Subjects: []rbacv1.Subject{
|
||||
{
|
||||
Kind: "ServiceAccount",
|
||||
Namespace: o.SpokeClusterName,
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Name: executorName,
|
||||
},
|
||||
},
|
||||
@@ -391,13 +393,13 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
OrphaningRules: []workapiv1.OrphaningRule{
|
||||
{
|
||||
Resource: "configmaps",
|
||||
Namespace: o.SpokeClusterName,
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Name: "cm1",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(
|
||||
context.Background(), work, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
@@ -411,11 +413,11 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
// ensure configmap cm1 exist and cm2 not exist
|
||||
util.AssertExistenceOfConfigMaps(
|
||||
[]workapiv1.Manifest{
|
||||
util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
|
||||
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
|
||||
}, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertNonexistenceOfConfigMaps(
|
||||
[]workapiv1.Manifest{
|
||||
util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm2", map[string]string{"a": "b"}, []string{})),
|
||||
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm2", map[string]string{"a": "b"}, []string{})),
|
||||
}, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
|
||||
})
|
||||
})
|
||||
@@ -424,20 +426,20 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
executorName := "test-executor"
ginkgo.BeforeEach(func() {
manifests = []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
util.ToManifest(util.NewRoleForManifest(o.SpokeClusterName, "role-cm-creator", rbacv1.PolicyRule{
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
util.ToManifest(util.NewRoleForManifest(o.AgentOptions.SpokeClusterName, "role-cm-creator", rbacv1.PolicyRule{
Verbs: []string{"create", "update", "patch", "get", "list", "delete"},
APIGroups: []string{""},
Resources: []string{"configmaps"},
})),
util.ToManifest(util.NewRoleBindingForManifest(o.SpokeClusterName, "role-cm-creator-binding",
util.ToManifest(util.NewRoleBindingForManifest(o.AgentOptions.SpokeClusterName, "role-cm-creator-binding",
rbacv1.RoleRef{
Kind: "Role",
Name: "role-cm-creator",
},
rbacv1.Subject{
Kind: "ServiceAccount",
Namespace: o.SpokeClusterName,
Namespace: o.AgentOptions.SpokeClusterName,
Name: executorName,
})),
}
@@ -445,7 +447,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
Subject: workapiv1.ManifestWorkExecutorSubject{
Type: workapiv1.ExecutorSubjectTypeServiceAccount,
ServiceAccount: &workapiv1.ManifestWorkSubjectServiceAccount{
Namespace: o.SpokeClusterName,
Namespace: o.AgentOptions.SpokeClusterName,
Name: executorName,
},
},
@@ -454,11 +456,11 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {

ginkgo.It("no permission", func() {
|
||||
roleName := "role1"
|
||||
_, err = spokeKubeClient.RbacV1().Roles(o.SpokeClusterName).Create(
|
||||
_, err = spokeKubeClient.RbacV1().Roles(o.AgentOptions.SpokeClusterName).Create(
|
||||
context.TODO(), &rbacv1.Role{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: roleName,
|
||||
Namespace: o.SpokeClusterName,
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
},
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
{
|
||||
@@ -470,16 +472,16 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
},
|
||||
}, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
_, err = spokeKubeClient.RbacV1().RoleBindings(o.SpokeClusterName).Create(
|
||||
_, err = spokeKubeClient.RbacV1().RoleBindings(o.AgentOptions.SpokeClusterName).Create(
|
||||
context.TODO(), &rbacv1.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: roleName,
|
||||
Namespace: o.SpokeClusterName,
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
},
|
||||
Subjects: []rbacv1.Subject{
|
||||
{
|
||||
Kind: "ServiceAccount",
|
||||
Namespace: o.SpokeClusterName,
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Name: executorName,
|
||||
},
|
||||
},
|
||||
@@ -491,7 +493,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
}, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(
|
||||
context.Background(), work, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
@@ -507,17 +509,17 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
// ensure configmap not exist
|
||||
util.AssertNonexistenceOfConfigMaps(
|
||||
[]workapiv1.Manifest{
|
||||
util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
|
||||
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
|
||||
}, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
|
||||
})
|
||||
|
||||
ginkgo.It("no permission for already existing resource", func() {
|
||||
roleName := "role1"
|
||||
_, err = spokeKubeClient.RbacV1().Roles(o.SpokeClusterName).Create(
|
||||
_, err = spokeKubeClient.RbacV1().Roles(o.AgentOptions.SpokeClusterName).Create(
|
||||
context.TODO(), &rbacv1.Role{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: roleName,
|
||||
Namespace: o.SpokeClusterName,
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
},
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
{
|
||||
@@ -529,16 +531,16 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
},
|
||||
}, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
_, err = spokeKubeClient.RbacV1().RoleBindings(o.SpokeClusterName).Create(
|
||||
_, err = spokeKubeClient.RbacV1().RoleBindings(o.AgentOptions.SpokeClusterName).Create(
|
||||
context.TODO(), &rbacv1.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: roleName,
|
||||
Namespace: o.SpokeClusterName,
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
},
|
||||
Subjects: []rbacv1.Subject{
|
||||
{
|
||||
Kind: "ServiceAccount",
|
||||
Namespace: o.SpokeClusterName,
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Name: executorName,
|
||||
},
|
||||
},
|
||||
@@ -551,11 +553,11 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
// make the role exist with lower permission
|
||||
_, err = spokeKubeClient.RbacV1().Roles(o.SpokeClusterName).Create(
|
||||
_, err = spokeKubeClient.RbacV1().Roles(o.AgentOptions.SpokeClusterName).Create(
|
||||
context.TODO(), &rbacv1.Role{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "role-cm-creator",
|
||||
Namespace: o.SpokeClusterName,
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
},
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
{
|
||||
@@ -567,7 +569,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
}, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(
|
||||
context.Background(), work, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
@@ -584,17 +586,17 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
// ensure configmap not exist
|
||||
util.AssertNonexistenceOfConfigMaps(
|
||||
[]workapiv1.Manifest{
|
||||
util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
|
||||
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
|
||||
}, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
|
||||
})
|
||||
|
||||
ginkgo.It("with permission", func() {
|
||||
roleName := "role1"
|
||||
_, err = spokeKubeClient.RbacV1().Roles(o.SpokeClusterName).Create(
|
||||
_, err = spokeKubeClient.RbacV1().Roles(o.AgentOptions.SpokeClusterName).Create(
|
||||
context.TODO(), &rbacv1.Role{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: roleName,
|
||||
Namespace: o.SpokeClusterName,
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
},
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
{
|
||||
@@ -611,16 +613,16 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
},
|
||||
}, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
_, err = spokeKubeClient.RbacV1().RoleBindings(o.SpokeClusterName).Create(
|
||||
_, err = spokeKubeClient.RbacV1().RoleBindings(o.AgentOptions.SpokeClusterName).Create(
|
||||
context.TODO(), &rbacv1.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: roleName,
|
||||
Namespace: o.SpokeClusterName,
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
},
|
||||
Subjects: []rbacv1.Subject{
|
||||
{
|
||||
Kind: "ServiceAccount",
|
||||
Namespace: o.SpokeClusterName,
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Name: executorName,
|
||||
},
|
||||
},
|
||||
@@ -632,7 +634,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
}, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(
|
||||
context.Background(), work, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
@@ -648,17 +650,17 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
// ensure configmaps exist
|
||||
util.AssertExistenceOfConfigMaps(
|
||||
[]workapiv1.Manifest{
|
||||
util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
|
||||
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
|
||||
}, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
|
||||
})
|
||||
|
||||
ginkgo.It("with permission for already exist resource", func() {
|
||||
roleName := "role1"
|
||||
_, err = spokeKubeClient.RbacV1().Roles(o.SpokeClusterName).Create(
|
||||
_, err = spokeKubeClient.RbacV1().Roles(o.AgentOptions.SpokeClusterName).Create(
|
||||
context.TODO(), &rbacv1.Role{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: roleName,
|
||||
Namespace: o.SpokeClusterName,
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
},
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
{
|
||||
@@ -675,16 +677,16 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
},
|
||||
}, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
_, err = spokeKubeClient.RbacV1().RoleBindings(o.SpokeClusterName).Create(
|
||||
_, err = spokeKubeClient.RbacV1().RoleBindings(o.AgentOptions.SpokeClusterName).Create(
|
||||
context.TODO(), &rbacv1.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: roleName,
|
||||
Namespace: o.SpokeClusterName,
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
},
|
||||
Subjects: []rbacv1.Subject{
|
||||
{
|
||||
Kind: "ServiceAccount",
|
||||
Namespace: o.SpokeClusterName,
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Name: executorName,
|
||||
},
|
||||
},
|
||||
@@ -697,11 +699,11 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
// make the role exist with lower permission
|
||||
_, err = spokeKubeClient.RbacV1().Roles(o.SpokeClusterName).Create(
|
||||
_, err = spokeKubeClient.RbacV1().Roles(o.AgentOptions.SpokeClusterName).Create(
|
||||
context.TODO(), &rbacv1.Role{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "role-cm-creator",
|
||||
Namespace: o.SpokeClusterName,
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
},
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
{
|
||||
@@ -713,7 +715,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
}, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(
|
||||
context.Background(), work, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
@@ -729,7 +731,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
// ensure configmaps exist
|
||||
util.AssertExistenceOfConfigMaps(
|
||||
[]workapiv1.Manifest{
|
||||
util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
|
||||
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
|
||||
}, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
|
||||
})
|
||||
})
|
||||
@@ -785,13 +787,13 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
}
ginkgo.BeforeEach(func() {
manifests = []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
}
executor = &workapiv1.ManifestWorkExecutor{
Subject: workapiv1.ManifestWorkExecutorSubject{
Type: workapiv1.ExecutorSubjectTypeServiceAccount,
ServiceAccount: &workapiv1.ManifestWorkSubjectServiceAccount{
Namespace: o.SpokeClusterName,
Namespace: o.AgentOptions.SpokeClusterName,
Name: executorName,
},
},
@@ -799,7 +801,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
})

ginkgo.It("Permission change", func() {
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(
context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

@@ -813,8 +815,8 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
ginkgo.By("ensure configmaps do not exist")
util.AssertNonexistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)

createRBAC(o.SpokeClusterName, executorName)
addConfigMapToManifestWork(hubWorkClient, work.Name, o.SpokeClusterName, "cm2")
createRBAC(o.AgentOptions.SpokeClusterName, executorName)
addConfigMapToManifestWork(hubWorkClient, work.Name, o.AgentOptions.SpokeClusterName, "cm2")
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied),
metav1.ConditionTrue, []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue},
eventuallyTimeout, eventuallyInterval)
@@ -825,8 +827,8 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
ginkgo.By("ensure configmaps cm1 and cm2 exist")
util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)

deleteRBAC(o.SpokeClusterName, executorName)
addConfigMapToManifestWork(hubWorkClient, work.Name, o.SpokeClusterName, "cm3")
deleteRBAC(o.AgentOptions.SpokeClusterName, executorName)
addConfigMapToManifestWork(hubWorkClient, work.Name, o.AgentOptions.SpokeClusterName, "cm3")
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied),
metav1.ConditionFalse, []metav1.ConditionStatus{metav1.ConditionFalse, metav1.ConditionFalse,
metav1.ConditionFalse}, eventuallyTimeout, eventuallyInterval)
@@ -837,15 +839,15 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
ginkgo.By("ensure configmap cm1 cm2 exist(will not delete the applied resource even the permison is revoked) but cm3 does not exist")
util.AssertExistenceOfConfigMaps(
[]workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm1", map[string]string{"a": "b"}, nil)),
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, nil)),
}, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
util.AssertExistenceOfConfigMaps(
[]workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm2", map[string]string{"a": "b"}, nil)),
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm2", map[string]string{"a": "b"}, nil)),
}, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
util.AssertNonexistenceOfConfigMaps(
[]workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm3", map[string]string{"a": "b"}, nil)),
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm3", map[string]string{"a": "b"}, nil)),
}, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
})
})

@@ -3,6 +3,7 @@ package work
import (
"context"
"fmt"
commonoptions "open-cluster-management.io/ocm/pkg/common/options"
"open-cluster-management.io/ocm/pkg/features"
"open-cluster-management.io/ocm/test/integration/util"
"time"
@@ -32,11 +33,12 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
ginkgo.BeforeEach(func() {
o = spoke.NewWorkloadAgentOptions()
o.HubKubeconfigFile = hubKubeconfigFileName
o.SpokeClusterName = utilrand.String(5)
o.AgentOptions = commonoptions.NewAgentOptions()
o.AgentOptions.SpokeClusterName = utilrand.String(5)
o.StatusSyncInterval = 3 * time.Second

ns := &corev1.Namespace{}
ns.Name = o.SpokeClusterName
ns.Name = o.AgentOptions.SpokeClusterName
_, err = spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

@@ -45,18 +47,18 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
})

ginkgo.JustBeforeEach(func() {
work = util.NewManifestWork(o.SpokeClusterName, "", manifests)
work = util.NewManifestWork(o.AgentOptions.SpokeClusterName, "", manifests)
gomega.Expect(err).ToNot(gomega.HaveOccurred())
})

ginkgo.AfterEach(func() {
err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), o.SpokeClusterName, metav1.DeleteOptions{})
err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), o.AgentOptions.SpokeClusterName, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
})

ginkgo.Context("Deployment Status feedback", func() {
ginkgo.BeforeEach(func() {
u, _, err := util.NewDeployment(o.SpokeClusterName, "deploy1", "sa")
u, _, err := util.NewDeployment(o.AgentOptions.SpokeClusterName, "deploy1", "sa")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
manifests = append(manifests, util.ToManifest(u))

@@ -77,7 +79,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
Namespace: o.SpokeClusterName,
Namespace: o.AgentOptions.SpokeClusterName,
Name: "deploy1",
},
FeedbackRules: []workapiv1.FeedbackRule{
@@ -88,7 +90,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
},
}

work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
@@ -98,7 +100,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {

// Update Deployment status on spoke
gomega.Eventually(func() error {
deploy, err := spokeKubeClient.AppsV1().Deployments(o.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
deploy, err := spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
if err != nil {
return err
}
@@ -107,13 +109,13 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
deploy.Status.Replicas = 3
deploy.Status.ReadyReplicas = 2

_, err = spokeKubeClient.AppsV1().Deployments(o.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{})
_, err = spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())

// Check if we get status of deployment on work api
gomega.Eventually(func() error {
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
@@ -160,7 +162,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {

// Update replica of deployment
gomega.Eventually(func() error {
deploy, err := spokeKubeClient.AppsV1().Deployments(o.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
deploy, err := spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
if err != nil {
return err
}
@@ -169,13 +171,13 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
deploy.Status.Replicas = 3
deploy.Status.ReadyReplicas = 3

_, err = spokeKubeClient.AppsV1().Deployments(o.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{})
_, err = spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())

// Check if the status of deployment is synced on work api
gomega.Eventually(func() error {
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
@@ -227,7 +229,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
Namespace: o.SpokeClusterName,
Namespace: o.AgentOptions.SpokeClusterName,
Name: "deploy1",
},
FeedbackRules: []workapiv1.FeedbackRule{
@@ -248,7 +250,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
},
}

work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
@@ -257,7 +259,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)

gomega.Eventually(func() error {
deploy, err := spokeKubeClient.AppsV1().Deployments(o.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
deploy, err := spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
if err != nil {
return err
}
@@ -269,13 +271,13 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
},
}

_, err = spokeKubeClient.AppsV1().Deployments(o.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{})
_, err = spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())

// Check if we get status of deployment on work api
gomega.Eventually(func() error {
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
@@ -308,7 +310,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
})

ginkgo.It("should return none for resources with no wellKnowne status", func() {
|
||||
sa, _ := util.NewServiceAccount(o.SpokeClusterName, "sa")
|
||||
sa, _ := util.NewServiceAccount(o.AgentOptions.SpokeClusterName, "sa")
|
||||
work.Spec.Workload.Manifests = append(work.Spec.Workload.Manifests, util.ToManifest(sa))
|
||||
|
||||
work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{
|
||||
@@ -316,7 +318,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
ResourceIdentifier: workapiv1.ResourceIdentifier{
|
||||
Group: "apps",
|
||||
Resource: "deployments",
|
||||
Namespace: o.SpokeClusterName,
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Name: "deploy1",
|
||||
},
|
||||
FeedbackRules: []workapiv1.FeedbackRule{
|
||||
@@ -329,7 +331,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
ResourceIdentifier: workapiv1.ResourceIdentifier{
|
||||
Group: "",
|
||||
Resource: "serviceaccounts",
|
||||
Namespace: o.SpokeClusterName,
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Name: "sa",
|
||||
},
|
||||
FeedbackRules: []workapiv1.FeedbackRule{
|
||||
@@ -340,7 +342,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
},
|
||||
}
|
||||
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
|
||||
@@ -350,7 +352,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
|
||||
// Update Deployment status on spoke
|
||||
gomega.Eventually(func() error {
|
||||
deploy, err := spokeKubeClient.AppsV1().Deployments(o.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
|
||||
deploy, err := spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -359,13 +361,13 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
deploy.Status.Replicas = 3
|
||||
deploy.Status.ReadyReplicas = 2
|
||||
|
||||
_, err = spokeKubeClient.AppsV1().Deployments(o.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{})
|
||||
_, err = spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{})
|
||||
return err
|
||||
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
|
||||
|
||||
// Check if we get status of deployment on work api
gomega.Eventually(func() error {
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
@@ -421,7 +423,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
Namespace: o.SpokeClusterName,
Namespace: o.AgentOptions.SpokeClusterName,
Name: "deploy1",
},
FeedbackRules: []workapiv1.FeedbackRule{
@@ -438,7 +440,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
},
}

work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
@@ -450,7 +452,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {

ginkgo.Context("Deployment Status feedback with RawJsonString enabled", func() {
ginkgo.BeforeEach(func() {
u, _, err := util.NewDeployment(o.SpokeClusterName, "deploy1", "sa")
u, _, err := util.NewDeployment(o.AgentOptions.SpokeClusterName, "deploy1", "sa")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
manifests = append(manifests, util.ToManifest(u))

@@ -473,7 +475,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
Namespace: o.SpokeClusterName,
Namespace: o.AgentOptions.SpokeClusterName,
Name: "deploy1",
},
FeedbackRules: []workapiv1.FeedbackRule{
@@ -490,7 +492,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
},
}

work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
@@ -499,7 +501,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)

gomega.Eventually(func() error {
deploy, err := spokeKubeClient.AppsV1().Deployments(o.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
deploy, err := spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
if err != nil {
return err
}
@@ -511,13 +513,13 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
},
}

_, err = spokeKubeClient.AppsV1().Deployments(o.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{})
_, err = spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())

// Check if we get status of deployment on work api
gomega.Eventually(func() error {
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}

@@ -3,6 +3,7 @@ package work
import (
"context"
"fmt"
commonoptions "open-cluster-management.io/ocm/pkg/common/options"
util "open-cluster-management.io/ocm/test/integration/util"
"os"
"path"
@@ -33,13 +34,14 @@ var _ = ginkgo.Describe("Unmanaged ApplieManifestWork", func() {
ginkgo.BeforeEach(func() {
o = spoke.NewWorkloadAgentOptions()
o.HubKubeconfigFile = hubKubeconfigFileName
o.SpokeClusterName = utilrand.String(5)
o.AgentOptions = commonoptions.NewAgentOptions()
o.AgentOptions.SpokeClusterName = utilrand.String(5)
o.StatusSyncInterval = 3 * time.Second
o.AgentID = utilrand.String(5)
o.AppliedManifestWorkEvictionGracePeriod = 10 * time.Second

ns = &corev1.Namespace{}
ns.Name = o.SpokeClusterName
ns.Name = o.AgentOptions.SpokeClusterName
_, err := spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

@@ -48,11 +50,11 @@ var _ = ginkgo.Describe("Unmanaged ApplieManifestWork", func() {
go startWorkAgent(ctx, o)

manifests = []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm1", map[string]string{"a": "b"}, nil)),
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, nil)),
}

work = util.NewManifestWork(o.SpokeClusterName, "unmanaged-appliedwork", manifests)
_, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work = util.NewManifestWork(o.AgentOptions.SpokeClusterName, "unmanaged-appliedwork", manifests)
_, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

appliedManifestWorkName = fmt.Sprintf("%s-%s", hubHash, work.Name)
@@ -62,7 +64,7 @@ var _ = ginkgo.Describe("Unmanaged ApplieManifestWork", func() {
if cancel != nil {
cancel()
}
err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), o.SpokeClusterName, metav1.DeleteOptions{})
err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), o.AgentOptions.SpokeClusterName, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
})

@@ -126,7 +128,7 @@ var _ = ginkgo.Describe("Unmanaged ApplieManifestWork", func() {

newOption := spoke.NewWorkloadAgentOptions()
newOption.HubKubeconfigFile = newHubKubeConfigFile
newOption.SpokeClusterName = o.SpokeClusterName
newOption.AgentOptions.SpokeClusterName = o.AgentOptions.SpokeClusterName
newOption.AgentID = utilrand.String(5)
newOption.AppliedManifestWorkEvictionGracePeriod = 5 * time.Second

@@ -135,7 +137,7 @@ var _ = ginkgo.Describe("Unmanaged ApplieManifestWork", func() {
go startWorkAgent(ctx, newOption)

// Create the same manifestwork with the same name on new hub.
work, err = newWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work, err = newWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

util.AssertWorkCondition(work.Namespace, work.Name, newWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
@@ -145,7 +147,7 @@ var _ = ginkgo.Describe("Unmanaged ApplieManifestWork", func() {

// ensure the resource has two ownerrefs
gomega.Eventually(func() error {
cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.TODO(), "cm1", metav1.GetOptions{})
cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.TODO(), "cm1", metav1.GetOptions{})
if err != nil {
return err
}
@@ -171,7 +173,7 @@ var _ = ginkgo.Describe("Unmanaged ApplieManifestWork", func() {

newOption := spoke.NewWorkloadAgentOptions()
newOption.HubKubeconfigFile = newHubKubeConfigFile
newOption.SpokeClusterName = o.SpokeClusterName
newOption.AgentOptions.SpokeClusterName = o.AgentOptions.SpokeClusterName
newOption.AgentID = o.AgentID
newOption.AppliedManifestWorkEvictionGracePeriod = 5 * time.Second

@@ -180,7 +182,7 @@ var _ = ginkgo.Describe("Unmanaged ApplieManifestWork", func() {
go startWorkAgent(ctx, newOption)

// Create the same manifestwork with the same name.
work, err = newWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work, err = newWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

util.AssertWorkCondition(work.Namespace, work.Name, newWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
@@ -202,7 +204,7 @@ var _ = ginkgo.Describe("Unmanaged ApplieManifestWork", func() {

// ensure the resource has only one ownerref
gomega.Eventually(func() error {
cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.TODO(), "cm1", metav1.GetOptions{})
cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.TODO(), "cm1", metav1.GetOptions{})
if err != nil {
return err
}
@@ -261,7 +263,7 @@ var _ = ginkgo.Describe("Unmanaged ApplieManifestWork", func() {
go startWorkAgent(ctx, o)

// recreate the work on the hub
_, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
_, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

// ensure the appliemanifestwork eviction is stopped

@@ -3,6 +3,7 @@ package work
import (
"context"
"fmt"
commonoptions "open-cluster-management.io/ocm/pkg/common/options"
"open-cluster-management.io/ocm/test/integration/util"
"time"

@@ -30,11 +31,12 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
ginkgo.BeforeEach(func() {
o = spoke.NewWorkloadAgentOptions()
o.HubKubeconfigFile = hubKubeconfigFileName
o.SpokeClusterName = utilrand.String(5)
o.AgentOptions = commonoptions.NewAgentOptions()
o.AgentOptions.SpokeClusterName = utilrand.String(5)
o.StatusSyncInterval = 3 * time.Second

ns := &corev1.Namespace{}
ns.Name = o.SpokeClusterName
ns.Name = o.AgentOptions.SpokeClusterName
_, err := spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

@@ -47,14 +49,14 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
})

ginkgo.JustBeforeEach(func() {
work = util.NewManifestWork(o.SpokeClusterName, "", manifests)
work = util.NewManifestWork(o.AgentOptions.SpokeClusterName, "", manifests)
})

ginkgo.AfterEach(func() {
if cancel != nil {
cancel()
}
err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), o.SpokeClusterName, metav1.DeleteOptions{})
err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), o.AgentOptions.SpokeClusterName, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
})

@@ -62,7 +64,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
var object *unstructured.Unstructured

ginkgo.BeforeEach(func() {
object, _, err = util.NewDeployment(o.SpokeClusterName, "deploy1", "sa")
object, _, err = util.NewDeployment(o.AgentOptions.SpokeClusterName, "deploy1", "sa")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
manifests = append(manifests, util.ToManifest(object))
})
@@ -73,7 +75,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
Namespace: o.SpokeClusterName,
Namespace: o.AgentOptions.SpokeClusterName,
Name: "deploy1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
@@ -82,7 +84,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
},
}

work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
@@ -92,13 +94,13 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
err = unstructured.SetNestedField(object.Object, int64(3), "spec", "replicas")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
gomega.Eventually(func() error {
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}

work.Spec.Workload.Manifests[0] = util.ToManifest(object)
_, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
_, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())

@@ -106,7 +108,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)

gomega.Eventually(func() error {
deploy, err := spokeKubeClient.AppsV1().Deployments(o.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
deploy, err := spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
if err != nil {
return err
}
@@ -124,7 +126,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
var object *unstructured.Unstructured

ginkgo.BeforeEach(func() {
object, _, err = util.NewDeployment(o.SpokeClusterName, "deploy1", "sa")
object, _, err = util.NewDeployment(o.AgentOptions.SpokeClusterName, "deploy1", "sa")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
manifests = append(manifests, util.ToManifest(object))
})
@@ -135,7 +137,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
Namespace: o.SpokeClusterName,
Namespace: o.AgentOptions.SpokeClusterName,
Name: "deploy1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
@@ -144,7 +146,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
},
}

work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
@@ -153,18 +155,18 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
// update work
err = unstructured.SetNestedField(object.Object, int64(3), "spec", "replicas")
gomega.Eventually(func() error {
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}

work.Spec.Workload.Manifests[0] = util.ToManifest(object)
_, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
_, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())

gomega.Eventually(func() error {
|
||||
deploy, err := spokeKubeClient.AppsV1().Deployments(o.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
|
||||
deploy, err := spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -183,7 +185,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
|
||||
ResourceIdentifier: workapiv1.ResourceIdentifier{
|
||||
Group: "apps",
|
||||
Resource: "deployments",
|
||||
Namespace: o.SpokeClusterName,
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Name: "deploy1",
|
||||
},
|
||||
UpdateStrategy: &workapiv1.UpdateStrategy{
|
||||
@@ -192,7 +194,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
|
||||
},
|
||||
}
|
||||
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
|
||||
@@ -203,7 +205,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
patch, err := object.MarshalJSON()
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
_, err = spokeKubeClient.AppsV1().Deployments(o.SpokeClusterName).Patch(
|
||||
_, err = spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).Patch(
|
||||
context.Background(), "deploy1", types.ApplyPatchType, []byte(patch), metav1.PatchOptions{Force: pointer.Bool(true), FieldManager: "test-integration"})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
@@ -211,13 +213,13 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
err = unstructured.SetNestedField(object.Object, int64(3), "spec", "replicas")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
gomega.Eventually(func() error {
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}

work.Spec.Workload.Manifests[0] = util.ToManifest(object)
_, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
_, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())

@@ -228,13 +230,13 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
// remove the replica field and the apply should work
unstructured.RemoveNestedField(object.Object, "spec", "replicas")
gomega.Eventually(func() error {
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}

work.Spec.Workload.Manifests[0] = util.ToManifest(object)
_, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
_, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())

@@ -248,7 +250,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
Namespace: o.SpokeClusterName,
Namespace: o.AgentOptions.SpokeClusterName,
Name: "deploy1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
@@ -257,7 +259,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
},
}

work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
@@ -267,13 +269,13 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
objCopy := object.DeepCopy()
// work1 does not want to own replica field
unstructured.RemoveNestedField(objCopy.Object, "spec", "replicas")
work1 := util.NewManifestWork(o.SpokeClusterName, "another", []workapiv1.Manifest{util.ToManifest(objCopy)})
work1 := util.NewManifestWork(o.AgentOptions.SpokeClusterName, "another", []workapiv1.Manifest{util.ToManifest(objCopy)})
work1.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{
{
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
Namespace: o.SpokeClusterName,
Namespace: o.AgentOptions.SpokeClusterName,
Name: "deploy1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
@@ -286,7 +288,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
},
}

_, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work1, metav1.CreateOptions{})
_, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work1, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

util.AssertWorkCondition(work1.Namespace, work1.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
@@ -296,13 +298,13 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
err = unstructured.SetNestedField(object.Object, int64(3), "spec", "replicas")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
gomega.Eventually(func() error {
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}

work.Spec.Workload.Manifests[0] = util.ToManifest(object)
_, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
_, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())

@@ -311,7 +313,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)

gomega.Eventually(func() error {
deploy, err := spokeKubeClient.AppsV1().Deployments(o.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
deploy, err := spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
if err != nil {
return err
}
@@ -327,13 +329,13 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
err = unstructured.SetNestedField(object.Object, "another-sa", "spec", "template", "spec", "serviceAccountName")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
gomega.Eventually(func() error {
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}

work.Spec.Workload.Manifests[0] = util.ToManifest(object)
_, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
_, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())

@@ -348,7 +350,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
Namespace: o.SpokeClusterName,
Namespace: o.AgentOptions.SpokeClusterName,
Name: "deploy1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
@@ -357,7 +359,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
},
}

work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
@@ -367,13 +369,13 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
objCopy := object.DeepCopy()
// work1 does not want to own replica field
unstructured.RemoveNestedField(objCopy.Object, "spec", "replicas")
work1 := util.NewManifestWork(o.SpokeClusterName, "another", []workapiv1.Manifest{util.ToManifest(objCopy)})
work1 := util.NewManifestWork(o.AgentOptions.SpokeClusterName, "another", []workapiv1.Manifest{util.ToManifest(objCopy)})
work1.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{
{
ResourceIdentifier: workapiv1.ResourceIdentifier{
Group: "apps",
Resource: "deployments",
Namespace: o.SpokeClusterName,
Namespace: o.AgentOptions.SpokeClusterName,
Name: "deploy1",
},
UpdateStrategy: &workapiv1.UpdateStrategy{
@@ -386,14 +388,14 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
},
}

_, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work1, metav1.CreateOptions{})
_, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work1, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

util.AssertWorkCondition(work1.Namespace, work1.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)

gomega.Eventually(func() error {
deploy, err := spokeKubeClient.AppsV1().Deployments(o.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
deploy, err := spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
if err != nil {
return err
}
@@ -407,18 +409,18 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {

// update deleteOption of the first work
gomega.Eventually(func() error {
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}

work.Spec.DeleteOption = &workapiv1.DeleteOption{PropagationPolicy: workapiv1.DeletePropagationPolicyTypeOrphan}
_, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
_, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())

gomega.Eventually(func() error {
deploy, err := spokeKubeClient.AppsV1().Deployments(o.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
deploy, err := spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
if err != nil {
return err
}

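The hunks above all exercise the same two knobs of the ManifestWork API. Condensed into one hedged sketch (clusterName is an illustrative variable; the workapiv1 types and constants are from open-cluster-management.io/api/work/v1 as referenced by these tests, not code shown in this diff):

// Configure server-side apply for one deployment delivered by the work,
// then orphan the applied resources when the work is deleted. Condensed
// from the tests above; not the literal test code.
work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{
	{
		ResourceIdentifier: workapiv1.ResourceIdentifier{
			Group:     "apps",
			Resource:  "deployments",
			Namespace: clusterName, // the managed cluster namespace
			Name:      "deploy1",
		},
		UpdateStrategy: &workapiv1.UpdateStrategy{
			Type: workapiv1.UpdateStrategyTypeServerSideApply,
		},
	},
}
work.Spec.DeleteOption = &workapiv1.DeleteOption{
	PropagationPolicy: workapiv1.DeletePropagationPolicyTypeOrphan,
}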
@@ -3,6 +3,7 @@ package work
import (
"context"
"fmt"
commonoptions "open-cluster-management.io/ocm/pkg/common/options"
"open-cluster-management.io/ocm/test/integration/util"
"time"

@@ -44,12 +45,13 @@ var _ = ginkgo.Describe("ManifestWork", func() {
ginkgo.BeforeEach(func() {
o = spoke.NewWorkloadAgentOptions()
o.HubKubeconfigFile = hubKubeconfigFileName
o.SpokeClusterName = utilrand.String(5)
o.AgentOptions = commonoptions.NewAgentOptions()
o.AgentOptions.SpokeClusterName = utilrand.String(5)
o.StatusSyncInterval = 3 * time.Second
o.AppliedManifestWorkEvictionGracePeriod = 5 * time.Second

ns := &corev1.Namespace{}
ns.Name = o.SpokeClusterName
ns.Name = o.AgentOptions.SpokeClusterName
_, err := spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

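The commonoptions import above is the new pkg/common/options package this commit introduces. Its full contents are not visible in this part of the diff; a minimal sketch consistent with the call sites here (NewAgentOptions and the SpokeClusterName field) would be:

package options

// AgentOptions holds configuration shared across the agent binaries.
// Only SpokeClusterName is visible from the call sites in this diff;
// any further fields or flag wiring are assumptions.
type AgentOptions struct {
	// SpokeClusterName names the managed cluster; the tests use it as
	// the ManifestWork namespace on the hub and the target namespace
	// on the spoke.
	SpokeClusterName string
}

// NewAgentOptions returns an AgentOptions with zero-valued defaults;
// callers such as the BeforeEach above set SpokeClusterName themselves.
func NewAgentOptions() *AgentOptions {
	return &AgentOptions{}
}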
@@ -62,30 +64,30 @@ var _ = ginkgo.Describe("ManifestWork", func() {
})

ginkgo.JustBeforeEach(func() {
work = util.NewManifestWork(o.SpokeClusterName, "", manifests)
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work = util.NewManifestWork(o.AgentOptions.SpokeClusterName, "", manifests)
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
appliedManifestWorkName = fmt.Sprintf("%s-%s", hubHash, work.Name)
gomega.Expect(err).ToNot(gomega.HaveOccurred())
})

ginkgo.AfterEach(func() {
err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
if !errors.IsNotFound(err) {
gomega.Expect(err).ToNot(gomega.HaveOccurred())
}

gomega.Eventually(func() error {
_, err := hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
_, err := hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if errors.IsNotFound(err) {
return nil
}
if err != nil {
return err
}
return fmt.Errorf("work %s in namespace %s still exists", work.Name, o.SpokeClusterName)
return fmt.Errorf("work %s in namespace %s still exists", work.Name, o.AgentOptions.SpokeClusterName)
}, eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed())

err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), o.SpokeClusterName, metav1.DeleteOptions{})
err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), o.AgentOptions.SpokeClusterName, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

if cancel != nil {
@@ -96,7 +98,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
ginkgo.Context("With a single manifest", func() {
ginkgo.BeforeEach(func() {
manifests = []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm1", map[string]string{"a": "b"}, nil)),
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, nil)),
}
})

@@ -116,13 +118,13 @@ var _ = ginkgo.Describe("ManifestWork", func() {
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)

newManifests := []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm2", map[string]string{"x": "y"}, nil)),
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm2", map[string]string{"x": "y"}, nil)),
}
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
work.Spec.Workload.Manifests = newManifests

work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

util.AssertExistenceOfConfigMaps(newManifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
@@ -144,14 +146,14 @@ var _ = ginkgo.Describe("ManifestWork", func() {
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())

_, err = spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
_, err = spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue())
})

ginkgo.It("should delete work successfully", func() {
util.AssertFinalizerAdded(work.Namespace, work.Name, hubWorkClient, eventuallyTimeout, eventuallyInterval)

err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

util.AssertWorkDeleted(work.Namespace, work.Name, hubHash, manifests, hubWorkClient, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
@@ -162,8 +164,8 @@ var _ = ginkgo.Describe("ManifestWork", func() {
ginkgo.BeforeEach(func() {
manifests = []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap("non-existent-namespace", "cm1", map[string]string{"a": "b"}, nil)),
util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm2", map[string]string{"c": "d"}, nil)),
util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm3", map[string]string{"e": "f"}, nil)),
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm2", map[string]string{"c": "d"}, nil)),
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm3", map[string]string{"e": "f"}, nil)),
}
})

@@ -185,15 +187,15 @@ var _ = ginkgo.Describe("ManifestWork", func() {
[]metav1.ConditionStatus{metav1.ConditionFalse, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)

newManifests := []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm1", map[string]string{"a": "b"}, nil)),
util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm2", map[string]string{"x": "y"}, nil)),
util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm4", map[string]string{"e": "f"}, nil)),
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, nil)),
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm2", map[string]string{"x": "y"}, nil)),
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm4", map[string]string{"e": "f"}, nil)),
}

work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
work.Spec.Workload.Manifests = newManifests
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

util.AssertExistenceOfConfigMaps(newManifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
@@ -218,14 +220,14 @@ var _ = ginkgo.Describe("ManifestWork", func() {
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())

_, err = spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm3", metav1.GetOptions{})
_, err = spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm3", metav1.GetOptions{})
gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue())
})

ginkgo.It("should delete work successfully", func() {
util.AssertFinalizerAdded(work.Namespace, work.Name, hubWorkClient, eventuallyTimeout, eventuallyInterval)

err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

util.AssertWorkDeleted(work.Namespace, work.Name, hubHash, manifests, hubWorkClient, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
@@ -249,7 +251,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
objects = append(objects, obj)

// cr
obj, gvr, err = util.GuestbookCr(o.SpokeClusterName, "guestbook1")
obj, gvr, err = util.GuestbookCr(o.AgentOptions.SpokeClusterName, "guestbook1")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
gvrs = append(gvrs, gvr)
objects = append(objects, obj)
@@ -291,7 +293,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
util.AssertAppliedResources(hubHash, work.Name, gvrs, namespaces, names, hubWorkClient, eventuallyTimeout, eventuallyInterval)

// update object label
obj, gvr, err := util.GuestbookCr(o.SpokeClusterName, "guestbook1")
obj, gvr, err := util.GuestbookCr(o.AgentOptions.SpokeClusterName, "guestbook1")
gomega.Expect(err).ToNot(gomega.HaveOccurred())

cr, err := util.GetResource(obj.GetNamespace(), obj.GetName(), gvr, spokeDynamicClient)
@@ -341,7 +343,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
util.AssertAppliedResources(hubHash, work.Name, gvrs, namespaces, names, hubWorkClient, eventuallyTimeout, eventuallyInterval)

// update object finalizer
obj, gvr, err := util.GuestbookCr(o.SpokeClusterName, "guestbook1")
obj, gvr, err := util.GuestbookCr(o.AgentOptions.SpokeClusterName, "guestbook1")
gomega.Expect(err).ToNot(gomega.HaveOccurred())

cr, err := util.GetResource(obj.GetNamespace(), obj.GetName(), gvr, spokeDynamicClient)
@@ -405,12 +407,12 @@ var _ = ginkgo.Describe("ManifestWork", func() {
util.AssertAppliedResources(hubHash, work.Name, gvrs, namespaces, names, hubWorkClient, eventuallyTimeout, eventuallyInterval)

// delete manifest work
err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

// wait for deletion of manifest work
gomega.Eventually(func() bool {
_, err := hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
_, err := hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
return errors.IsNotFound(err)
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())

@@ -444,19 +446,19 @@ var _ = ginkgo.Describe("ManifestWork", func() {
gvrs = nil
objects = nil

u, gvr := util.NewServiceAccount(o.SpokeClusterName, "sa")
u, gvr := util.NewServiceAccount(o.AgentOptions.SpokeClusterName, "sa")
gvrs = append(gvrs, gvr)
objects = append(objects, u)

u, gvr = util.NewRole(o.SpokeClusterName, "role1")
u, gvr = util.NewRole(o.AgentOptions.SpokeClusterName, "role1")
gvrs = append(gvrs, gvr)
objects = append(objects, u)

u, gvr = util.NewRoleBinding(o.SpokeClusterName, "rolebinding1", "sa", "role1")
u, gvr = util.NewRoleBinding(o.AgentOptions.SpokeClusterName, "rolebinding1", "sa", "role1")
gvrs = append(gvrs, gvr)
objects = append(objects, u)

u, gvr, err = util.NewDeployment(o.SpokeClusterName, "deploy1", "sa")
u, gvr, err = util.NewDeployment(o.AgentOptions.SpokeClusterName, "deploy1", "sa")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
gvrs = append(gvrs, gvr)
objects = append(objects, u)
@@ -511,9 +513,9 @@ var _ = ginkgo.Describe("ManifestWork", func() {
ginkgo.By("update manifests in work")
oldServiceAccount := objects[0]
gvrs[0], gvrs[3] = gvrs[3], gvrs[0]
u, _ := util.NewServiceAccount(o.SpokeClusterName, "admin")
u, _ := util.NewServiceAccount(o.AgentOptions.SpokeClusterName, "admin")
objects[3] = u
u, _, err = util.NewDeployment(o.SpokeClusterName, "deploy1", "admin")
u, _, err = util.NewDeployment(o.AgentOptions.SpokeClusterName, "deploy1", "admin")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
objects[0] = u

@@ -527,10 +529,10 @@ var _ = ginkgo.Describe("ManifestWork", func() {
updateTime := metav1.Now()
time.Sleep(1 * time.Second)

work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
work.Spec.Workload.Manifests = newManifests
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

ginkgo.By("check existence of all maintained resources")
@@ -544,7 +546,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {

ginkgo.By("check if deployment is updated")
gomega.Eventually(func() error {
u, err := util.GetResource(o.SpokeClusterName, objects[0].GetName(), gvrs[0], spokeDynamicClient)
u, err := util.GetResource(o.AgentOptions.SpokeClusterName, objects[0].GetName(), gvrs[0], spokeDynamicClient)
if err != nil {
return err
}
@@ -558,7 +560,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {

ginkgo.By("check if LastTransitionTime is updated")
gomega.Eventually(func() error {
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
@@ -602,14 +604,14 @@ var _ = ginkgo.Describe("ManifestWork", func() {
var finalizer = "cluster.open-cluster-management.io/testing"
ginkgo.BeforeEach(func() {
manifests = []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{finalizer})),
util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{finalizer})),
util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm3", map[string]string{"e": "f"}, []string{finalizer})),
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{finalizer})),
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{finalizer})),
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm3", map[string]string{"e": "f"}, []string{finalizer})),
}
})

ginkgo.AfterEach(func() {
err = util.RemoveConfigmapFinalizers(spokeKubeClient, o.SpokeClusterName, "cm1", "cm2", "cm3")
err = util.RemoveConfigmapFinalizers(spokeKubeClient, o.AgentOptions.SpokeClusterName, "cm1", "cm2", "cm3")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
})

@@ -621,10 +623,10 @@ var _ = ginkgo.Describe("ManifestWork", func() {
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)

work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
work.Spec.Workload.Manifests = manifests[1:]
work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

util.AssertExistenceOfConfigMaps(manifests[1:], spokeKubeClient, eventuallyTimeout, eventuallyInterval)
@@ -636,7 +638,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
go func() {
time.Sleep(2 * time.Second)
// remove finalizers of cm1
_ = util.RemoveConfigmapFinalizers(spokeKubeClient, o.SpokeClusterName, "cm1")
_ = util.RemoveConfigmapFinalizers(spokeKubeClient, o.AgentOptions.SpokeClusterName, "cm1")
}()

// check if resource created by stale manifest is deleted once it is removed from applied resource list
@@ -655,7 +657,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())

_, err = spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
_, err = spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue())
})
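Taken together, the renames in this file imply that the work agent's options now compose the shared struct instead of carrying their own cluster name. A sketch of that shape, inferred only from the fields the tests above touch (the actual spoke.WorkloadAgentOptions definition is not part of this diff):

// Assumed composition of spoke.WorkloadAgentOptions after this commit,
// inferred from o.AgentOptions, o.HubKubeconfigFile, o.StatusSyncInterval
// and o.AppliedManifestWorkEvictionGracePeriod in the tests above.
type WorkloadAgentOptions struct {
	AgentOptions                           *commonoptions.AgentOptions
	HubKubeconfigFile                      string
	StatusSyncInterval                     time.Duration
	AppliedManifestWorkEvictionGracePeriod time.Duration
}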