Mirror of https://github.com/open-cluster-management-io/ocm.git, synced 2026-02-14 10:00:11 +00:00
✨ run work and registration as a single binary (#201)
* run registration/work together
* Fix integration test and lint issue
* Update operator to deploy singleton mode
* Update deps

Signed-off-by: Jian Qiu <jqiu@redhat.com>
29  .github/workflows/e2e.yml (vendored)
@@ -75,3 +75,32 @@ jobs:
          IMAGE_TAG=e2e KLUSTERLET_DEPLOY_MODE=Hosted make test-e2e
        env:
          KUBECONFIG: /home/runner/.kube/config
  e2e-singleton:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3
      - name: Setup Go
        uses: actions/setup-go@v3
        with:
          go-version: ${{ env.GO_VERSION }}
      - name: Setup kind
        uses: engineerd/setup-kind@v0.5.0
        with:
          version: v0.17.0
      - name: install imagebuilder
        run: go install github.com/openshift/imagebuilder/cmd/imagebuilder@v1.2.3
      - name: Build images
        run: IMAGE_TAG=e2e make images
      - name: Load images
        run: |
          kind load docker-image --name=kind quay.io/open-cluster-management/registration-operator:e2e
          kind load docker-image --name=kind quay.io/open-cluster-management/registration:e2e
          kind load docker-image --name=kind quay.io/open-cluster-management/work:e2e
          kind load docker-image --name=kind quay.io/open-cluster-management/placement:e2e
          kind load docker-image --name=kind quay.io/open-cluster-management/addon-manager:e2e
      - name: Test E2E
        run: |
          IMAGE_TAG=e2e KLUSTERLET_DEPLOY_MODE=Singleton make test-e2e
        env:
          KUBECONFIG: /home/runner/.kube/config
@@ -52,6 +52,7 @@ func newNucleusCommand() *cobra.Command {

cmd.AddCommand(hub.NewHubOperatorCmd())
cmd.AddCommand(spoke.NewKlusterletOperatorCmd())
cmd.AddCommand(spoke.NewKlusterletAgentCmd())

return cmd
}
@@ -340,6 +340,15 @@ spec:
- clustermanagementaddons/finalizers
verbs:
- update
- apiGroups:
- addon.open-cluster-management.io
resources:
- addondeploymentconfigs
- addontemplates
verbs:
- get
- list
- watch
- apiGroups:
- authentication.k8s.io
resources:
@@ -364,12 +373,11 @@ spec:
- update
- apiGroups:
- certificates.k8s.io
resourceNames:
- kubernetes.io/kube-apiserver-client
resources:
- signers
verbs:
- approve
- sign
- apiGroups:
- cluster.open-cluster-management.io
resources:
@@ -7,6 +7,7 @@ spec:
mode: Default
registrationImagePullSpec: quay.io/open-cluster-management/registration
workImagePullSpec: quay.io/open-cluster-management/work
imagePullSpec: quay.io/open-cluster-management/registration-operator
clusterName: cluster1
namespace: open-cluster-management-agent
externalServerURLs:
@@ -20,6 +20,7 @@ metadata:
"url": "https://localhost"
}
],
"imagePullSpec": "quay.io/open-cluster-management/registration-operator",
"namespace": "open-cluster-management-agent",
"registrationConfiguration": {
"featureGates": [
@@ -37,7 +37,7 @@ spec:
description: DeployOption contains the options of deploying a klusterlet
properties:
mode:
description: 'Mode can be Default or Hosted. It is Default mode if not specified In Default mode, all klusterlet related resources are deployed on the managed cluster. In Hosted mode, only crd and configurations are installed on the spoke/managed cluster. Controllers run in another cluster (defined as management-cluster) and connect to the mangaged cluster with the kubeconfig in secret of "external-managed-kubeconfig"(a kubeconfig of managed-cluster with cluster-admin permission). Note: Do not modify the Mode field once it''s applied.'
description: 'Mode can be Default, Hosted or Singleton. It is Default mode if not specified In Default mode, all klusterlet related resources are deployed on the managed cluster. In Hosted mode, only crd and configurations are installed on the spoke/managed cluster. Controllers run in another cluster (defined as management-cluster) and connect to the mangaged cluster with the kubeconfig in secret of "external-managed-kubeconfig"(a kubeconfig of managed-cluster with cluster-admin permission). In Singleton mode, registration/work agent is started as a single deployment. Note: Do not modify the Mode field once it''s applied.'
type: string
type: object
externalServerURLs:
@@ -69,6 +69,9 @@ spec:
- hostname
- ip
type: object
imagePullSpec:
description: ImagePullSpec represents the desired image configuration of agent, it takes effect only when singleton mode is set. quay.io/open-cluster-management.io/registration-operator:latest will be used if unspecified
type: string
namespace:
description: Namespace is the namespace to deploy the agent on the managed cluster. The namespace must have a prefix of "open-cluster-management-", and if it is not set, the namespace of "open-cluster-management-agent" is used to deploy agent. In addition, the add-ons are deployed to the namespace of "{Namespace}-addon". In the Hosted mode, this namespace still exists on the managed cluster to contain necessary resources, like service accounts, roles and rolebindings, while the agent is deployed to the namespace with the same name as klusterlet on the management cluster.
maxLength: 63
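For illustration only (not part of this commit): given the CRD change above and the sample CR change earlier in the diff, a Klusterlet requesting the new Singleton mode might be built in Go roughly as below. The field paths (spec.deployOption.mode, spec.imagePullSpec, spec.clusterName, spec.namespace) come from this diff; the concrete names and image are assumptions.

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	operatorapiv1 "open-cluster-management.io/api/operator/v1"
)

func main() {
	// Minimal sketch: in Singleton mode the registration and work agents run
	// as one deployment, so only the single imagePullSpec is needed.
	k := &operatorapiv1.Klusterlet{ObjectMeta: metav1.ObjectMeta{Name: "klusterlet"}}
	k.Spec.DeployOption.Mode = operatorapiv1.InstallModeSingleton
	k.Spec.ImagePullSpec = "quay.io/open-cluster-management/registration-operator"
	k.Spec.ClusterName = "cluster1"
	k.Spec.Namespace = "open-cluster-management-agent"
	fmt.Printf("%+v\n", k.Spec)
}
```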
@@ -13,5 +13,5 @@ roleRef:
name: open-cluster-management:{{ .KlusterletName }}-registration:addon-management
subjects:
- kind: ServiceAccount
name: {{ .KlusterletName }}-registration-sa
name: {{ .RegistrationServiceAccount }}
namespace: {{ .KlusterletNamespace }}

@@ -9,5 +9,5 @@ roleRef:
name: open-cluster-management:{{ .KlusterletName }}-registration:agent
subjects:
- kind: ServiceAccount
name: {{ .KlusterletName }}-registration-sa
name: {{ .RegistrationServiceAccount }}
namespace: {{ .KlusterletNamespace }}

@@ -1,7 +1,7 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ .KlusterletName }}-registration-sa
name: {{ .RegistrationServiceAccount }}
namespace: {{ .KlusterletNamespace }}
imagePullSecrets:
- name: open-cluster-management-image-pull-credentials

@@ -12,5 +12,5 @@ roleRef:
name: admin
subjects:
- kind: ServiceAccount
name: {{ .KlusterletName }}-work-sa
name: {{ .WorkServiceAccount }}
namespace: {{ .KlusterletNamespace }}

@@ -10,5 +10,5 @@ roleRef:
name: open-cluster-management:{{ .KlusterletName }}-work:execution
subjects:
- kind: ServiceAccount
name: {{ .KlusterletName }}-work-sa
name: {{ .WorkServiceAccount }}
namespace: {{ .KlusterletNamespace }}

@@ -9,5 +9,5 @@ roleRef:
name: open-cluster-management:{{ .KlusterletName }}-work:agent
subjects:
- kind: ServiceAccount
name: {{ .KlusterletName }}-work-sa
name: {{ .WorkServiceAccount }}
namespace: {{ .KlusterletNamespace }}

@@ -1,7 +1,7 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ .KlusterletName }}-work-sa
name: {{ .WorkServiceAccount }}
namespace: {{ .KlusterletNamespace }}
imagePullSecrets:
- name: open-cluster-management-image-pull-credentials
114  manifests/klusterlet/management/klusterlet-agent-deployment.yaml (new file)
@@ -0,0 +1,114 @@
kind: Deployment
apiVersion: apps/v1
metadata:
  name: {{ .KlusterletName }}-agent
  namespace: {{ .AgentNamespace }}
  labels:
    app: klusterlet-agent
    createdBy: klusterlet
spec:
  replicas: {{ .Replica }}
  selector:
    matchLabels:
      app: klusterlet-agent
  template:
    metadata:
      annotations:
        target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}'
      labels:
        app: klusterlet-agent
    spec:
      {{if .HubApiServerHostAlias }}
      hostAliases:
      - ip: {{ .HubApiServerHostAlias.IP }}
        hostnames:
        - {{ .HubApiServerHostAlias.Hostname }}
      {{end}}
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 70
            podAffinityTerm:
              topologyKey: failure-domain.beta.kubernetes.io/zone
              labelSelector:
                matchExpressions:
                - key: app
                  operator: In
                  values:
                  - klusterlet-registration-agent
          - weight: 30
            podAffinityTerm:
              topologyKey: kubernetes.io/hostname
              labelSelector:
                matchExpressions:
                - key: app
                  operator: In
                  values:
                  - klusterlet-registration-agent
      serviceAccountName: {{ .KlusterletName }}-agent-sa
      containers:
      - name: klusterlet-agent
        image: {{ .SingletonImage }}
        args:
          - "/registration-operator"
          - "agent"
          - "--spoke-cluster-name={{ .ClusterName }}"
          - "--bootstrap-kubeconfig=/spoke/bootstrap/kubeconfig"
          - "--agent-id={{ .AgentID }}"
          {{ if gt (len .WorkFeatureGates) 0 }}
          {{range .WorkFeatureGates}}
          - {{ . }}
          {{end}}
          {{ end }}
          {{ if gt (len .RegistrationFeatureGates) 0 }}
          {{range .RegistrationFeatureGates}}
          - {{ . }}
          {{end}}
          {{ end }}
          {{if .ExternalServerURL}}
          - "--spoke-external-server-urls={{ .ExternalServerURL }}"
          {{end}}
          - "--terminate-on-files=/spoke/hub-kubeconfig/kubeconfig"
          {{if eq .Replica 1}}
          - "--disable-leader-election"
          {{end}}
          {{if gt .ClientCertExpirationSeconds 0}}
          - "--client-cert-expiration-seconds={{ .ClientCertExpirationSeconds }}"
          {{end}}
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            drop:
              - ALL
          privileged: false
          runAsNonRoot: true
        volumeMounts:
        - name: bootstrap-secret
          mountPath: "/spoke/bootstrap"
          readOnly: true
        - name: hub-kubeconfig
          mountPath: "/spoke/hub-kubeconfig"
        livenessProbe:
          httpGet:
            path: /healthz
            scheme: HTTPS
            port: 8443
          initialDelaySeconds: 2
          periodSeconds: 10
        readinessProbe:
          httpGet:
            path: /healthz
            scheme: HTTPS
            port: 8443
          initialDelaySeconds: 2
        resources:
          requests:
            cpu: 2m
            memory: 16Mi
      volumes:
      - name: bootstrap-secret
        secret:
          secretName: {{ .BootStrapKubeConfigSecret }}
      - name: hub-kubeconfig
        emptyDir:
          medium: Memory
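As a reading aid (not part of the commit): the template above pulls its values from the operator's klusterletConfig, which gains a SingletonImage field later in this diff. A minimal Go sketch of the values the manifest consumes is below; the types of Replica, ClientCertExpirationSeconds and the host-alias field are assumptions, since the diff does not show them.

```go
package sketch

// klusterletAgentTemplateValues is a hypothetical view of the data the new
// klusterlet-agent-deployment.yaml template references.
type klusterletAgentTemplateValues struct {
	KlusterletName              string // deployment and service-account name prefix
	AgentNamespace              string // namespace the agent is deployed to
	Replica                     int32  // assumed type; a value of 1 disables leader election
	SingletonImage              string // from Klusterlet spec.imagePullSpec
	ClusterName                 string
	AgentID                     string
	RegistrationFeatureGates    []string
	WorkFeatureGates            []string
	ExternalServerURL           string
	ClientCertExpirationSeconds int32 // assumed type
	BootStrapKubeConfigSecret   string
	// Simplified stand-in for the hub API server host alias (ip + hostname).
	HubApiServerHostAlias *struct{ IP, Hostname string }
}
```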
@@ -11,5 +11,5 @@ roleRef:
name: open-cluster-management:management:{{ .KlusterletName }}-registration:addon-management
subjects:
- kind: ServiceAccount
name: {{ .KlusterletName }}-registration-sa
name: {{ .RegistrationServiceAccount }}
namespace: {{ .AgentNamespace }}

@@ -9,5 +9,5 @@ roleRef:
name: open-cluster-management:management:{{ .KlusterletName }}:extension-apiserver
subjects:
- kind: ServiceAccount
name: {{ .KlusterletName }}-registration-sa
name: {{ .RegistrationServiceAccount }}
namespace: {{ .AgentNamespace }}

@@ -10,5 +10,5 @@ roleRef:
name: open-cluster-management:management:{{ .KlusterletName }}-registration:agent
subjects:
- kind: ServiceAccount
name: {{ .KlusterletName }}-registration-sa
name: {{ .RegistrationServiceAccount }}
namespace: {{ .AgentNamespace }}

@@ -1,7 +1,7 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ .KlusterletName }}-registration-sa
name: {{ .RegistrationServiceAccount }}
namespace: {{ .AgentNamespace }}
imagePullSecrets:
- name: open-cluster-management-image-pull-credentials

@@ -9,5 +9,5 @@ roleRef:
name: open-cluster-management:management:{{ .KlusterletName }}:extension-apiserver
subjects:
- kind: ServiceAccount
name: {{ .KlusterletName }}-work-sa
name: {{ .WorkServiceAccount }}
namespace: {{ .AgentNamespace }}

@@ -10,5 +10,5 @@ roleRef:
name: open-cluster-management:management:{{ .KlusterletName }}-work:agent
subjects:
- kind: ServiceAccount
name: {{ .KlusterletName }}-work-sa
name: {{ .WorkServiceAccount }}
namespace: {{ .AgentNamespace }}

@@ -1,7 +1,7 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ .KlusterletName }}-work-sa
name: {{ .WorkServiceAccount }}
namespace: {{ .AgentNamespace }}
imagePullSecrets:
- name: open-cluster-management-image-pull-credentials
@@ -5,12 +5,22 @@ import (

"github.com/openshift/library-go/pkg/controller/controllercmd"
"github.com/spf13/cobra"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"

ocmfeature "open-cluster-management.io/api/feature"

commonoptions "open-cluster-management.io/ocm/pkg/common/options"
"open-cluster-management.io/ocm/pkg/features"
"open-cluster-management.io/ocm/pkg/operator/operators/klusterlet"
registration "open-cluster-management.io/ocm/pkg/registration/spoke"
singletonspoke "open-cluster-management.io/ocm/pkg/singleton/spoke"
"open-cluster-management.io/ocm/pkg/version"
work "open-cluster-management.io/ocm/pkg/work/spoke"
)

// NewKlusterletOperatorCmd generatee a command to start klusterlet operator
const agentCmdName = "agent"

// NewKlusterletOperatorCmd generate a command to start klusterlet operator
func NewKlusterletOperatorCmd() *cobra.Command {

options := klusterlet.Options{}
@@ -28,3 +38,31 @@ func NewKlusterletOperatorCmd() *cobra.Command {

return cmd
}

// NewKlusterletAgentCmd is to start the singleton agent including registration/work
func NewKlusterletAgentCmd() *cobra.Command {
commonOptions := commonoptions.NewAgentOptions()
workOptions := work.NewWorkloadAgentOptions()
registrationOption := registration.NewSpokeAgentOptions()

agentConfig := singletonspoke.NewAgentConfig(commonOptions, registrationOption, workOptions)
cmdConfig := controllercmd.
NewControllerCommandConfig("klusterlet", version.Get(), agentConfig.RunSpokeAgent)
cmd := cmdConfig.NewCommandWithContext(context.TODO())
cmd.Use = agentCmdName
cmd.Short = "Start the klusterlet agent"

flags := cmd.Flags()

commonOptions.AddFlags(flags)
workOptions.AddFlags(flags)
registrationOption.AddFlags(flags)

utilruntime.Must(features.SpokeMutableFeatureGate.Add(ocmfeature.DefaultSpokeRegistrationFeatureGates))
utilruntime.Must(features.SpokeMutableFeatureGate.Add(ocmfeature.DefaultSpokeWorkFeatureGates))
features.SpokeMutableFeatureGate.AddFlag(flags)

// add disable leader election flag
flags.BoolVar(&cmdConfig.DisableLeaderElection, "disable-leader-election", false, "Disable leader election for the agent.")
return cmd
}
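A short usage sketch of the new singleton entry point added above, assuming it is embedded in a custom main package; flag and feature-gate registration is omitted for brevity, and error handling is illustrative only.

```go
package main

import (
	"context"

	"github.com/openshift/library-go/pkg/controller/controllercmd"

	commonoptions "open-cluster-management.io/ocm/pkg/common/options"
	registration "open-cluster-management.io/ocm/pkg/registration/spoke"
	singletonspoke "open-cluster-management.io/ocm/pkg/singleton/spoke"
	"open-cluster-management.io/ocm/pkg/version"
	work "open-cluster-management.io/ocm/pkg/work/spoke"
)

func main() {
	// Same wiring as NewKlusterletAgentCmd: the shared agent options plus the
	// registration and work option sets feed a single singleton agent config.
	commonOpts := commonoptions.NewAgentOptions()
	regOpts := registration.NewSpokeAgentOptions()
	workOpts := work.NewWorkloadAgentOptions()
	cfg := singletonspoke.NewAgentConfig(commonOpts, regOpts, workOpts)

	// RunSpokeAgent starts both the registration and work agents in one process.
	cmd := controllercmd.
		NewControllerCommandConfig("klusterlet", version.Get(), cfg.RunSpokeAgent).
		NewCommandWithContext(context.TODO())
	cmd.Use = "agent"

	if err := cmd.Execute(); err != nil {
		panic(err)
	}
}
```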
@@ -5,23 +5,34 @@ import (

"github.com/openshift/library-go/pkg/controller/controllercmd"
"github.com/spf13/cobra"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"

ocmfeature "open-cluster-management.io/api/feature"

commonoptions "open-cluster-management.io/ocm/pkg/common/options"
"open-cluster-management.io/ocm/pkg/features"
"open-cluster-management.io/ocm/pkg/registration/spoke"
"open-cluster-management.io/ocm/pkg/version"
)

func NewRegistrationAgent() *cobra.Command {
agentOptions := spoke.NewSpokeAgentOptions()
commonOptions := commonoptions.NewAgentOptions()
cfg := spoke.NewSpokeAgentConfig(commonOptions, agentOptions)
cmdConfig := controllercmd.
NewControllerCommandConfig("registration-agent", version.Get(), agentOptions.RunSpokeAgent)
NewControllerCommandConfig("registration-agent", version.Get(), cfg.RunSpokeAgent)

cmd := cmdConfig.NewCommandWithContext(context.TODO())
cmd.Use = "agent"
cmd.Use = agentCmdName
cmd.Short = "Start the Cluster Registration Agent"

flags := cmd.Flags()
commonOptions.AddFlags(flags)
agentOptions.AddFlags(flags)

utilruntime.Must(features.SpokeMutableFeatureGate.Add(ocmfeature.DefaultSpokeRegistrationFeatureGates))
features.SpokeMutableFeatureGate.AddFlag(flags)

flags.BoolVar(&cmdConfig.DisableLeaderElection, "disable-leader-election", false, "Disable leader election for the agent.")
return cmd
}
@@ -5,24 +5,34 @@ import (

"github.com/openshift/library-go/pkg/controller/controllercmd"
"github.com/spf13/cobra"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"

ocmfeature "open-cluster-management.io/api/feature"

commonoptions "open-cluster-management.io/ocm/pkg/common/options"
"open-cluster-management.io/ocm/pkg/features"
"open-cluster-management.io/ocm/pkg/version"
"open-cluster-management.io/ocm/pkg/work/spoke"
)

// NewWorkAgent generates a command to start work agent
func NewWorkAgent() *cobra.Command {
o := spoke.NewWorkloadAgentOptions()
commonOptions := commonoptions.NewAgentOptions()
agentOption := spoke.NewWorkloadAgentOptions()
cfg := spoke.NewWorkAgentConfig(commonOptions, agentOption)
cmdConfig := controllercmd.
NewControllerCommandConfig("work-agent", version.Get(), o.RunWorkloadAgent)
NewControllerCommandConfig("work-agent", version.Get(), cfg.RunWorkloadAgent)
cmd := cmdConfig.NewCommandWithContext(context.TODO())
cmd.Use = "agent"
cmd.Use = agentCmdName
cmd.Short = "Start the Work Agent"

o.AddFlags(cmd)

// add disable leader election flag
flags := cmd.Flags()
commonOptions.AddFlags(flags)
agentOption.AddFlags(flags)
utilruntime.Must(features.SpokeMutableFeatureGate.Add(ocmfeature.DefaultSpokeWorkFeatureGates))
features.SpokeMutableFeatureGate.AddFlag(flags)

flags.BoolVar(&cmdConfig.DisableLeaderElection, "disable-leader-election", false, "Disable leader election for the agent.")

return cmd
@@ -2,28 +2,55 @@ package options

import (
"fmt"
"os"
"path"
"strings"

"github.com/spf13/pflag"
apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
utilrand "k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/klog/v2"

"open-cluster-management.io/ocm/pkg/registration/clientcert"
"open-cluster-management.io/ocm/pkg/registration/spoke/registration"
)

const (
// spokeAgentNameLength is the length of the spoke agent name which is generated automatically
spokeAgentNameLength = 5
// defaultSpokeComponentNamespace is the default namespace in which the spoke agent is deployed
defaultSpokeComponentNamespace = "open-cluster-management-agent"
)

// AgentOptions is the common agent options
type AgentOptions struct {
ComponentNamespace string
SpokeKubeconfigFile string
SpokeClusterName string
HubKubeconfigDir string
HubKubeconfigFile string
AgentID string
Burst int
QPS float32
}

// NewWorkloadAgentOptions returns the flags with default value set
func NewAgentOptions() *AgentOptions {
return &AgentOptions{
QPS: 50,
Burst: 100,
opts := &AgentOptions{
HubKubeconfigDir: "/spoke/hub-kubeconfig",
ComponentNamespace: defaultSpokeComponentNamespace,
QPS: 50,
Burst: 100,
}
// get component namespace of spoke agent
nsBytes, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
if err == nil {
opts.ComponentNamespace = string(nsBytes)
}
return opts
}

func (o *AgentOptions) AddFlags(flags *pflag.FlagSet) {
@@ -33,6 +60,10 @@ func (o *AgentOptions) AddFlags(flags *pflag.FlagSet) {
_ = flags.MarkDeprecated("cluster-name", "use spoke-cluster-name flag")
flags.StringVar(&o.SpokeClusterName, "cluster-name", o.SpokeClusterName,
"Name of the spoke cluster.")
flags.StringVar(&o.HubKubeconfigDir, "hub-kubeconfig-dir", o.HubKubeconfigDir,
"The mount path of hub-kubeconfig-secret in the container.")
flags.StringVar(&o.HubKubeconfigFile, "hub-kubeconfig", o.HubKubeconfigFile, "Location of kubeconfig file to connect to hub cluster.")
flags.StringVar(&o.AgentID, "agent-id", o.AgentID, "ID of the agent")
flags.Float32Var(&o.QPS, "spoke-kube-api-qps", o.QPS, "QPS to use while talking with apiserver on spoke cluster.")
flags.IntVar(&o.Burst, "spoke-kube-api-burst", o.Burst, "Burst to use while talking with apiserver on spoke cluster.")
}
@@ -62,3 +93,100 @@ func (o *AgentOptions) Validate() error {

return nil
}

// Complete fills in missing values.
func (o *AgentOptions) Complete() error {
if len(o.HubKubeconfigFile) == 0 {
o.HubKubeconfigFile = path.Join(o.HubKubeconfigDir, clientcert.KubeconfigFile)
}

// load or generate cluster/agent names
o.SpokeClusterName, o.AgentID = o.getOrGenerateClusterAgentID()

return nil
}

// getOrGenerateClusterAgentID returns cluster name and agent id.
// Rules for picking up cluster name:
// 1. Use cluster name from input arguments if 'spoke-cluster-name' is specified;
// 2. Parse cluster name from the common name of the certification subject if the certification exists;
// 3. Fallback to cluster name in the mounted secret if it exists;
// 4. TODO: Read cluster name from openshift struct if the agent is running in an openshift cluster;
// 5. Generate a random cluster name then;

// Rules for picking up agent id:
// 1. Read from the flag "agent-id" at first.
// 2. Parse agent name from the common name of the certification subject if the certification exists;
// 3. Fallback to agent name in the mounted secret if it exists;
// 4. Generate a random agent name then;
func (o *AgentOptions) getOrGenerateClusterAgentID() (string, string) {
if len(o.SpokeClusterName) > 0 && len(o.AgentID) > 0 {
return o.SpokeClusterName, o.AgentID
}
// try to load cluster/agent name from tls certification
var clusterNameInCert, agentNameInCert string
certPath := path.Join(o.HubKubeconfigDir, clientcert.TLSCertFile)
certData, certErr := os.ReadFile(path.Clean(certPath))
if certErr == nil {
clusterNameInCert, agentNameInCert, _ = registration.GetClusterAgentNamesFromCertificate(certData)
}

clusterName := o.SpokeClusterName
// if cluster name is not specified with input argument, try to load it from file
if clusterName == "" {
// TODO, read cluster name from openshift struct if the spoke agent is running in an openshift cluster

// and then load the cluster name from the mounted secret
clusterNameFilePath := path.Join(o.HubKubeconfigDir, clientcert.ClusterNameFile)
clusterNameBytes, err := os.ReadFile(path.Clean(clusterNameFilePath))
switch {
case len(clusterNameInCert) > 0:
// use cluster name loaded from the tls certification
clusterName = clusterNameInCert
if clusterNameInCert != string(clusterNameBytes) {
klog.Warningf("Use cluster name %q in certification instead of %q in the mounted secret", clusterNameInCert, string(clusterNameBytes))
}
case err == nil:
// use cluster name load from the mounted secret
clusterName = string(clusterNameBytes)
default:
// generate random cluster name
clusterName = generateClusterName()
}
}

agentID := o.AgentID
// try to load agent name from the mounted secret
if len(agentID) == 0 {
agentIDFilePath := path.Join(o.HubKubeconfigDir, clientcert.AgentNameFile)
agentIDBytes, err := os.ReadFile(path.Clean(agentIDFilePath))
switch {
case len(agentNameInCert) > 0:
// use agent name loaded from the tls certification
agentID = agentNameInCert
if agentNameInCert != agentID {
klog.Warningf(
"Use agent name %q in certification instead of %q in the mounted secret",
agentNameInCert, agentID)
}
case err == nil:
// use agent name loaded from the mounted secret
agentID = string(agentIDBytes)
default:
// generate random agent name
agentID = generateAgentName()
}
}

return clusterName, agentID
}

// generateClusterName generates a name for spoke cluster
func generateClusterName() string {
return string(uuid.NewUUID())
}

// generateAgentName generates a random name for spoke cluster agent
func generateAgentName() string {
return utilrand.String(spokeAgentNameLength)
}
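A brief sketch (hypothetical caller, not part of this commit) of the Complete/Validate flow added above, as an agent command would use it:

```go
package main

import (
	"fmt"
	"os"

	commonoptions "open-cluster-management.io/ocm/pkg/common/options"
)

func main() {
	opts := commonoptions.NewAgentOptions()
	// In the real commands the cobra flags fill these in; set one directly here.
	opts.SpokeClusterName = "cluster1"

	// Complete derives HubKubeconfigFile from HubKubeconfigDir and resolves the
	// cluster name and agent ID (flag > certificate > mounted secret > random),
	// as documented in getOrGenerateClusterAgentID above.
	if err := opts.Complete(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	if err := opts.Validate(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("cluster:", opts.SpokeClusterName, "agent:", opts.AgentID)
}
```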
221  pkg/common/options/options_test.go (new file)
@@ -0,0 +1,221 @@
package options

import (
"context"
"os"
"path"
"testing"
"time"

"github.com/openshift/library-go/pkg/operator/events/eventstesting"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
kubefake "k8s.io/client-go/kubernetes/fake"

"open-cluster-management.io/ocm/pkg/registration/clientcert"
testinghelpers "open-cluster-management.io/ocm/pkg/registration/helpers/testing"
"open-cluster-management.io/ocm/pkg/registration/spoke/registration"
)

func TestComplete(t *testing.T) {
// get component namespace
var componentNamespace string
nsBytes, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
if err != nil {
componentNamespace = defaultSpokeComponentNamespace
} else {
componentNamespace = string(nsBytes)
}

cases := []struct {
name string
clusterName string
secret *corev1.Secret
expectedClusterName string
expectedAgentName string
}{
{
name: "generate random cluster/agent name",
},
{
name: "specify cluster name",
clusterName: "cluster1",
expectedClusterName: "cluster1",
},
{
name: "override cluster name in secret with specified value",
clusterName: "cluster1",
secret: testinghelpers.NewHubKubeconfigSecret(componentNamespace, "hub-kubeconfig-secret", "", nil, map[string][]byte{
"cluster-name": []byte("cluster2"),
"agent-name": []byte("agent2"),
}),
expectedClusterName: "cluster1",
expectedAgentName: "agent2",
},
{
name: "override cluster name in cert with specified value",
clusterName: "cluster1",
secret: testinghelpers.NewHubKubeconfigSecret(componentNamespace, "hub-kubeconfig-secret", "", testinghelpers.NewTestCert("system:open-cluster-management:cluster2:agent2", 60*time.Second), map[string][]byte{
"kubeconfig": testinghelpers.NewKubeconfig(nil, nil),
"cluster-name": []byte("cluster3"),
"agent-name": []byte("agent3"),
}),
expectedClusterName: "cluster1",
expectedAgentName: "agent2",
},
{
name: "take cluster/agent name from secret",
secret: testinghelpers.NewHubKubeconfigSecret(componentNamespace, "hub-kubeconfig-secret", "", nil, map[string][]byte{
"cluster-name": []byte("cluster1"),
"agent-name": []byte("agent1"),
}),
expectedClusterName: "cluster1",
expectedAgentName: "agent1",
},
{
name: "take cluster/agent name from cert",
secret: testinghelpers.NewHubKubeconfigSecret(componentNamespace, "hub-kubeconfig-secret", "", testinghelpers.NewTestCert("system:open-cluster-management:cluster1:agent1", 60*time.Second), map[string][]byte{}),
expectedClusterName: "cluster1",
expectedAgentName: "agent1",
},
{
name: "override cluster name in secret with value from cert",
secret: testinghelpers.NewHubKubeconfigSecret(componentNamespace, "hub-kubeconfig-secret", "", testinghelpers.NewTestCert("system:open-cluster-management:cluster1:agent1", 60*time.Second), map[string][]byte{
"cluster-name": []byte("cluster2"),
"agent-name": []byte("agent2"),
}),
expectedClusterName: "cluster1",
expectedAgentName: "agent1",
},
}

for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
// setup kube client
objects := []runtime.Object{}
if c.secret != nil {
objects = append(objects, c.secret)
}
kubeClient := kubefake.NewSimpleClientset(objects...)

// create a tmp dir to dump hub kubeconfig
dir, err := os.MkdirTemp("", "hub-kubeconfig")
if err != nil {
t.Error("unable to create a tmp dir")
}
defer os.RemoveAll(dir)

options := NewAgentOptions()
options.SpokeClusterName = c.clusterName
options.HubKubeconfigDir = dir

err = registration.DumpSecret(
kubeClient.CoreV1(), componentNamespace, "hub-kubeconfig-secret",
options.HubKubeconfigDir, context.TODO(), eventstesting.NewTestingEventRecorder(t))

if err := options.Complete(); err != nil {
t.Errorf("unexpected error: %v", err)
}
if options.ComponentNamespace == "" {
t.Error("component namespace should not be empty")
}
if options.SpokeClusterName == "" {
t.Error("cluster name should not be empty")
}
if options.AgentID == "" {
t.Error("agent name should not be empty")
}
if len(c.expectedClusterName) > 0 && options.SpokeClusterName != c.expectedClusterName {
t.Errorf("expect cluster name %q but got %q", c.expectedClusterName, options.SpokeClusterName)
}
if len(c.expectedAgentName) > 0 && options.AgentID != c.expectedAgentName {
t.Errorf("expect agent name %q but got %q", c.expectedAgentName, options.AgentID)
}
})
}
}

func TestValidate(t *testing.T) {
cases := []struct {
name string
clusterName string
expectedErr bool
}{
{
name: "empty cluster name",
expectedErr: true,
},
{
name: "invalid cluster name format",
clusterName: "test.cluster",
expectedErr: true,
},
{
name: "valid passed",
clusterName: "cluster-1",
expectedErr: false,
},
}

for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
options := NewAgentOptions()
options.SpokeClusterName = c.clusterName
err := options.Validate()
if err == nil && c.expectedErr {
t.Errorf("expect to get err")
}
if err != nil && !c.expectedErr {
t.Errorf("expect not error but got %v", err)
}
})
}
}

func TestGetOrGenerateClusterAgentNames(t *testing.T) {
tempDir, err := os.MkdirTemp("", "testgetorgenerateclusteragentnames")
if err != nil {
t.Errorf("unexpected error: %v", err)
}
defer os.RemoveAll(tempDir)

cases := []struct {
name string
options *AgentOptions
expectedClusterName string
expectedAgentName string
}{
{
name: "cluster name is specified",
options: &AgentOptions{SpokeClusterName: "cluster0"},
expectedClusterName: "cluster0",
},
{
name: "cluster name and agent name are in file",
options: &AgentOptions{HubKubeconfigDir: tempDir},
expectedClusterName: "cluster1",
expectedAgentName: "agent1",
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
if c.options.HubKubeconfigDir != "" {
testinghelpers.WriteFile(path.Join(tempDir, clientcert.ClusterNameFile), []byte(c.expectedClusterName))
testinghelpers.WriteFile(path.Join(tempDir, clientcert.AgentNameFile), []byte(c.expectedAgentName))
}
clusterName, agentName := c.options.getOrGenerateClusterAgentID()
if clusterName != c.expectedClusterName {
t.Errorf("expect cluster name %q but got %q", c.expectedClusterName, clusterName)
}

// agent name cannot be empty, it is either generated or from file
if agentName == "" {
t.Error("agent name should not be empty")
}

if c.expectedAgentName != "" && c.expectedAgentName != agentName {
t.Errorf("expect agent name %q but got %q", c.expectedAgentName, agentName)
}
})
}
}
@@ -14,19 +14,14 @@ var (
// DefaultHubWorkMutableFeatureGate is made up of multiple mutable feature-gates for work controller
DefaultHubWorkMutableFeatureGate featuregate.MutableFeatureGate = featuregate.NewFeatureGate()

// DefaultSpokeWorkMutableFeatureGate is made up of multiple mutable feature-gates for work agent.
DefaultSpokeWorkMutableFeatureGate featuregate.MutableFeatureGate = featuregate.NewFeatureGate()

// DefaultSpokeRegistrationMutableFeatureGate is made up of multiple mutable feature-gates for registration agent.
DefaultSpokeRegistrationMutableFeatureGate featuregate.MutableFeatureGate = featuregate.NewFeatureGate()

// DefaultHubRegistrationMutableFeatureGate made up of multiple mutable feature-gates for registration hub controller.
DefaultHubRegistrationMutableFeatureGate featuregate.MutableFeatureGate = featuregate.NewFeatureGate()

// SpokeMutableFeatureGate of multiple mutable feature-gates for agent
SpokeMutableFeatureGate = featuregate.NewFeatureGate()
)

func init() {
runtime.Must(DefaultHubWorkMutableFeatureGate.Add(ocmfeature.DefaultHubWorkFeatureGates))
runtime.Must(DefaultSpokeWorkMutableFeatureGate.Add(ocmfeature.DefaultSpokeWorkFeatureGates))
runtime.Must(DefaultSpokeRegistrationMutableFeatureGate.Add(ocmfeature.DefaultSpokeRegistrationFeatureGates))
runtime.Must(DefaultHubRegistrationMutableFeatureGate.Add(ocmfeature.DefaultHubRegistrationFeatureGates))
}
@@ -50,8 +50,8 @@ func ClusterManagerNamespace(clustermanagername string, mode operatorapiv1.InstallMode
return ClusterManagerDefaultNamespace
}

func KlusterletSecretQueueKeyFunc(klusterletLister operatorlister.KlusterletLister) factory.ObjectQueueKeyFunc {
return func(obj runtime.Object) string {
func KlusterletSecretQueueKeyFunc(klusterletLister operatorlister.KlusterletLister) factory.ObjectQueueKeysFunc {
return func(obj runtime.Object) []string {
accessor, _ := meta.Accessor(obj)
namespace := accessor.GetNamespace()
name := accessor.GetName()
@@ -60,24 +60,24 @@ func KlusterletSecretQueueKeyFunc(klusterletLister operatorlister.KlusterletList
interestedObjectFound = true
}
if !interestedObjectFound {
return ""
return []string{}
}

klusterlets, err := klusterletLister.List(labels.Everything())
if err != nil {
return ""
return []string{}
}

if klusterlet := FindKlusterletByNamespace(klusterlets, namespace); klusterlet != nil {
return klusterlet.Name
return []string{klusterlet.Name}
}

return ""
return []string{}
}
}

func KlusterletDeploymentQueueKeyFunc(klusterletLister operatorlister.KlusterletLister) factory.ObjectQueueKeyFunc {
return func(obj runtime.Object) string {
func KlusterletDeploymentQueueKeyFunc(klusterletLister operatorlister.KlusterletLister) factory.ObjectQueueKeysFunc {
return func(obj runtime.Object) []string {
accessor, _ := meta.Accessor(obj)
namespace := accessor.GetNamespace()
name := accessor.GetName()
@@ -86,24 +86,24 @@ func KlusterletDeploymentQueueKeyFunc(klusterletLister operatorlister.Klusterlet
interestedObjectFound = true
}
if !interestedObjectFound {
return ""
return []string{}
}

klusterlets, err := klusterletLister.List(labels.Everything())
if err != nil {
return ""
return []string{}
}

if klusterlet := FindKlusterletByNamespace(klusterlets, namespace); klusterlet != nil {
return klusterlet.Name
return []string{klusterlet.Name}
}

return ""
return []string{}
}
}

func ClusterManagerDeploymentQueueKeyFunc(clusterManagerLister operatorlister.ClusterManagerLister) factory.ObjectQueueKeyFunc {
return func(obj runtime.Object) string {
func ClusterManagerDeploymentQueueKeyFunc(clusterManagerLister operatorlister.ClusterManagerLister) factory.ObjectQueueKeysFunc {
return func(obj runtime.Object) []string {
accessor, _ := meta.Accessor(obj)
name := accessor.GetName()
namespace := accessor.GetNamespace()
@@ -116,47 +116,43 @@ func ClusterManagerDeploymentQueueKeyFunc(clusterManagerLister operatorlister.Cl
interestedObjectFound = true
}
if !interestedObjectFound {
return ""
return []string{}
}

clustermanagers, err := clusterManagerLister.List(labels.Everything())
if err != nil {
return ""
return []string{}
}

clustermanager, err := FindClusterManagerByNamespace(namespace, clustermanagers)
if err != nil {
return ""
return []string{}
}

return clustermanager.Name
return []string{clustermanager.Name}
}
}

func ClusterManagerQueueKeyFunc(clusterManagerLister operatorlister.ClusterManagerLister) factory.ObjectQueueKeyFunc {
func ClusterManagerQueueKeyFunc(clusterManagerLister operatorlister.ClusterManagerLister) factory.ObjectQueueKeysFunc {
return clusterManagerByNamespaceQueueKeyFunc(clusterManagerLister)
}

func ClusterManagerConfigmapQueueKeyFunc(clusterManagerLister operatorlister.ClusterManagerLister) factory.ObjectQueueKeyFunc {
return clusterManagerByNamespaceQueueKeyFunc(clusterManagerLister)
}

func clusterManagerByNamespaceQueueKeyFunc(clusterManagerLister operatorlister.ClusterManagerLister) factory.ObjectQueueKeyFunc {
return func(obj runtime.Object) string {
func clusterManagerByNamespaceQueueKeyFunc(clusterManagerLister operatorlister.ClusterManagerLister) factory.ObjectQueueKeysFunc {
return func(obj runtime.Object) []string {
accessor, _ := meta.Accessor(obj)
namespace := accessor.GetNamespace()

clustermanagers, err := clusterManagerLister.List(labels.Everything())
if err != nil {
return ""
return []string{}
}

clustermanager, err := FindClusterManagerByNamespace(namespace, clustermanagers)
if err != nil {
return ""
return []string{}
}

return clustermanager.Name
return []string{clustermanager.Name}
}
}
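For context (a sketch based on the signatures used above, not code from this commit): the library-go factory helpers move from a single-key form to a multi-key form, which is why every empty-string return becomes an empty slice.

```go
package sketch

import "k8s.io/apimachinery/pkg/runtime"

// Old shape: one queue key per event; "" meant "ignore this object".
type objectQueueKeyFunc func(obj runtime.Object) string

// New shape: zero or more queue keys per event; an empty slice means "ignore".
// This mirrors factory.ObjectQueueKeysFunc as used by the helpers above.
type objectQueueKeysFunc func(obj runtime.Object) []string
```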
@@ -1,15 +1,18 @@
package helpers

import (
"reflect"
"testing"
"time"

"github.com/openshift/library-go/pkg/controller/factory"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"

fakeoperatorclient "open-cluster-management.io/api/client/operator/clientset/versioned/fake"
operatorinformers "open-cluster-management.io/api/client/operator/informers/externalversions"
operatorlister "open-cluster-management.io/api/client/operator/listers/operator/v1"
operatorapiv1 "open-cluster-management.io/api/operator/v1"
)

@@ -57,31 +60,31 @@ func TestKlusterletSecretQueueKeyFunc(t *testing.T) {
name string
object runtime.Object
klusterlet *operatorapiv1.Klusterlet
expectedKey string
expectedKey []string
}{
{
name: "key by hub config secret",
object: newSecret(HubKubeConfig, "test"),
klusterlet: newKlusterlet("testklusterlet", "test", ""),
expectedKey: "testklusterlet",
expectedKey: []string{"testklusterlet"},
},
{
name: "key by bootstrap secret",
object: newSecret(BootstrapHubKubeConfig, "test"),
klusterlet: newKlusterlet("testklusterlet", "test", ""),
expectedKey: "testklusterlet",
expectedKey: []string{"testklusterlet"},
},
{
name: "key by wrong secret",
object: newSecret("dummy", "test"),
klusterlet: newKlusterlet("testklusterlet", "test", ""),
expectedKey: "",
expectedKey: []string{},
},
{
name: "key by klusterlet with empty namespace",
object: newSecret(BootstrapHubKubeConfig, KlusterletDefaultNamespace),
klusterlet: newKlusterlet("testklusterlet", "", ""),
expectedKey: "testklusterlet",
expectedKey: []string{"testklusterlet"},
},
}

@@ -95,8 +98,8 @@ func TestKlusterletSecretQueueKeyFunc(t *testing.T) {
}
keyFunc := KlusterletSecretQueueKeyFunc(operatorInformers.Operator().V1().Klusterlets().Lister())
actualKey := keyFunc(c.object)
if actualKey != c.expectedKey {
t.Errorf("Queued key is not correct: actual %s, expected %s", actualKey, c.expectedKey)
if !reflect.DeepEqual(actualKey, c.expectedKey) {
t.Errorf("Queued key is not correct: actual %v, expected %v", actualKey, c.expectedKey)
}
})
}
@@ -107,31 +110,31 @@ func TestKlusterletDeploymentQueueKeyFunc(t *testing.T) {
name string
object runtime.Object
klusterlet *operatorapiv1.Klusterlet
expectedKey string
expectedKey []string
}{
{
name: "key by work agent",
object: newDeployment("testklusterlet-work-agent", "test", 0),
klusterlet: newKlusterlet("testklusterlet", "test", ""),
expectedKey: "testklusterlet",
expectedKey: []string{"testklusterlet"},
},
{
name: "key by registrartion agent",
object: newDeployment("testklusterlet-registration-agent", "test", 0),
klusterlet: newKlusterlet("testklusterlet", "test", ""),
expectedKey: "testklusterlet",
expectedKey: []string{"testklusterlet"},
},
{
name: "key by wrong deployment",
object: newDeployment("dummy", "test", 0),
klusterlet: newKlusterlet("testklusterlet", "test", ""),
expectedKey: "",
expectedKey: []string{},
},
{
name: "key by klusterlet with empty namespace",
object: newDeployment("testklusterlet-work-agent", KlusterletDefaultNamespace, 0),
klusterlet: newKlusterlet("testklusterlet", "", ""),
expectedKey: "testklusterlet",
expectedKey: []string{"testklusterlet"},
},
}

@@ -145,61 +148,97 @@ func TestKlusterletDeploymentQueueKeyFunc(t *testing.T) {
}
keyFunc := KlusterletDeploymentQueueKeyFunc(operatorInformers.Operator().V1().Klusterlets().Lister())
actualKey := keyFunc(c.object)
if actualKey != c.expectedKey {
t.Errorf("Queued key is not correct: actual %s, expected %s", actualKey, c.expectedKey)
if !reflect.DeepEqual(actualKey, c.expectedKey) {
t.Errorf("Queued key is not correct: actual %v, expected %v", actualKey, c.expectedKey)
}
})
}
}

func TestClusterManagerDeploymentQueueKeyFunc(t *testing.T) {
func TestClusterManagerQueueKeyFunc(t *testing.T) {
cases := []struct {
name string
object runtime.Object
queueFunc func(clusterManagerLister operatorlister.ClusterManagerLister) factory.ObjectQueueKeysFunc
clusterManager *operatorapiv1.ClusterManager
expectedKey string
expectedKey []string
}{
{
name: "key by registrartion controller",
object: newDeployment("testhub-registration-controller", ClusterManagerDefaultNamespace, 0),
clusterManager: newClusterManager("testhub", operatorapiv1.InstallModeDefault),
expectedKey: "testhub",
queueFunc: ClusterManagerDeploymentQueueKeyFunc,
expectedKey: []string{"testhub"},
},
{
name: "key by registrartion webhook",
object: newDeployment("testhub-registration-webhook", ClusterManagerDefaultNamespace, 0),
clusterManager: newClusterManager("testhub", operatorapiv1.InstallModeDefault),
expectedKey: "testhub",
queueFunc: ClusterManagerDeploymentQueueKeyFunc,
expectedKey: []string{"testhub"},
},
{
name: "key by work webhook",
object: newDeployment("testhub-work-webhook", ClusterManagerDefaultNamespace, 0),
clusterManager: newClusterManager("testhub", operatorapiv1.InstallModeDefault),
expectedKey: "testhub",
queueFunc: ClusterManagerDeploymentQueueKeyFunc,
expectedKey: []string{"testhub"},
},
{
name: "key by placement controller",
object: newDeployment("testhub-placement-controller", ClusterManagerDefaultNamespace, 0),
clusterManager: newClusterManager("testhub", operatorapiv1.InstallModeDefault),
expectedKey: "testhub",
queueFunc: ClusterManagerDeploymentQueueKeyFunc,
expectedKey: []string{"testhub"},
},
{
name: "key by wrong deployment",
object: newDeployment("dummy", "test", 0),
clusterManager: newClusterManager("testhub", operatorapiv1.InstallModeDefault),
expectedKey: "",
queueFunc: ClusterManagerDeploymentQueueKeyFunc,
expectedKey: []string{},
},
{
name: "key by registrartion controller in hosted mode, namespace not match",
object: newDeployment("testhub-registration-controller", ClusterManagerDefaultNamespace, 0),
clusterManager: newClusterManager("testhub", operatorapiv1.InstallModeHosted),
expectedKey: "",
queueFunc: ClusterManagerDeploymentQueueKeyFunc,
expectedKey: []string{},
},
{
name: "key by registrartion controller in hosted mode, namespace match",
object: newDeployment("testhub-registration-controller", "testhub", 0),
clusterManager: newClusterManager("testhub", operatorapiv1.InstallModeHosted),
expectedKey: "testhub",
queueFunc: ClusterManagerDeploymentQueueKeyFunc,
expectedKey: []string{"testhub"},
},
{
name: "key by namespace in default mode",
object: newSecret("test", ClusterManagerDefaultNamespace),
clusterManager: newClusterManager("testhub", operatorapiv1.InstallModeDefault),
queueFunc: ClusterManagerQueueKeyFunc,
expectedKey: []string{"testhub"},
},
{
name: "key by unmatchged namespace in default mode",
object: newSecret("test", "test"),
clusterManager: newClusterManager("testhub", operatorapiv1.InstallModeDefault),
queueFunc: ClusterManagerQueueKeyFunc,
expectedKey: []string{},
},
{
name: "key by namespace in hosted mode",
object: newSecret("test", "testhub"),
clusterManager: newClusterManager("testhub", operatorapiv1.InstallModeHosted),
queueFunc: ClusterManagerQueueKeyFunc,
expectedKey: []string{"testhub"},
},
{
name: "key by unmatchged namespace in default mode",
object: newSecret("test", "test"),
clusterManager: newClusterManager("testhub", operatorapiv1.InstallModeHosted),
queueFunc: ClusterManagerQueueKeyFunc,
expectedKey: []string{},
},
}

@@ -211,10 +250,10 @@ func TestClusterManagerDeploymentQueueKeyFunc(t *testing.T) {
if err := store.Add(c.clusterManager); err != nil {
t.Fatal(err)
}
keyFunc := ClusterManagerDeploymentQueueKeyFunc(operatorInformers.Operator().V1().ClusterManagers().Lister())
keyFunc := c.queueFunc(operatorInformers.Operator().V1().ClusterManagers().Lister())
actualKey := keyFunc(c.object)
if actualKey != c.expectedKey {
t.Errorf("Queued key is not correct: actual %s, expected %s; test name:%s", actualKey, c.expectedKey, c.name)
if !reflect.DeepEqual(actualKey, c.expectedKey) {
t.Errorf("Queued key is not correct: actual %v, expected %v; test name:%s", actualKey, c.expectedKey, c.name)
}
})
}
@@ -79,7 +79,7 @@ func NewCertRotationController(
ResyncEvery(ResyncInterval).
WithSync(c.sync).
WithInformersQueueKeysFunc(queue.QueueKeyByMetaName, clusterManagerInformer.Informer()).
WithInformersQueueKeyFunc(helpers.ClusterManagerQueueKeyFunc(c.clusterManagerLister),
WithInformersQueueKeysFunc(helpers.ClusterManagerQueueKeyFunc(c.clusterManagerLister),
configMapInformer.Informer(),
secretInformers[helpers.SignerSecret].Informer(),
secretInformers[helpers.RegistrationWebhookSecret].Informer(),
@@ -101,9 +101,9 @@ func NewClusterManagerController(

return factory.New().WithSync(controller.sync).
ResyncEvery(3*time.Minute).
WithInformersQueueKeyFunc(helpers.ClusterManagerDeploymentQueueKeyFunc(controller.clusterManagerLister), deploymentInformer.Informer()).
WithFilteredEventsInformersQueueKeyFunc(
helpers.ClusterManagerConfigmapQueueKeyFunc(controller.clusterManagerLister),
WithInformersQueueKeysFunc(helpers.ClusterManagerDeploymentQueueKeyFunc(controller.clusterManagerLister), deploymentInformer.Informer()).
WithFilteredEventsInformersQueueKeysFunc(
helpers.ClusterManagerQueueKeyFunc(controller.clusterManagerLister),
queue.FilterByNames(helpers.CaBundleConfigmap),
configMapInformer.Informer()).
WithInformersQueueKeysFunc(queue.QueueKeyByMetaName, clusterManagerInformer.Informer()).
@@ -50,7 +50,7 @@ func NewClusterManagerStatusController(
}

return factory.New().WithSync(controller.sync).
WithInformersQueueKeyFunc(
WithInformersQueueKeysFunc(
helpers.ClusterManagerDeploymentQueueKeyFunc(controller.clusterManagerLister), deploymentInformer.Informer()).
WithInformersQueueKeysFunc(queue.QueueKeyByMetaName, clusterManagerInformer.Informer()).
ToController("ClusterManagerStatusController", recorder)
@@ -64,11 +64,11 @@ func NewKlusterletCleanupController(
}

return factory.New().WithSync(controller.sync).
WithInformersQueueKeyFunc(helpers.KlusterletSecretQueueKeyFunc(controller.klusterletLister),
WithInformersQueueKeysFunc(helpers.KlusterletSecretQueueKeyFunc(controller.klusterletLister),
secretInformers[helpers.HubKubeConfig].Informer(),
secretInformers[helpers.BootstrapHubKubeConfig].Informer(),
secretInformers[helpers.ExternalManagedKubeConfig].Informer()).
WithInformersQueueKeyFunc(helpers.KlusterletDeploymentQueueKeyFunc(controller.klusterletLister), deploymentInformer.Informer()).
WithInformersQueueKeysFunc(helpers.KlusterletDeploymentQueueKeyFunc(controller.klusterletLister), deploymentInformer.Informer()).
WithInformersQueueKeysFunc(queue.QueueKeyByMetaName, klusterletInformer.Informer()).
ToController("KlusterletCleanupController", recorder)
}
@@ -112,6 +112,9 @@ func (n *klusterletCleanupController) sync(ctx context.Context, controllerContext
ExternalManagedKubeConfigWorkSecret: helpers.ExternalManagedKubeConfigWork,
InstallMode: klusterlet.Spec.DeployOption.Mode,
HubApiServerHostAlias: klusterlet.Spec.HubApiServerHostAlias,

RegistrationServiceAccount: serviceAccountName("registration-sa", klusterlet),
WorkServiceAccount: serviceAccountName("work-sa", klusterlet),
}

reconcilers := []klusterletReconcile{
@@ -96,11 +96,12 @@ func NewKlusterletController(
}

return factory.New().WithSync(controller.sync).
WithInformersQueueKeyFunc(helpers.KlusterletSecretQueueKeyFunc(controller.klusterletLister),
WithInformersQueueKeysFunc(helpers.KlusterletSecretQueueKeyFunc(controller.klusterletLister),
secretInformers[helpers.HubKubeConfig].Informer(),
secretInformers[helpers.BootstrapHubKubeConfig].Informer(),
secretInformers[helpers.ExternalManagedKubeConfig].Informer()).
WithInformersQueueKeyFunc(helpers.KlusterletDeploymentQueueKeyFunc(controller.klusterletLister), deploymentInformer.Informer()).
WithInformersQueueKeysFunc(helpers.KlusterletDeploymentQueueKeyFunc(
controller.klusterletLister), deploymentInformer.Informer()).
WithInformersQueueKeysFunc(queue.QueueKeyByMetaName, klusterletInformer.Informer()).
ToController("KlusterletController", recorder)
}
@@ -123,6 +124,9 @@ type klusterletConfig struct {
AgentID string
RegistrationImage string
WorkImage string
SingletonImage string
RegistrationServiceAccount string
WorkServiceAccount string
ClusterName string
ExternalServerURL string
HubKubeConfigSecret string
@@ -163,6 +167,7 @@ func (n *klusterletController) sync(ctx context.Context, controllerContext factory
RegistrationImage: klusterlet.Spec.RegistrationImagePullSpec,
WorkImage: klusterlet.Spec.WorkImagePullSpec,
ClusterName: klusterlet.Spec.ClusterName,
SingletonImage: klusterlet.Spec.ImagePullSpec,
BootStrapKubeConfigSecret: helpers.BootstrapHubKubeConfig,
HubKubeConfigSecret: helpers.HubKubeConfig,
ExternalServerURL: getServersFromKlusterlet(klusterlet),
@@ -174,6 +179,9 @@ func (n *klusterletController) sync(ctx context.Context, controllerContext factory
ExternalManagedKubeConfigWorkSecret: helpers.ExternalManagedKubeConfigWork,
InstallMode: klusterlet.Spec.DeployOption.Mode,
HubApiServerHostAlias: klusterlet.Spec.HubApiServerHostAlias,

RegistrationServiceAccount: serviceAccountName("registration-sa", klusterlet),
WorkServiceAccount: serviceAccountName("work-sa", klusterlet),
}

managedClusterClients, err := n.managedClusterClientsBuilder.
@@ -384,3 +392,12 @@ func ensureNamespace(ctx context.Context, kubeClient kubernetes.Interface, klust
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func serviceAccountName(suffix string, klusterlet *operatorapiv1.Klusterlet) string {
|
||||
// in singleton mode, we only need one sa, so the name of work and registration sa are
|
||||
// the same.
|
||||
if klusterlet.Spec.DeployOption.Mode == operatorapiv1.InstallModeSingleton {
|
||||
return fmt.Sprintf("%s-agent-sa", klusterlet.Name)
|
||||
}
|
||||
return fmt.Sprintf("%s-%s", klusterlet.Name, suffix)
|
||||
}
|
||||
|
||||
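The helper above is unexported, so the snippet below is a standalone illustration (not part of this commit) that copies its logic to show how the service account names resolve in Singleton versus Default mode; the import paths are the upstream OCM API module and are assumed here.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	operatorapiv1 "open-cluster-management.io/api/operator/v1"
)

// serviceAccountName mirrors the unexported operator helper for illustration only.
func serviceAccountName(suffix string, klusterlet *operatorapiv1.Klusterlet) string {
	// in singleton mode a single service account backs both registration and work
	if klusterlet.Spec.DeployOption.Mode == operatorapiv1.InstallModeSingleton {
		return fmt.Sprintf("%s-agent-sa", klusterlet.Name)
	}
	return fmt.Sprintf("%s-%s", klusterlet.Name, suffix)
}

func main() {
	k := &operatorapiv1.Klusterlet{ObjectMeta: metav1.ObjectMeta{Name: "klusterlet"}}

	k.Spec.DeployOption.Mode = operatorapiv1.InstallModeSingleton
	fmt.Println(serviceAccountName("registration-sa", k)) // klusterlet-agent-sa
	fmt.Println(serviceAccountName("work-sa", k))         // klusterlet-agent-sa

	k.Spec.DeployOption.Mode = operatorapiv1.InstallModeDefault
	fmt.Println(serviceAccountName("registration-sa", k)) // klusterlet-registration-sa
}
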
@@ -92,6 +92,7 @@ func newKlusterlet(name, namespace, clustername string) *operatorapiv1.Klusterle
Spec: operatorapiv1.KlusterletSpec{
RegistrationImagePullSpec: "testregistration",
WorkImagePullSpec: "testwork",
ImagePullSpec: "testagent",
ClusterName: clustername,
Namespace: namespace,
ExternalServerURLs: []operatorapiv1.ServerURL{},
@@ -206,8 +207,8 @@ func newTestControllerHosted(t *testing.T, klusterlet *operatorapiv1.Klusterlet,
kubeVersion, _ := version.ParseGeneric("v1.18.0")

klusterletNamespace := helpers.KlusterletNamespace(klusterlet)
saRegistrationSecret := newServiceAccountSecret(fmt.Sprintf("%s-token", registrationServiceAccountName(klusterlet.Name)), klusterlet.Name)
saWorkSecret := newServiceAccountSecret(fmt.Sprintf("%s-token", workServiceAccountName(klusterlet.Name)), klusterlet.Name)
saRegistrationSecret := newServiceAccountSecret(fmt.Sprintf("%s-token", serviceAccountName("registration-sa", klusterlet)), klusterlet.Name)
saWorkSecret := newServiceAccountSecret(fmt.Sprintf("%s-token", serviceAccountName("work-sa", klusterlet)), klusterlet.Name)
fakeManagedKubeClient := fakekube.NewSimpleClientset()
getRegistrationServiceAccountCount := 0
getWorkServiceAccountCount := 0
@@ -218,7 +219,7 @@ func newTestControllerHosted(t *testing.T, klusterlet *operatorapiv1.Klusterlet,
fakeManagedKubeClient.PrependReactor("get", "serviceaccounts", func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) {
name := action.(clienttesting.GetAction).GetName()
namespace := action.(clienttesting.GetAction).GetNamespace()
if namespace == klusterletNamespace && name == registrationServiceAccountName(klusterlet.Name) {
if namespace == klusterletNamespace && name == serviceAccountName("registration-sa", klusterlet) {
getRegistrationServiceAccountCount++
if getRegistrationServiceAccountCount > 1 {
sa := newServiceAccount(name, klusterletNamespace, saRegistrationSecret.Name)
@@ -227,7 +228,7 @@ func newTestControllerHosted(t *testing.T, klusterlet *operatorapiv1.Klusterlet,
}
}

if namespace == klusterletNamespace && name == workServiceAccountName(klusterlet.Name) {
if namespace == klusterletNamespace && name == serviceAccountName("work-sa", klusterlet) {
getWorkServiceAccountCount++
if getWorkServiceAccountCount > 1 {
sa := newServiceAccount(name, klusterletNamespace, saWorkSecret.Name)
@@ -447,8 +448,16 @@ func ensureObject(t *testing.T, object runtime.Object, klusterlet *operatorapiv1
t.Errorf("Image does not match to the expected.")
return
}
} else if strings.Contains(access.GetName(), "agent") {
testingcommon.AssertEqualNameNamespace(
t, access.GetName(), access.GetNamespace(),
fmt.Sprintf("%s-agent", klusterlet.Name), namespace)
if klusterlet.Spec.ImagePullSpec != o.Spec.Template.Spec.Containers[0].Image {
t.Errorf("Image does not match to the expected.")
return
}
} else {
t.Errorf("Unexpected deployment")
t.Errorf("unexpected deployment")
return
}
}
@@ -515,6 +524,67 @@ func TestSyncDeploy(t *testing.T) {
)
}

func TestSyncDeploySingleton(t *testing.T) {
klusterlet := newKlusterlet("klusterlet", "testns", "cluster1")
klusterlet.Spec.DeployOption.Mode = operatorapiv1.InstallModeSingleton
bootStrapSecret := newSecret(helpers.BootstrapHubKubeConfig, "testns")
hubKubeConfigSecret := newSecret(helpers.HubKubeConfig, "testns")
hubKubeConfigSecret.Data["kubeconfig"] = []byte("dummuykubeconnfig")
namespace := newNamespace("testns")
controller := newTestController(t, klusterlet, nil, bootStrapSecret, hubKubeConfigSecret, namespace)
syncContext := testingcommon.NewFakeSyncContext(t, "klusterlet")

err := controller.controller.sync(context.TODO(), syncContext)
if err != nil {
t.Errorf("Expected no error when syncing, %v", err)
}

createObjects := []runtime.Object{}
kubeActions := controller.kubeClient.Actions()
for _, action := range kubeActions {
if action.GetVerb() == "create" {
object := action.(clienttesting.CreateActionImpl).Object
createObjects = append(createObjects, object)

}
}

// Check if resources are created as expected
// 10 managed static manifests + 10 management static manifests - 1 service account manifest + 1 addon namespace + 1 deployment
if len(createObjects) != 21 {
t.Errorf("Expect 21 objects created in the sync loop, actual %d", len(createObjects))
}
for _, object := range createObjects {
ensureObject(t, object, klusterlet)
}

apiExtenstionAction := controller.apiExtensionClient.Actions()
createCRDObjects := []runtime.Object{}
for _, action := range apiExtenstionAction {
if action.GetVerb() == "create" && action.GetResource().Resource == "customresourcedefinitions" {
object := action.(clienttesting.CreateActionImpl).Object
createCRDObjects = append(createCRDObjects, object)
}
}
if len(createCRDObjects) != 2 {
t.Errorf("Expect 2 objects created in the sync loop, actual %d", len(createCRDObjects))
}

operatorAction := controller.operatorClient.Actions()
testingcommon.AssertActions(t, operatorAction, "patch")
klusterlet = &operatorapiv1.Klusterlet{}
patchData := operatorAction[0].(clienttesting.PatchActionImpl).Patch
err = json.Unmarshal(patchData, klusterlet)
if err != nil {
t.Fatal(err)
}
testinghelper.AssertOnlyConditions(
t, klusterlet,
testinghelper.NamedCondition(klusterletApplied, "KlusterletApplied", metav1.ConditionTrue),
testinghelper.NamedCondition(helpers.FeatureGatesTypeValid, helpers.FeatureGatesReasonAllValid, metav1.ConditionTrue),
)
}

// TestSyncDeployHosted tests deployment of klusterlet components in hosted mode
func TestSyncDeployHosted(t *testing.T) {
klusterlet := newKlusterletHosted("klusterlet", "testns", "cluster1")

@@ -33,15 +33,19 @@ type runtimeReconcile struct {

func (r *runtimeReconcile) reconcile(ctx context.Context, klusterlet *operatorapiv1.Klusterlet,
config klusterletConfig) (*operatorapiv1.Klusterlet, reconcileState, error) {
if config.InstallMode == operatorapiv1.InstallModeSingleton {
return r.installSingletonAgent(ctx, klusterlet, config)
}

if config.InstallMode == operatorapiv1.InstallModeHosted {
// Create managed config secret for registration and work.
if err := r.createManagedClusterKubeconfig(ctx, klusterlet, config.KlusterletNamespace, config.AgentNamespace,
registrationServiceAccountName(klusterlet.Name), config.ExternalManagedKubeConfigRegistrationSecret,
config.RegistrationServiceAccount, config.ExternalManagedKubeConfigRegistrationSecret,
r.recorder); err != nil {
return klusterlet, reconcileStop, err
}
if err := r.createManagedClusterKubeconfig(ctx, klusterlet, config.KlusterletNamespace, config.AgentNamespace,
workServiceAccountName(klusterlet.Name), config.ExternalManagedKubeConfigWorkSecret,
config.WorkServiceAccount, config.ExternalManagedKubeConfigWorkSecret,
r.recorder); err != nil {
return klusterlet, reconcileStop, err
}
@@ -126,6 +130,35 @@ func (r *runtimeReconcile) reconcile(ctx context.Context, klusterlet *operatorap
return klusterlet, reconcileContinue, nil
}

func (r *runtimeReconcile) installSingletonAgent(ctx context.Context, klusterlet *operatorapiv1.Klusterlet,
config klusterletConfig) (*operatorapiv1.Klusterlet, reconcileState, error) {
// Deploy singleton agent
_, generationStatus, err := helpers.ApplyDeployment(
ctx,
r.kubeClient,
klusterlet.Status.Generations,
klusterlet.Spec.NodePlacement,
func(name string) ([]byte, error) {
template, err := manifests.KlusterletManifestFiles.ReadFile(name)
if err != nil {
return nil, err
}
objData := assets.MustCreateAssetFromTemplate(name, template, config).Data
helpers.SetRelatedResourcesStatusesWithObj(&klusterlet.Status.RelatedResources, objData)
return objData, nil
},
r.recorder,
"klusterlet/management/klusterlet-agent-deployment.yaml")

if err != nil {
// TODO update condition
return klusterlet, reconcileStop, err
}

helpers.SetGenerationStatuses(&klusterlet.Status.Generations, generationStatus)
return klusterlet, reconcileContinue, nil
}

func (r *runtimeReconcile) createManagedClusterKubeconfig(
ctx context.Context,
klusterlet *operatorapiv1.Klusterlet,
@@ -170,6 +203,9 @@ func (r *runtimeReconcile) clean(ctx context.Context, klusterlet *operatorapiv1.
fmt.Sprintf("%s-registration-agent", config.KlusterletName),
fmt.Sprintf("%s-work-agent", config.KlusterletName),
}
if klusterlet.Spec.DeployOption.Mode == operatorapiv1.InstallModeSingleton {
deployments = []string{fmt.Sprintf("%s-agent", config.KlusterletName)}
}
for _, deployment := range deployments {
err := r.kubeClient.AppsV1().Deployments(config.AgentNamespace).Delete(ctx, deployment, metav1.DeleteOptions{})
if err != nil && !errors.IsNotFound(err) {
@@ -180,13 +216,3 @@ func (r *runtimeReconcile) clean(ctx context.Context, klusterlet *operatorapiv1.

return klusterlet, reconcileContinue, nil
}

// registrationServiceAccountName splices the name of registration service account
func registrationServiceAccountName(klusterletName string) string {
return fmt.Sprintf("%s-registration-sa", klusterletName)
}

// workServiceAccountName splices the name of work service account
func workServiceAccountName(klusterletName string) string {
return fmt.Sprintf("%s-work-sa", klusterletName)
}

@@ -66,7 +66,7 @@ func NewKlusterletSSARController(
}

return factory.New().WithSync(controller.sync).
WithInformersQueueKeyFunc(helpers.KlusterletSecretQueueKeyFunc(controller.klusterletLister),
WithInformersQueueKeysFunc(helpers.KlusterletSecretQueueKeyFunc(controller.klusterletLister),
secretInformers[helpers.HubKubeConfig].Informer(),
secretInformers[helpers.BootstrapHubKubeConfig].Informer(),
secretInformers[helpers.ExternalManagedKubeConfig].Informer()).
@@ -247,7 +247,7 @@ func checkBootstrapSecret(ctx context.Context, kubeClient kubernetes.Interface,
}

func getBootstrapSSARs() []authorizationv1.SelfSubjectAccessReview {
reviews := []authorizationv1.SelfSubjectAccessReview{}
var reviews []authorizationv1.SelfSubjectAccessReview
clusterResource := authorizationv1.ResourceAttributes{
Group: "cluster.open-cluster-management.io",
Resource: "managedclusters",
@@ -335,7 +335,7 @@ func checkHubConfigSecret(ctx context.Context, kubeClient kubernetes.Interface,
}

func getHubConfigSSARs(clusterName string) []authorizationv1.SelfSubjectAccessReview {
reviews := []authorizationv1.SelfSubjectAccessReview{}
var reviews []authorizationv1.SelfSubjectAccessReview

// registration resources
certResource := authorizationv1.ResourceAttributes{
@@ -435,7 +435,7 @@ func buildKubeClientWithSecret(secret *corev1.Secret) (kubernetes.Interface, str
}

func generateSelfSubjectAccessReviews(resource authorizationv1.ResourceAttributes, verbs ...string) []authorizationv1.SelfSubjectAccessReview {
reviews := []authorizationv1.SelfSubjectAccessReview{}
var reviews []authorizationv1.SelfSubjectAccessReview
for _, verb := range verbs {
reviews = append(reviews, authorizationv1.SelfSubjectAccessReview{
Spec: authorizationv1.SelfSubjectAccessReviewSpec{

@@ -54,7 +54,7 @@ func NewKlusterletStatusController(
klusterletLister: klusterletInformer.Lister(),
}
return factory.New().WithSync(controller.sync).
WithInformersQueueKeyFunc(helpers.KlusterletDeploymentQueueKeyFunc(controller.klusterletLister), deploymentInformer.Informer()).
WithInformersQueueKeysFunc(helpers.KlusterletDeploymentQueueKeyFunc(controller.klusterletLister), deploymentInformer.Informer()).
WithInformersQueueKeysFunc(queue.QueueKeyByMetaName, klusterletInformer.Informer()).
ToController("KlusterletStatusController", recorder)
}
@@ -85,6 +85,11 @@ func (k *klusterletStatusController) sync(ctx context.Context, controllerContext
registrationDeploymentName := fmt.Sprintf("%s-registration-agent", klusterlet.Name)
workDeploymentName := fmt.Sprintf("%s-work-agent", klusterlet.Name)

if klusterlet.Spec.DeployOption.Mode == operatorapiv1.InstallModeSingleton {
registrationDeploymentName = fmt.Sprintf("%s-agent", klusterlet.Name)
workDeploymentName = registrationDeploymentName
}

availableCondition := checkAgentsDeploymentAvailable(
ctx, k.kubeClient,
[]klusterletAgent{

@@ -165,6 +165,22 @@ func TestSync(t *testing.T) {
testinghelper.NamedCondition(klusterletWorkDesiredDegraded, "DeploymentsFunctional", metav1.ConditionFalse),
},
},
{
name: "Available & Desired with singleton",
object: []runtime.Object{
newDeployment("testklusterlet-agent", "test", 3, 3),
},
klusterlet: func() *operatorapiv1.Klusterlet {
k := newKlusterlet("testklusterlet", "test", "cluster1")
k.Spec.DeployOption.Mode = operatorapiv1.InstallModeSingleton
return k
}(),
expectedConditions: []metav1.Condition{
testinghelper.NamedCondition(klusterletAvailable, "klusterletAvailable", metav1.ConditionTrue),
testinghelper.NamedCondition(klusterletRegistrationDesiredDegraded, "DeploymentsFunctional", metav1.ConditionFalse),
testinghelper.NamedCondition(klusterletWorkDesiredDegraded, "DeploymentsFunctional", metav1.ConditionFalse),
},
},
}

for _, c := range cases {

@@ -259,7 +259,7 @@ func (v *v1CSRControl) get(name string) (metav1.Object, error) {
}

func NewCSRControl(hubCSRInformer certificatesinformers.Interface, hubKubeClient kubernetes.Interface) (CSRControl, error) {
if features.DefaultSpokeRegistrationMutableFeatureGate.Enabled(ocmfeature.V1beta1CSRAPICompatibility) {
if features.SpokeMutableFeatureGate.Enabled(ocmfeature.V1beta1CSRAPICompatibility) {
v1CSRSupported, v1beta1CSRSupported, err := helpers.IsCSRSupported(hubKubeClient)
if err != nil {
return nil, err

@@ -28,7 +28,7 @@ type claimReconcile struct {
}

func (r *claimReconcile) reconcile(ctx context.Context, cluster *clusterv1.ManagedCluster) (*clusterv1.ManagedCluster, reconcileState, error) {
if !features.DefaultSpokeRegistrationMutableFeatureGate.Enabled(ocmfeature.ClusterClaim) {
if !features.SpokeMutableFeatureGate.Enabled(ocmfeature.ClusterClaim) {
return cluster, reconcileContinue, nil
}
// current managed cluster has not joined the hub yet, do nothing.

@@ -10,6 +10,7 @@ import (
"github.com/openshift/library-go/pkg/operator/events/eventstesting"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
kubeinformers "k8s.io/client-go/informers"
kubefake "k8s.io/client-go/kubernetes/fake"
clienttesting "k8s.io/client-go/testing"
@@ -18,12 +19,15 @@ import (
clusterinformers "open-cluster-management.io/api/client/cluster/informers/externalversions"
clusterv1 "open-cluster-management.io/api/cluster/v1"
clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1"
ocmfeature "open-cluster-management.io/api/feature"

testingcommon "open-cluster-management.io/ocm/pkg/common/testing"
"open-cluster-management.io/ocm/pkg/features"
testinghelpers "open-cluster-management.io/ocm/pkg/registration/helpers/testing"
)

func TestSync(t *testing.T) {
utilruntime.Must(features.SpokeMutableFeatureGate.Add(ocmfeature.DefaultSpokeRegistrationFeatureGates))
cases := []struct {
name string
cluster runtime.Object

72
pkg/registration/spoke/options.go
Normal file
@@ -0,0 +1,72 @@
package spoke

import (
"fmt"
"time"

"github.com/pkg/errors"
"github.com/spf13/pflag"

"open-cluster-management.io/ocm/pkg/registration/helpers"
)

// SpokeAgentOptions holds configuration for spoke cluster agent
type SpokeAgentOptions struct {
BootstrapKubeconfig string
HubKubeconfigSecret string
SpokeExternalServerURLs []string
ClusterHealthCheckPeriod time.Duration
MaxCustomClusterClaims int
ClientCertExpirationSeconds int32
}

func NewSpokeAgentOptions() *SpokeAgentOptions {
return &SpokeAgentOptions{
HubKubeconfigSecret: "hub-kubeconfig-secret",
ClusterHealthCheckPeriod: 1 * time.Minute,
MaxCustomClusterClaims: 20,
}
}

// AddFlags registers flags for Agent
func (o *SpokeAgentOptions) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&o.BootstrapKubeconfig, "bootstrap-kubeconfig", o.BootstrapKubeconfig,
"The path of the kubeconfig file for agent bootstrap.")
fs.StringVar(&o.HubKubeconfigSecret, "hub-kubeconfig-secret", o.HubKubeconfigSecret,
"The name of secret in component namespace storing kubeconfig for hub.")
fs.StringArrayVar(&o.SpokeExternalServerURLs, "spoke-external-server-urls", o.SpokeExternalServerURLs,
"A list of reachable spoke cluster api server URLs for hub cluster.")
fs.DurationVar(&o.ClusterHealthCheckPeriod, "cluster-healthcheck-period", o.ClusterHealthCheckPeriod,
"The period to check managed cluster kube-apiserver health")
fs.IntVar(&o.MaxCustomClusterClaims, "max-custom-cluster-claims", o.MaxCustomClusterClaims,
"The max number of custom cluster claims to expose.")
fs.Int32Var(&o.ClientCertExpirationSeconds, "client-cert-expiration-seconds", o.ClientCertExpirationSeconds,
"The requested duration in seconds of validity of the issued client certificate. If this is not set, "+
"the value of --cluster-signing-duration command-line flag of the kube-controller-manager will be used.")
}

// Validate verifies the inputs.
func (o *SpokeAgentOptions) Validate() error {
if o.BootstrapKubeconfig == "" {
return errors.New("bootstrap-kubeconfig is required")
}

// if SpokeExternalServerURLs is specified, validate every URL in it; each spoke external server URL is expected to be https
if len(o.SpokeExternalServerURLs) != 0 {
for _, serverURL := range o.SpokeExternalServerURLs {
if !helpers.IsValidHTTPSURL(serverURL) {
return fmt.Errorf("%q is invalid", serverURL)
}
}
}

if o.ClusterHealthCheckPeriod <= 0 {
return errors.New("cluster healthcheck period must greater than zero")
}

if o.ClientCertExpirationSeconds != 0 && o.ClientCertExpirationSeconds < 3600 {
return errors.New("client certificate expiration seconds must greater or qual to 3600")
}

return nil
}
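As a hedged usage sketch (not part of the commit), the new options type can be wired into a command roughly like this; the flag set name and the parsed argument are made up for illustration.

package main

import (
	"fmt"
	"os"

	"github.com/spf13/pflag"

	"open-cluster-management.io/ocm/pkg/registration/spoke"
)

func main() {
	opts := spoke.NewSpokeAgentOptions()

	// register the agent flags on a throwaway flag set
	fs := pflag.NewFlagSet("registration-agent", pflag.ExitOnError)
	opts.AddFlags(fs)
	_ = fs.Parse([]string{"--bootstrap-kubeconfig=/spoke/bootstrap/kubeconfig"})

	if err := opts.Validate(); err != nil {
		fmt.Fprintln(os.Stderr, "invalid options:", err)
		os.Exit(1)
	}
}
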
@@ -2,7 +2,6 @@ package spoke
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
@@ -11,15 +10,11 @@ import (
|
||||
"github.com/openshift/library-go/pkg/controller/controllercmd"
|
||||
"github.com/openshift/library-go/pkg/controller/factory"
|
||||
"github.com/openshift/library-go/pkg/operator/events"
|
||||
"github.com/spf13/pflag"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
utilrand "k8s.io/apimachinery/pkg/util/rand"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
"k8s.io/klog/v2"
|
||||
@@ -34,46 +29,26 @@ import (
|
||||
commonoptions "open-cluster-management.io/ocm/pkg/common/options"
|
||||
"open-cluster-management.io/ocm/pkg/features"
|
||||
"open-cluster-management.io/ocm/pkg/registration/clientcert"
|
||||
"open-cluster-management.io/ocm/pkg/registration/helpers"
|
||||
"open-cluster-management.io/ocm/pkg/registration/spoke/addon"
|
||||
"open-cluster-management.io/ocm/pkg/registration/spoke/lease"
|
||||
"open-cluster-management.io/ocm/pkg/registration/spoke/managedcluster"
|
||||
"open-cluster-management.io/ocm/pkg/registration/spoke/registration"
|
||||
)
|
||||
|
||||
const (
// spokeAgentNameLength is the length of the spoke agent name which is generated automatically
spokeAgentNameLength = 5
// defaultSpokeComponentNamespace is the default namespace in which the spoke agent is deployed
defaultSpokeComponentNamespace = "open-cluster-management-agent"
)

// AddOnLeaseControllerSyncInterval is exposed so that integration tests can crank up the controller sync speed.
// TODO if we register the lease informer to the lease controller, we need to increase this time
var AddOnLeaseControllerSyncInterval = 30 * time.Second

// SpokeAgentOptions holds configuration for spoke cluster agent
type SpokeAgentOptions struct {
AgentOptions *commonoptions.AgentOptions
ComponentNamespace string
AgentName string
BootstrapKubeconfig string
HubKubeconfigSecret string
HubKubeconfigDir string
SpokeExternalServerURLs []string
ClusterHealthCheckPeriod time.Duration
MaxCustomClusterClaims int
ClientCertExpirationSeconds int32
type SpokeAgentConfig struct {
agentOptions *commonoptions.AgentOptions
registrationOption *SpokeAgentOptions
}

// NewSpokeAgentOptions returns a SpokeAgentOptions
func NewSpokeAgentOptions() *SpokeAgentOptions {
return &SpokeAgentOptions{
AgentOptions: commonoptions.NewAgentOptions(),
HubKubeconfigSecret: "hub-kubeconfig-secret",
HubKubeconfigDir: "/spoke/hub-kubeconfig",
ClusterHealthCheckPeriod: 1 * time.Minute,
MaxCustomClusterClaims: 20,
// NewSpokeAgentConfig returns a SpokeAgentConfig
func NewSpokeAgentConfig(commonOpts *commonoptions.AgentOptions, opts *SpokeAgentOptions) *SpokeAgentConfig {
return &SpokeAgentConfig{
agentOptions: commonOpts,
registrationOption: opts,
}
}

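A rough sketch of how a caller might assemble the refactored config; this wiring is assumed, not taken from the commit, and the hosting command normally supplies a controllercmd.ControllerContext before starting the agent.

package main

import (
	commonoptions "open-cluster-management.io/ocm/pkg/common/options"
	"open-cluster-management.io/ocm/pkg/registration/spoke"
)

func main() {
	// common options (cluster name, hub kubeconfig dir, agent ID) and the
	// registration-specific options now live in separate structs.
	commonOpts := commonoptions.NewAgentOptions()
	regOpts := spoke.NewSpokeAgentOptions()

	agentCfg := spoke.NewSpokeAgentConfig(commonOpts, regOpts)

	// agentCfg.RunSpokeAgent(ctx, controllerContext) would then be started with a
	// controllercmd.ControllerContext supplied by the hosting command (omitted here).
	_ = agentCfg
}
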
@@ -107,12 +82,12 @@ func NewSpokeAgentOptions() *SpokeAgentOptions {
|
||||
// and started if the hub kubeconfig does not exist or is invalid and used to
|
||||
// create a valid hub kubeconfig. Once the hub kubeconfig is valid, the
|
||||
// temporary controller is stopped and the main controllers are started.
|
||||
func (o *SpokeAgentOptions) RunSpokeAgent(ctx context.Context, controllerContext *controllercmd.ControllerContext) error {
|
||||
func (o *SpokeAgentConfig) RunSpokeAgent(ctx context.Context, controllerContext *controllercmd.ControllerContext) error {
|
||||
kubeConfig := controllerContext.KubeConfig
|
||||
|
||||
// load spoke client config and create spoke clients,
|
||||
// the registration agent may not running in the spoke/managed cluster.
|
||||
spokeClientConfig, err := o.AgentOptions.SpokeKubeConfig(kubeConfig)
|
||||
spokeClientConfig, err := o.agentOptions.SpokeKubeConfig(kubeConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -138,13 +113,13 @@ func (o *SpokeAgentOptions) RunSpokeAgent(ctx context.Context, controllerContext
|
||||
)
|
||||
}
|
||||
|
||||
func (o *SpokeAgentOptions) RunSpokeAgentWithSpokeInformers(ctx context.Context,
|
||||
func (o *SpokeAgentConfig) RunSpokeAgentWithSpokeInformers(ctx context.Context,
|
||||
kubeConfig, spokeClientConfig *rest.Config,
|
||||
spokeKubeClient kubernetes.Interface,
|
||||
spokeKubeInformerFactory informers.SharedInformerFactory,
|
||||
spokeClusterInformerFactory clusterv1informers.SharedInformerFactory,
|
||||
recorder events.Recorder) error {
|
||||
klog.Infof("Cluster name is %q and agent name is %q", o.AgentOptions.SpokeClusterName, o.AgentName)
|
||||
klog.Infof("Cluster name is %q and agent ID is %q", o.agentOptions.SpokeClusterName, o.agentOptions.AgentID)
|
||||
|
||||
// create management kube client
|
||||
managementKubeClient, err := kubernetes.NewForConfig(kubeConfig)
|
||||
@@ -152,12 +127,23 @@ func (o *SpokeAgentOptions) RunSpokeAgentWithSpokeInformers(ctx context.Context,
|
||||
return err
|
||||
}
|
||||
|
||||
// the hub kubeconfig secret stored in the cluster where the agent pod runs
|
||||
if err := o.Complete(managementKubeClient.CoreV1(), ctx, recorder); err != nil {
|
||||
// dump data in hub kubeconfig secret into file system if it exists
|
||||
err = registration.DumpSecret(
|
||||
managementKubeClient.CoreV1(), o.agentOptions.ComponentNamespace, o.registrationOption.HubKubeconfigSecret,
|
||||
o.agentOptions.HubKubeconfigDir, ctx, recorder)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := o.registrationOption.Validate(); err != nil {
|
||||
klog.Fatal(err)
|
||||
}
|
||||
|
||||
if err := o.Validate(); err != nil {
|
||||
if err := o.agentOptions.Complete(); err != nil {
|
||||
klog.Fatal(err)
|
||||
}
|
||||
|
||||
if err := o.agentOptions.Validate(); err != nil {
|
||||
klog.Fatal(err)
|
||||
}
|
||||
|
||||
@@ -169,12 +155,12 @@ func (o *SpokeAgentOptions) RunSpokeAgentWithSpokeInformers(ctx context.Context,
|
||||
|
||||
// create a shared informer factory with specific namespace for the management cluster.
|
||||
namespacedManagementKubeInformerFactory := informers.NewSharedInformerFactoryWithOptions(
|
||||
managementKubeClient, 10*time.Minute, informers.WithNamespace(o.ComponentNamespace))
|
||||
managementKubeClient, 10*time.Minute, informers.WithNamespace(o.agentOptions.ComponentNamespace))
|
||||
|
||||
// load bootstrap client config and create bootstrap clients
|
||||
bootstrapClientConfig, err := clientcmd.BuildConfigFromFlags("", o.BootstrapKubeconfig)
|
||||
bootstrapClientConfig, err := clientcmd.BuildConfigFromFlags("", o.registrationOption.BootstrapKubeconfig)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to load bootstrap kubeconfig from file %q: %w", o.BootstrapKubeconfig, err)
|
||||
return fmt.Errorf("unable to load bootstrap kubeconfig from file %q: %w", o.registrationOption.BootstrapKubeconfig, err)
|
||||
}
|
||||
bootstrapKubeClient, err := kubernetes.NewForConfig(bootstrapClientConfig)
|
||||
if err != nil {
|
||||
@@ -187,7 +173,7 @@ func (o *SpokeAgentOptions) RunSpokeAgentWithSpokeInformers(ctx context.Context,
|
||||
|
||||
// start a SpokeClusterCreatingController to make sure there is a spoke cluster on hub cluster
|
||||
spokeClusterCreatingController := registration.NewManagedClusterCreatingController(
|
||||
o.AgentOptions.SpokeClusterName, o.SpokeExternalServerURLs,
|
||||
o.agentOptions.SpokeClusterName, o.registrationOption.SpokeExternalServerURLs,
|
||||
spokeClusterCABundle,
|
||||
bootstrapClusterClient,
|
||||
recorder,
|
||||
@@ -195,7 +181,7 @@ func (o *SpokeAgentOptions) RunSpokeAgentWithSpokeInformers(ctx context.Context,
|
||||
go spokeClusterCreatingController.Run(ctx, 1)
|
||||
|
||||
hubKubeconfigSecretController := registration.NewHubKubeconfigSecretController(
|
||||
o.HubKubeconfigDir, o.ComponentNamespace, o.HubKubeconfigSecret,
|
||||
o.agentOptions.HubKubeconfigDir, o.agentOptions.ComponentNamespace, o.registrationOption.HubKubeconfigSecret,
|
||||
// the hub kubeconfig secret stored in the cluster where the agent pod runs
|
||||
managementKubeClient.CoreV1(),
|
||||
namespacedManagementKubeInformerFactory.Core().V1().Secrets(),
|
||||
@@ -205,7 +191,7 @@ func (o *SpokeAgentOptions) RunSpokeAgentWithSpokeInformers(ctx context.Context,
|
||||
go namespacedManagementKubeInformerFactory.Start(ctx.Done())
|
||||
|
||||
// check if there already exists a valid client config for hub
|
||||
ok, err := o.hasValidHubClientConfig(ctx)
|
||||
ok, err := o.HasValidHubClientConfig(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -220,7 +206,7 @@ func (o *SpokeAgentOptions) RunSpokeAgentWithSpokeInformers(ctx context.Context,
|
||||
// the bootstrap informers are supposed to be terminated after completing the bootstrap process.
|
||||
bootstrapInformerFactory := informers.NewSharedInformerFactory(bootstrapKubeClient, 10*time.Minute)
|
||||
bootstrapNamespacedManagementKubeInformerFactory := informers.NewSharedInformerFactoryWithOptions(
|
||||
managementKubeClient, 10*time.Minute, informers.WithNamespace(o.ComponentNamespace))
|
||||
managementKubeClient, 10*time.Minute, informers.WithNamespace(o.agentOptions.ComponentNamespace))
|
||||
|
||||
// create a kubeconfig with references to the key/cert files in the same secret
|
||||
kubeconfig := clientcert.BuildKubeconfig(bootstrapClientConfig, clientcert.TLSCertFile, clientcert.TLSKeyFile)
|
||||
@@ -234,14 +220,14 @@ func (o *SpokeAgentOptions) RunSpokeAgentWithSpokeInformers(ctx context.Context,
|
||||
return err
|
||||
}
|
||||
|
||||
controllerName := fmt.Sprintf("BootstrapClientCertController@cluster:%s", o.AgentOptions.SpokeClusterName)
|
||||
controllerName := fmt.Sprintf("BootstrapClientCertController@cluster:%s", o.agentOptions.SpokeClusterName)
|
||||
clientCertForHubController := registration.NewClientCertForHubController(
|
||||
o.AgentOptions.SpokeClusterName, o.AgentName, o.ComponentNamespace, o.HubKubeconfigSecret,
|
||||
o.agentOptions.SpokeClusterName, o.agentOptions.AgentID, o.agentOptions.ComponentNamespace, o.registrationOption.HubKubeconfigSecret,
|
||||
kubeconfigData,
|
||||
// store the secret in the cluster where the agent pod runs
|
||||
bootstrapNamespacedManagementKubeInformerFactory.Core().V1().Secrets(),
|
||||
csrControl,
|
||||
o.ClientCertExpirationSeconds,
|
||||
o.registrationOption.ClientCertExpirationSeconds,
|
||||
managementKubeClient,
|
||||
registration.GenerateBootstrapStatusUpdater(),
|
||||
recorder,
|
||||
@@ -257,7 +243,7 @@ func (o *SpokeAgentOptions) RunSpokeAgentWithSpokeInformers(ctx context.Context,
|
||||
|
||||
// wait for the hub client config is ready.
|
||||
klog.Info("Waiting for hub client config and managed cluster to be ready")
|
||||
if err := wait.PollUntilContextCancel(bootstrapCtx, 1*time.Second, true, o.hasValidHubClientConfig); err != nil {
|
||||
if err := wait.PollUntilContextCancel(bootstrapCtx, 1*time.Second, true, o.HasValidHubClientConfig); err != nil {
|
||||
// TODO need run the bootstrap CSR forever to re-establish the client-cert if it is ever lost.
|
||||
stopBootstrap()
|
||||
return err
|
||||
@@ -268,7 +254,7 @@ func (o *SpokeAgentOptions) RunSpokeAgentWithSpokeInformers(ctx context.Context,
|
||||
}
|
||||
|
||||
// create hub clients and shared informer factories from hub kube config
|
||||
hubClientConfig, err := clientcmd.BuildConfigFromFlags("", path.Join(o.HubKubeconfigDir, clientcert.KubeconfigFile))
|
||||
hubClientConfig, err := clientcmd.BuildConfigFromFlags("", o.agentOptions.HubKubeconfigFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -292,20 +278,20 @@ func (o *SpokeAgentOptions) RunSpokeAgentWithSpokeInformers(ctx context.Context,
|
||||
hubKubeClient,
|
||||
10*time.Minute,
|
||||
informers.WithTweakListOptions(func(listOptions *metav1.ListOptions) {
|
||||
listOptions.LabelSelector = fmt.Sprintf("%s=%s", clusterv1.ClusterNameLabelKey, o.AgentOptions.SpokeClusterName)
|
||||
listOptions.LabelSelector = fmt.Sprintf("%s=%s", clusterv1.ClusterNameLabelKey, o.agentOptions.SpokeClusterName)
|
||||
}),
|
||||
)
|
||||
addOnInformerFactory := addoninformers.NewSharedInformerFactoryWithOptions(
|
||||
addOnClient,
|
||||
10*time.Minute,
|
||||
addoninformers.WithNamespace(o.AgentOptions.SpokeClusterName),
|
||||
addoninformers.WithNamespace(o.agentOptions.SpokeClusterName),
|
||||
)
|
||||
// create a cluster informer factory with name field selector because we just need to handle the current spoke cluster
|
||||
hubClusterInformerFactory := clusterv1informers.NewSharedInformerFactoryWithOptions(
|
||||
hubClusterClient,
|
||||
10*time.Minute,
|
||||
clusterv1informers.WithTweakListOptions(func(listOptions *metav1.ListOptions) {
|
||||
listOptions.FieldSelector = fields.OneTermEqualSelector("metadata.name", o.AgentOptions.SpokeClusterName).String()
|
||||
listOptions.FieldSelector = fields.OneTermEqualSelector("metadata.name", o.agentOptions.SpokeClusterName).String()
|
||||
}),
|
||||
)
|
||||
|
||||
@@ -324,18 +310,18 @@ func (o *SpokeAgentOptions) RunSpokeAgentWithSpokeInformers(ctx context.Context,
|
||||
}
|
||||
|
||||
// create another ClientCertForHubController for client certificate rotation
|
||||
controllerName := fmt.Sprintf("ClientCertController@cluster:%s", o.AgentOptions.SpokeClusterName)
|
||||
controllerName := fmt.Sprintf("ClientCertController@cluster:%s", o.agentOptions.SpokeClusterName)
|
||||
clientCertForHubController := registration.NewClientCertForHubController(
|
||||
o.AgentOptions.SpokeClusterName, o.AgentName, o.ComponentNamespace, o.HubKubeconfigSecret,
|
||||
o.agentOptions.SpokeClusterName, o.agentOptions.AgentID, o.agentOptions.ComponentNamespace, o.registrationOption.HubKubeconfigSecret,
|
||||
kubeconfigData,
|
||||
namespacedManagementKubeInformerFactory.Core().V1().Secrets(),
|
||||
csrControl,
|
||||
o.ClientCertExpirationSeconds,
|
||||
o.registrationOption.ClientCertExpirationSeconds,
|
||||
managementKubeClient,
|
||||
registration.GenerateStatusUpdater(
|
||||
hubClusterClient,
|
||||
hubClusterInformerFactory.Cluster().V1().ManagedClusters().Lister(),
|
||||
o.AgentOptions.SpokeClusterName),
|
||||
o.agentOptions.SpokeClusterName),
|
||||
recorder,
|
||||
controllerName,
|
||||
)
|
||||
@@ -345,7 +331,7 @@ func (o *SpokeAgentOptions) RunSpokeAgentWithSpokeInformers(ctx context.Context,
|
||||
|
||||
// create ManagedClusterLeaseController to keep the spoke cluster heartbeat
|
||||
managedClusterLeaseController := lease.NewManagedClusterLeaseController(
|
||||
o.AgentOptions.SpokeClusterName,
|
||||
o.agentOptions.SpokeClusterName,
|
||||
hubKubeClient,
|
||||
hubClusterInformerFactory.Cluster().V1().ManagedClusters(),
|
||||
recorder,
|
||||
@@ -353,22 +339,22 @@ func (o *SpokeAgentOptions) RunSpokeAgentWithSpokeInformers(ctx context.Context,
|
||||
|
||||
// create NewManagedClusterStatusController to update the spoke cluster status
|
||||
managedClusterHealthCheckController := managedcluster.NewManagedClusterStatusController(
|
||||
o.AgentOptions.SpokeClusterName,
|
||||
o.agentOptions.SpokeClusterName,
|
||||
hubClusterClient,
|
||||
hubClusterInformerFactory.Cluster().V1().ManagedClusters(),
|
||||
spokeKubeClient.Discovery(),
|
||||
spokeClusterInformerFactory.Cluster().V1alpha1().ClusterClaims(),
|
||||
spokeKubeInformerFactory.Core().V1().Nodes(),
|
||||
o.MaxCustomClusterClaims,
|
||||
o.ClusterHealthCheckPeriod,
|
||||
o.registrationOption.MaxCustomClusterClaims,
|
||||
o.registrationOption.ClusterHealthCheckPeriod,
|
||||
recorder,
|
||||
)
|
||||
|
||||
var addOnLeaseController factory.Controller
|
||||
var addOnRegistrationController factory.Controller
|
||||
if features.DefaultSpokeRegistrationMutableFeatureGate.Enabled(ocmfeature.AddonManagement) {
|
||||
if features.SpokeMutableFeatureGate.Enabled(ocmfeature.AddonManagement) {
|
||||
addOnLeaseController = addon.NewManagedClusterAddOnLeaseController(
|
||||
o.AgentOptions.SpokeClusterName,
|
||||
o.agentOptions.SpokeClusterName,
|
||||
addOnClient,
|
||||
addOnInformerFactory.Addon().V1alpha1().ManagedClusterAddOns(),
|
||||
hubKubeClient.CoordinationV1(),
|
||||
@@ -379,8 +365,8 @@ func (o *SpokeAgentOptions) RunSpokeAgentWithSpokeInformers(ctx context.Context,
|
||||
)
|
||||
|
||||
addOnRegistrationController = addon.NewAddOnRegistrationController(
|
||||
o.AgentOptions.SpokeClusterName,
|
||||
o.AgentName,
|
||||
o.agentOptions.SpokeClusterName,
|
||||
o.agentOptions.AgentID,
|
||||
kubeconfigData,
|
||||
addOnClient,
|
||||
managementKubeClient,
|
||||
@@ -397,14 +383,14 @@ func (o *SpokeAgentOptions) RunSpokeAgentWithSpokeInformers(ctx context.Context,
|
||||
go addOnInformerFactory.Start(ctx.Done())
|
||||
|
||||
go spokeKubeInformerFactory.Start(ctx.Done())
|
||||
if features.DefaultSpokeRegistrationMutableFeatureGate.Enabled(ocmfeature.ClusterClaim) {
|
||||
if features.SpokeMutableFeatureGate.Enabled(ocmfeature.ClusterClaim) {
|
||||
go spokeClusterInformerFactory.Start(ctx.Done())
|
||||
}
|
||||
|
||||
go clientCertForHubController.Run(ctx, 1)
|
||||
go managedClusterLeaseController.Run(ctx, 1)
|
||||
go managedClusterHealthCheckController.Run(ctx, 1)
|
||||
if features.DefaultSpokeRegistrationMutableFeatureGate.Enabled(ocmfeature.AddonManagement) {
|
||||
if features.SpokeMutableFeatureGate.Enabled(ocmfeature.AddonManagement) {
|
||||
go addOnLeaseController.Run(ctx, 1)
|
||||
go addOnRegistrationController.Run(ctx, 1)
|
||||
}
|
||||
@@ -413,95 +399,7 @@ func (o *SpokeAgentOptions) RunSpokeAgentWithSpokeInformers(ctx context.Context,
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddFlags registers flags for Agent
|
||||
func (o *SpokeAgentOptions) AddFlags(fs *pflag.FlagSet) {
|
||||
features.DefaultSpokeRegistrationMutableFeatureGate.AddFlag(fs)
|
||||
o.AgentOptions.AddFlags(fs)
|
||||
fs.StringVar(&o.BootstrapKubeconfig, "bootstrap-kubeconfig", o.BootstrapKubeconfig,
|
||||
"The path of the kubeconfig file for agent bootstrap.")
|
||||
fs.StringVar(&o.HubKubeconfigSecret, "hub-kubeconfig-secret", o.HubKubeconfigSecret,
|
||||
"The name of secret in component namespace storing kubeconfig for hub.")
|
||||
fs.StringVar(&o.HubKubeconfigDir, "hub-kubeconfig-dir", o.HubKubeconfigDir,
|
||||
"The mount path of hub-kubeconfig-secret in the container.")
|
||||
fs.StringArrayVar(&o.SpokeExternalServerURLs, "spoke-external-server-urls", o.SpokeExternalServerURLs,
|
||||
"A list of reachable spoke cluster api server URLs for hub cluster.")
|
||||
fs.DurationVar(&o.ClusterHealthCheckPeriod, "cluster-healthcheck-period", o.ClusterHealthCheckPeriod,
|
||||
"The period to check managed cluster kube-apiserver health")
|
||||
fs.IntVar(&o.MaxCustomClusterClaims, "max-custom-cluster-claims", o.MaxCustomClusterClaims,
|
||||
"The max number of custom cluster claims to expose.")
|
||||
fs.Int32Var(&o.ClientCertExpirationSeconds, "client-cert-expiration-seconds", o.ClientCertExpirationSeconds,
|
||||
"The requested duration in seconds of validity of the issued client certificate. If this is not set, "+
|
||||
"the value of --cluster-signing-duration command-line flag of the kube-controller-manager will be used.")
|
||||
}
|
||||
|
||||
// Validate verifies the inputs.
|
||||
func (o *SpokeAgentOptions) Validate() error {
|
||||
if o.BootstrapKubeconfig == "" {
|
||||
return errors.New("bootstrap-kubeconfig is required")
|
||||
}
|
||||
|
||||
if err := o.AgentOptions.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if o.AgentName == "" {
|
||||
return errors.New("agent name is empty")
|
||||
}
|
||||
|
||||
// if SpokeExternalServerURLs is specified we validate every URL in it, we expect the spoke external server URL is https
|
||||
if len(o.SpokeExternalServerURLs) != 0 {
|
||||
for _, serverURL := range o.SpokeExternalServerURLs {
|
||||
if !helpers.IsValidHTTPSURL(serverURL) {
|
||||
return fmt.Errorf("%q is invalid", serverURL)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if o.ClusterHealthCheckPeriod <= 0 {
|
||||
return errors.New("cluster healthcheck period must greater than zero")
|
||||
}
|
||||
|
||||
if o.ClientCertExpirationSeconds != 0 && o.ClientCertExpirationSeconds < 3600 {
|
||||
return errors.New("client certificate expiration seconds must greater or qual to 3600")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Complete fills in missing values.
|
||||
func (o *SpokeAgentOptions) Complete(coreV1Client corev1client.CoreV1Interface, ctx context.Context, recorder events.Recorder) error {
|
||||
// get component namespace of spoke agent
|
||||
nsBytes, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
|
||||
if err != nil {
|
||||
o.ComponentNamespace = defaultSpokeComponentNamespace
|
||||
} else {
|
||||
o.ComponentNamespace = string(nsBytes)
|
||||
}
|
||||
|
||||
// dump data in hub kubeconfig secret into file system if it exists
|
||||
err = registration.DumpSecret(coreV1Client, o.ComponentNamespace, o.HubKubeconfigSecret,
|
||||
o.HubKubeconfigDir, ctx, recorder)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// load or generate cluster/agent names
|
||||
o.AgentOptions.SpokeClusterName, o.AgentName = o.getOrGenerateClusterAgentNames()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// generateClusterName generates a name for spoke cluster
|
||||
func generateClusterName() string {
|
||||
return string(uuid.NewUUID())
|
||||
}
|
||||
|
||||
// generateAgentName generates a random name for spoke cluster agent
|
||||
func generateAgentName() string {
|
||||
return utilrand.String(spokeAgentNameLength)
|
||||
}
|
||||
|
||||
// hasValidHubClientConfig returns true if all the conditions below are met:
// HasValidHubClientConfig returns true if all the conditions below are met:
// 1. KubeconfigFile exists;
// 2. TLSKeyFile exists;
// 3. TLSCertFile exists;
@@ -511,20 +409,19 @@ func generateAgentName() string {
|
||||
// Normally, KubeconfigFile/TLSKeyFile/TLSCertFile will be created once the bootstrap process
|
||||
// completes. Changing the name of the cluster will make the existing hub kubeconfig invalid,
|
||||
// because certificate in TLSCertFile is issued to a specific cluster/agent.
|
||||
func (o *SpokeAgentOptions) hasValidHubClientConfig(ctx context.Context) (bool, error) {
|
||||
kubeconfigPath := path.Join(o.HubKubeconfigDir, clientcert.KubeconfigFile)
|
||||
if _, err := os.Stat(kubeconfigPath); os.IsNotExist(err) {
|
||||
klog.V(4).Infof("Kubeconfig file %q not found", kubeconfigPath)
|
||||
func (o *SpokeAgentConfig) HasValidHubClientConfig(_ context.Context) (bool, error) {
|
||||
if _, err := os.Stat(o.agentOptions.HubKubeconfigFile); os.IsNotExist(err) {
|
||||
klog.V(4).Infof("Kubeconfig file %q not found", o.agentOptions.HubKubeconfigFile)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
keyPath := path.Join(o.HubKubeconfigDir, clientcert.TLSKeyFile)
|
||||
keyPath := path.Join(o.agentOptions.HubKubeconfigDir, clientcert.TLSKeyFile)
|
||||
if _, err := os.Stat(keyPath); os.IsNotExist(err) {
|
||||
klog.V(4).Infof("TLS key file %q not found", keyPath)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
certPath := path.Join(o.HubKubeconfigDir, clientcert.TLSCertFile)
|
||||
certPath := path.Join(o.agentOptions.HubKubeconfigDir, clientcert.TLSCertFile)
|
||||
certData, err := os.ReadFile(path.Clean(certPath))
|
||||
if err != nil {
|
||||
klog.V(4).Infof("Unable to load TLS cert file %q", certPath)
|
||||
@@ -536,86 +433,19 @@ func (o *SpokeAgentOptions) hasValidHubClientConfig(ctx context.Context) (bool,
|
||||
if err != nil {
|
||||
return false, nil
|
||||
}
|
||||
if clusterName != o.AgentOptions.SpokeClusterName || agentName != o.AgentName {
|
||||
if clusterName != o.agentOptions.SpokeClusterName || agentName != o.agentOptions.AgentID {
|
||||
klog.V(4).Infof("Certificate in file %q is issued for agent %q instead of %q",
|
||||
certPath, fmt.Sprintf("%s:%s", clusterName, agentName),
|
||||
fmt.Sprintf("%s:%s", o.AgentOptions.SpokeClusterName, o.AgentName))
|
||||
fmt.Sprintf("%s:%s", o.agentOptions.SpokeClusterName, o.agentOptions.AgentID))
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return clientcert.IsCertificateValid(certData, nil)
|
||||
}
|
||||
|
||||
// getOrGenerateClusterAgentNames returns cluster name and agent name.
|
||||
// Rules for picking up cluster name:
|
||||
// 1. Use cluster name from input arguments if 'cluster-name' is specified;
|
||||
// 2. Parse cluster name from the common name of the certification subject if the certification exists;
|
||||
// 3. Fallback to cluster name in the mounted secret if it exists;
|
||||
// 4. TODO: Read cluster name from openshift struct if the agent is running in an openshift cluster;
|
||||
// 5. Generate a random cluster name then;
|
||||
|
||||
// Rules for picking up agent name:
|
||||
// 1. Parse agent name from the common name of the certification subject if the certification exists;
|
||||
// 2. Fallback to agent name in the mounted secret if it exists;
|
||||
// 3. Generate a random agent name then;
|
||||
func (o *SpokeAgentOptions) getOrGenerateClusterAgentNames() (string, string) {
|
||||
// try to load cluster/agent name from tls certification
|
||||
var clusterNameInCert, agentNameInCert string
|
||||
certPath := path.Join(o.HubKubeconfigDir, clientcert.TLSCertFile)
|
||||
certData, certErr := os.ReadFile(path.Clean(certPath))
|
||||
if certErr == nil {
|
||||
clusterNameInCert, agentNameInCert, _ = registration.GetClusterAgentNamesFromCertificate(certData)
|
||||
}
|
||||
|
||||
clusterName := o.AgentOptions.SpokeClusterName
|
||||
// if cluster name is not specified with input argument, try to load it from file
|
||||
if clusterName == "" {
|
||||
// TODO, read cluster name from openshift struct if the spoke agent is running in an openshift cluster
|
||||
|
||||
// and then load the cluster name from the mounted secret
|
||||
clusterNameFilePath := path.Join(o.HubKubeconfigDir, clientcert.ClusterNameFile)
|
||||
clusterNameBytes, err := os.ReadFile(path.Clean(clusterNameFilePath))
|
||||
switch {
|
||||
case len(clusterNameInCert) > 0:
|
||||
// use cluster name loaded from the tls certification
|
||||
clusterName = clusterNameInCert
|
||||
if clusterNameInCert != string(clusterNameBytes) {
|
||||
klog.Warningf("Use cluster name %q in certification instead of %q in the mounted secret", clusterNameInCert, string(clusterNameBytes))
|
||||
}
|
||||
case err == nil:
|
||||
// use cluster name load from the mounted secret
|
||||
clusterName = string(clusterNameBytes)
|
||||
default:
|
||||
// generate random cluster name
|
||||
clusterName = generateClusterName()
|
||||
}
|
||||
}
|
||||
|
||||
// try to load agent name from the mounted secret
|
||||
agentNameFilePath := path.Join(o.HubKubeconfigDir, clientcert.AgentNameFile)
|
||||
agentNameBytes, err := os.ReadFile(path.Clean(agentNameFilePath))
|
||||
var agentName string
|
||||
switch {
|
||||
case len(agentNameInCert) > 0:
|
||||
// use agent name loaded from the tls certification
|
||||
agentName = agentNameInCert
|
||||
if agentNameInCert != string(agentNameBytes) {
|
||||
klog.Warningf("Use agent name %q in certification instead of %q in the mounted secret", agentNameInCert, string(agentNameBytes))
|
||||
}
|
||||
case err == nil:
|
||||
// use agent name loaded from the mounted secret
|
||||
agentName = string(agentNameBytes)
|
||||
default:
|
||||
// generate random agent name
|
||||
agentName = generateAgentName()
|
||||
}
|
||||
|
||||
return clusterName, agentName
|
||||
}
|
||||
|
||||
// getSpokeClusterCABundle returns the spoke cluster Kubernetes client CA data when SpokeExternalServerURLs is specified
|
||||
func (o *SpokeAgentOptions) getSpokeClusterCABundle(kubeConfig *rest.Config) ([]byte, error) {
|
||||
if len(o.SpokeExternalServerURLs) == 0 {
|
||||
func (o *SpokeAgentConfig) getSpokeClusterCABundle(kubeConfig *rest.Config) ([]byte, error) {
|
||||
if len(o.registrationOption.SpokeExternalServerURLs) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
if kubeConfig.CAData != nil {
|
||||
|
||||
@@ -8,141 +8,16 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/openshift/library-go/pkg/operator/events/eventstesting"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
kubefake "k8s.io/client-go/kubernetes/fake"
|
||||
"k8s.io/client-go/rest"
|
||||
|
||||
commonoptions "open-cluster-management.io/ocm/pkg/common/options"
|
||||
testingcommon "open-cluster-management.io/ocm/pkg/common/testing"
|
||||
"open-cluster-management.io/ocm/pkg/registration/clientcert"
|
||||
testinghelpers "open-cluster-management.io/ocm/pkg/registration/helpers/testing"
|
||||
)
|
||||
|
||||
func TestComplete(t *testing.T) {
|
||||
// get component namespace
|
||||
var componentNamespace string
|
||||
nsBytes, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
|
||||
if err != nil {
|
||||
componentNamespace = defaultSpokeComponentNamespace
|
||||
} else {
|
||||
componentNamespace = string(nsBytes)
|
||||
}
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
clusterName string
|
||||
secret *corev1.Secret
|
||||
expectedClusterName string
|
||||
expectedAgentName string
|
||||
}{
|
||||
{
|
||||
name: "generate random cluster/agent name",
|
||||
},
|
||||
{
|
||||
name: "specify cluster name",
|
||||
clusterName: "cluster1",
|
||||
expectedClusterName: "cluster1",
|
||||
},
|
||||
{
|
||||
name: "override cluster name in secret with specified value",
|
||||
clusterName: "cluster1",
|
||||
secret: testinghelpers.NewHubKubeconfigSecret(componentNamespace, "hub-kubeconfig-secret", "", nil, map[string][]byte{
|
||||
"cluster-name": []byte("cluster2"),
|
||||
"agent-name": []byte("agent2"),
|
||||
}),
|
||||
expectedClusterName: "cluster1",
|
||||
expectedAgentName: "agent2",
|
||||
},
|
||||
{
|
||||
name: "override cluster name in cert with specified value",
|
||||
clusterName: "cluster1",
|
||||
secret: testinghelpers.NewHubKubeconfigSecret(componentNamespace, "hub-kubeconfig-secret", "", testinghelpers.NewTestCert("system:open-cluster-management:cluster2:agent2", 60*time.Second), map[string][]byte{
|
||||
"kubeconfig": testinghelpers.NewKubeconfig(nil, nil),
|
||||
"cluster-name": []byte("cluster3"),
|
||||
"agent-name": []byte("agent3"),
|
||||
}),
|
||||
expectedClusterName: "cluster1",
|
||||
expectedAgentName: "agent2",
|
||||
},
|
||||
{
|
||||
name: "take cluster/agent name from secret",
|
||||
secret: testinghelpers.NewHubKubeconfigSecret(componentNamespace, "hub-kubeconfig-secret", "", nil, map[string][]byte{
|
||||
"cluster-name": []byte("cluster1"),
|
||||
"agent-name": []byte("agent1"),
|
||||
}),
|
||||
expectedClusterName: "cluster1",
|
||||
expectedAgentName: "agent1",
|
||||
},
|
||||
{
|
||||
name: "take cluster/agent name from cert",
|
||||
secret: testinghelpers.NewHubKubeconfigSecret(componentNamespace, "hub-kubeconfig-secret", "", testinghelpers.NewTestCert("system:open-cluster-management:cluster1:agent1", 60*time.Second), map[string][]byte{}),
|
||||
expectedClusterName: "cluster1",
|
||||
expectedAgentName: "agent1",
|
||||
},
|
||||
{
|
||||
name: "override cluster name in secret with value from cert",
|
||||
secret: testinghelpers.NewHubKubeconfigSecret(componentNamespace, "hub-kubeconfig-secret", "", testinghelpers.NewTestCert("system:open-cluster-management:cluster1:agent1", 60*time.Second), map[string][]byte{
|
||||
"cluster-name": []byte("cluster2"),
|
||||
"agent-name": []byte("agent2"),
|
||||
}),
|
||||
expectedClusterName: "cluster1",
|
||||
expectedAgentName: "agent1",
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
// setup kube client
|
||||
objects := []runtime.Object{}
|
||||
if c.secret != nil {
|
||||
objects = append(objects, c.secret)
|
||||
}
|
||||
kubeClient := kubefake.NewSimpleClientset(objects...)
|
||||
|
||||
// create a tmp dir to dump hub kubeconfig
|
||||
dir, err := os.MkdirTemp("", "hub-kubeconfig")
|
||||
if err != nil {
|
||||
t.Error("unable to create a tmp dir")
|
||||
}
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
options := &SpokeAgentOptions{
|
||||
AgentOptions: &commonoptions.AgentOptions{
|
||||
SpokeClusterName: c.clusterName,
|
||||
},
|
||||
HubKubeconfigSecret: "hub-kubeconfig-secret",
|
||||
HubKubeconfigDir: dir,
|
||||
}
|
||||
|
||||
if err := options.Complete(kubeClient.CoreV1(), context.TODO(), eventstesting.NewTestingEventRecorder(t)); err != nil {
t.Errorf("unexpected error: %v", err)
}
if options.ComponentNamespace == "" {
t.Error("component namespace should not be empty")
}
if options.AgentOptions.SpokeClusterName == "" {
t.Error("cluster name should not be empty")
}
if options.AgentName == "" {
t.Error("agent name should not be empty")
}
if len(c.expectedClusterName) > 0 && options.AgentOptions.SpokeClusterName != c.expectedClusterName {
t.Errorf("expect cluster name %q but got %q", c.expectedClusterName, options.AgentOptions.SpokeClusterName)
}
if len(c.expectedAgentName) > 0 && options.AgentName != c.expectedAgentName {
t.Errorf("expect agent name %q but got %q", c.expectedAgentName, options.AgentName)
}
})
}
}

func TestValidate(t *testing.T) {
defaultCompletedOptions := NewSpokeAgentOptions()
defaultCompletedOptions.BootstrapKubeconfig = "/spoke/bootstrap/kubeconfig"
defaultCompletedOptions.AgentOptions.SpokeClusterName = "testcluster"
defaultCompletedOptions.AgentName = "testagent"

cases := []struct {
name string
@@ -154,24 +29,10 @@ func TestValidate(t *testing.T) {
options: &SpokeAgentOptions{},
expectedErr: "bootstrap-kubeconfig is required",
},
{
name: "no cluster name",
options: &SpokeAgentOptions{BootstrapKubeconfig: "/spoke/bootstrap/kubeconfig", AgentOptions: &commonoptions.AgentOptions{}},
expectedErr: "cluster name is empty",
},
{
name: "no agent name",
options: &SpokeAgentOptions{BootstrapKubeconfig: "/spoke/bootstrap/kubeconfig", AgentOptions: &commonoptions.AgentOptions{SpokeClusterName: "testcluster"}},
expectedErr: "agent name is empty",
},
{
name: "invalid external server URLs",
options: &SpokeAgentOptions{
BootstrapKubeconfig: "/spoke/bootstrap/kubeconfig",
AgentOptions: &commonoptions.AgentOptions{
SpokeClusterName: "testcluster",
},
AgentName: "testagent",
BootstrapKubeconfig: "/spoke/bootstrap/kubeconfig",
SpokeExternalServerURLs: []string{"https://127.0.0.1:64433", "http://127.0.0.1:8080"},
},
expectedErr: "\"http://127.0.0.1:8080\" is invalid",
@@ -179,11 +40,7 @@ func TestValidate(t *testing.T) {
{
name: "invalid cluster healthcheck period",
options: &SpokeAgentOptions{
BootstrapKubeconfig: "/spoke/bootstrap/kubeconfig",
AgentOptions: &commonoptions.AgentOptions{
SpokeClusterName: "testcluster",
},
AgentName: "testagent",
BootstrapKubeconfig: "/spoke/bootstrap/kubeconfig",
ClusterHealthCheckPeriod: 0,
},
expectedErr: "cluster healthcheck period must greater than zero",
@@ -196,15 +53,10 @@ func TestValidate(t *testing.T) {
{
name: "default completed options",
options: &SpokeAgentOptions{
HubKubeconfigSecret: "hub-kubeconfig-secret",
HubKubeconfigDir: "/spoke/hub-kubeconfig",
ClusterHealthCheckPeriod: 1 * time.Minute,
MaxCustomClusterClaims: 20,
BootstrapKubeconfig: "/spoke/bootstrap/kubeconfig",
AgentOptions: &commonoptions.AgentOptions{
SpokeClusterName: "testcluster",
},
AgentName: "testagent",
HubKubeconfigSecret: "hub-kubeconfig-secret",
ClusterHealthCheckPeriod: 1 * time.Minute,
MaxCustomClusterClaims: 20,
BootstrapKubeconfig: "/spoke/bootstrap/kubeconfig",
ClientCertExpirationSeconds: 3599,
},
expectedErr: "client certificate expiration seconds must greater or qual to 3600",
@@ -212,15 +64,10 @@ func TestValidate(t *testing.T) {
{
name: "default completed options",
options: &SpokeAgentOptions{
HubKubeconfigSecret: "hub-kubeconfig-secret",
HubKubeconfigDir: "/spoke/hub-kubeconfig",
ClusterHealthCheckPeriod: 1 * time.Minute,
MaxCustomClusterClaims: 20,
BootstrapKubeconfig: "/spoke/bootstrap/kubeconfig",
AgentOptions: &commonoptions.AgentOptions{
SpokeClusterName: "testcluster",
},
AgentName: "testagent",
HubKubeconfigSecret: "hub-kubeconfig-secret",
ClusterHealthCheckPeriod: 1 * time.Minute,
MaxCustomClusterClaims: 20,
BootstrapKubeconfig: "/spoke/bootstrap/kubeconfig",
ClientCertExpirationSeconds: 3600,
},
expectedErr: "",
@@ -301,14 +148,16 @@ func TestHasValidHubClientConfig(t *testing.T) {
testinghelpers.WriteFile(path.Join(tempDir, "tls.crt"), c.tlsCert)
}

options := &SpokeAgentOptions{
AgentOptions: &commonoptions.AgentOptions{
SpokeClusterName: c.clusterName,
},
AgentName: c.agentName,
agentOpts := &commonoptions.AgentOptions{
SpokeClusterName: c.clusterName,
AgentID: c.agentName,
HubKubeconfigDir: tempDir,
}
valid, err := options.hasValidHubClientConfig(context.TODO())
cfg := NewSpokeAgentConfig(agentOpts, NewSpokeAgentOptions())
if err := agentOpts.Complete(); err != nil {
t.Fatal(err)
}
valid, err := cfg.HasValidHubClientConfig(context.TODO())
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@@ -319,54 +168,6 @@ func TestHasValidHubClientConfig(t *testing.T) {
}
}

func TestGetOrGenerateClusterAgentNames(t *testing.T) {
tempDir, err := os.MkdirTemp("", "testgetorgenerateclusteragentnames")
if err != nil {
t.Errorf("unexpected error: %v", err)
}
defer os.RemoveAll(tempDir)

cases := []struct {
name string
options *SpokeAgentOptions
expectedClusterName string
expectedAgentName string
}{
{
name: "cluster name is specified",
options: &SpokeAgentOptions{AgentOptions: &commonoptions.AgentOptions{SpokeClusterName: "cluster0"}},
expectedClusterName: "cluster0",
},
{
name: "cluster name and agent name are in file",
options: &SpokeAgentOptions{HubKubeconfigDir: tempDir, AgentOptions: &commonoptions.AgentOptions{}},
expectedClusterName: "cluster1",
expectedAgentName: "agent1",
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
if c.options.HubKubeconfigDir != "" {
testinghelpers.WriteFile(path.Join(tempDir, clientcert.ClusterNameFile), []byte(c.expectedClusterName))
testinghelpers.WriteFile(path.Join(tempDir, clientcert.AgentNameFile), []byte(c.expectedAgentName))
}
clusterName, agentName := c.options.getOrGenerateClusterAgentNames()
if clusterName != c.expectedClusterName {
t.Errorf("expect cluster name %q but got %q", c.expectedClusterName, clusterName)
}

// agent name cannot be empty, it is either generated or from file
if agentName == "" {
t.Error("agent name should not be empty")
}

if c.expectedAgentName != "" && c.expectedAgentName != agentName {
t.Errorf("expect agent name %q but got %q", c.expectedAgentName, agentName)
}
})
}
}

func TestGetSpokeClusterCABundle(t *testing.T) {
tempDir, err := os.MkdirTemp("", "testgetspokeclustercabundle")
if err != nil {
@@ -418,7 +219,8 @@ func TestGetSpokeClusterCABundle(t *testing.T) {
restConig.CAData = nil
restConig.CAFile = path.Join(tempDir, c.caFile)
}
caData, err := c.options.getSpokeClusterCABundle(restConig)
cfg := NewSpokeAgentConfig(commonoptions.NewAgentOptions(), c.options)
caData, err := cfg.getSpokeClusterCABundle(restConig)
testingcommon.AssertError(t, err, c.expectedErr)
if c.expectedCAData == nil && caData == nil {
return

58
pkg/singleton/spoke/agent.go
Normal file
@@ -0,0 +1,58 @@
package spoke

import (
"context"
"time"

"github.com/openshift/library-go/pkg/controller/controllercmd"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/v2"

commonoptions "open-cluster-management.io/ocm/pkg/common/options"
registration "open-cluster-management.io/ocm/pkg/registration/spoke"
work "open-cluster-management.io/ocm/pkg/work/spoke"
)

type AgentConfig struct {
agentOption *commonoptions.AgentOptions
registrationOption *registration.SpokeAgentOptions
workOption *work.WorkloadAgentOptions
}

func NewAgentConfig(
agentOption *commonoptions.AgentOptions,
registrationOption *registration.SpokeAgentOptions,
workOption *work.WorkloadAgentOptions) *AgentConfig {
return &AgentConfig{
agentOption: agentOption,
registrationOption: registrationOption,
workOption: workOption,
}
}

func (a *AgentConfig) RunSpokeAgent(ctx context.Context, controllerContext *controllercmd.ControllerContext) error {
registrationCfg := registration.NewSpokeAgentConfig(a.agentOption, a.registrationOption)
// start registration agent at first
go func() {
if err := registrationCfg.RunSpokeAgent(ctx, controllerContext); err != nil {
klog.Fatal(err)
}
}()

// wait for the hub client config ready.
klog.Info("Waiting for hub client config and managed cluster to be ready")
if err := wait.PollUntilContextCancel(ctx, 1*time.Second, true, registrationCfg.HasValidHubClientConfig); err != nil {
return err
}

workCfg := work.NewWorkAgentConfig(a.agentOption, a.workOption)
// start work agent
go func() {
if err := workCfg.RunWorkloadAgent(ctx, controllerContext); err != nil {
klog.Fatal(err)
}
}()

<-ctx.Done()
return nil
}
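For reference, a minimal sketch of how the singleton AgentConfig above is meant to be driven. The command and flag wiring of the new klusterlet agent command is not part of this hunk, so the helper function and package name below are illustrative assumptions; only the constructors shown in the diff are used.

package example

import (
    "context"

    "github.com/openshift/library-go/pkg/controller/controllercmd"

    commonoptions "open-cluster-management.io/ocm/pkg/common/options"
    registration "open-cluster-management.io/ocm/pkg/registration/spoke"
    singleton "open-cluster-management.io/ocm/pkg/singleton/spoke"
    work "open-cluster-management.io/ocm/pkg/work/spoke"
)

// runSingletonAgent is a sketch (not the real command) showing how the three
// option sets are combined into one AgentConfig and run as a single process.
func runSingletonAgent(ctx context.Context, controllerCtx *controllercmd.ControllerContext) error {
    // One common option set shared by both embedded agents, plus the
    // registration- and work-specific option sets with their defaults.
    commonOpts := commonoptions.NewAgentOptions()
    registrationOpts := registration.NewSpokeAgentOptions()
    workOpts := work.NewWorkloadAgentOptions()

    // AgentConfig starts the registration agent first, blocks until
    // HasValidHubClientConfig reports a usable hub kubeconfig, and only then
    // starts the work agent in the same binary.
    cfg := singleton.NewAgentConfig(commonOpts, registrationOpts, workOpts)
    return cfg.RunSpokeAgent(ctx, controllerCtx)
}

The ordering matters: the work agent needs the hub kubeconfig that registration bootstrap produces, which is why RunSpokeAgent polls HasValidHubClientConfig before launching the work controllers.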
@@ -9,15 +9,18 @@ import (
"k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
fakedynamic "k8s.io/client-go/dynamic/fake"
clienttesting "k8s.io/client-go/testing"
"k8s.io/utils/pointer"

fakeworkclient "open-cluster-management.io/api/client/work/clientset/versioned/fake"
ocmfeature "open-cluster-management.io/api/feature"
workapiv1 "open-cluster-management.io/api/work/v1"

"open-cluster-management.io/ocm/pkg/common/patcher"
testingcommon "open-cluster-management.io/ocm/pkg/common/testing"
"open-cluster-management.io/ocm/pkg/features"
"open-cluster-management.io/ocm/pkg/work/spoke/controllers"
"open-cluster-management.io/ocm/pkg/work/spoke/spoketesting"
"open-cluster-management.io/ocm/pkg/work/spoke/statusfeedback"
@@ -218,6 +221,7 @@ func TestSyncManifestWork(t *testing.T) {
}

func TestStatusFeedback(t *testing.T) {
utilruntime.Must(features.SpokeMutableFeatureGate.Add(ocmfeature.DefaultSpokeWorkFeatureGates))
cases := []struct {
name string
existingResources []runtime.Object

28
pkg/work/spoke/options.go
Normal file
@@ -0,0 +1,28 @@
package spoke

import (
"time"

"github.com/spf13/pflag"
)

// WorkloadAgentOptions defines the flags for workload agent
type WorkloadAgentOptions struct {
StatusSyncInterval time.Duration
AppliedManifestWorkEvictionGracePeriod time.Duration
}

// NewWorkloadAgentOptions returns the flags with default value set
func NewWorkloadAgentOptions() *WorkloadAgentOptions {
return &WorkloadAgentOptions{
StatusSyncInterval: 10 * time.Second,
AppliedManifestWorkEvictionGracePeriod: 60 * time.Minute,
}
}

// AddFlags register and binds the default flags
func (o *WorkloadAgentOptions) AddFlags(fs *pflag.FlagSet) {
fs.DurationVar(&o.StatusSyncInterval, "status-sync-interval", o.StatusSyncInterval, "Interval to sync resource status to hub.")
fs.DurationVar(&o.AppliedManifestWorkEvictionGracePeriod, "appliedmanifestwork-eviction-grace-period",
o.AppliedManifestWorkEvictionGracePeriod, "Grace period for appliedmanifestwork eviction")
}
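A small sketch of exercising the trimmed-down WorkloadAgentOptions flag set on its own. The flag set name "work-agent" and the helper function are illustrative assumptions; in the actual binary the flags are registered on the agent command's flag set alongside the common agent options.

package example

import (
    "fmt"

    "github.com/spf13/pflag"

    work "open-cluster-management.io/ocm/pkg/work/spoke"
)

// parseWorkFlags shows the intended use of AddFlags: defaults come from
// NewWorkloadAgentOptions (10s status sync, 60m eviction grace period) and
// can be overridden on the command line.
func parseWorkFlags(args []string) (*work.WorkloadAgentOptions, error) {
    opts := work.NewWorkloadAgentOptions()
    fs := pflag.NewFlagSet("work-agent", pflag.ContinueOnError)
    opts.AddFlags(fs)
    if err := fs.Parse(args); err != nil {
        return nil, err
    }
    fmt.Println("status sync interval:", opts.StatusSyncInterval)
    return opts, nil
}

For example, parseWorkFlags([]string{"--status-sync-interval=30s"}) would leave the eviction grace period at its default and set the status sync interval to 30s.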
@@ -5,7 +5,6 @@ import (
"time"

"github.com/openshift/library-go/pkg/controller/controllercmd"
"github.com/spf13/cobra"
apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
@@ -39,47 +38,29 @@ const (
availableStatusControllerWorkers = 10
)

// WorkloadAgentOptions defines the flags for workload agent
type WorkloadAgentOptions struct {
AgentOptions *commonoptions.AgentOptions
HubKubeconfigFile string
AgentID string
StatusSyncInterval time.Duration
AppliedManifestWorkEvictionGracePeriod time.Duration
type WorkAgentConfig struct {
agentOptions *commonoptions.AgentOptions
workOptions *WorkloadAgentOptions
}

// NewWorkloadAgentOptions returns the flags with default value set
func NewWorkloadAgentOptions() *WorkloadAgentOptions {
return &WorkloadAgentOptions{
AgentOptions: commonoptions.NewAgentOptions(),
StatusSyncInterval: 10 * time.Second,
AppliedManifestWorkEvictionGracePeriod: 60 * time.Minute,
// NewWorkAgentConfig returns a WorkAgentConfig
func NewWorkAgentConfig(commonOpts *commonoptions.AgentOptions, opts *WorkloadAgentOptions) *WorkAgentConfig {
return &WorkAgentConfig{
agentOptions: commonOpts,
workOptions: opts,
}
}

// AddFlags register and binds the default flags
func (o *WorkloadAgentOptions) AddFlags(cmd *cobra.Command) {
flags := cmd.Flags()
o.AgentOptions.AddFlags(flags)
features.DefaultSpokeWorkMutableFeatureGate.AddFlag(flags)
// This command only supports reading from config
flags.StringVar(&o.HubKubeconfigFile, "hub-kubeconfig", o.HubKubeconfigFile, "Location of kubeconfig file to connect to hub cluster.")
flags.StringVar(&o.AgentID, "agent-id", o.AgentID, "ID of the work agent to identify the work this agent should handle after restart/recovery.")
flags.DurationVar(&o.StatusSyncInterval, "status-sync-interval", o.StatusSyncInterval, "Interval to sync resource status to hub.")
flags.DurationVar(&o.AppliedManifestWorkEvictionGracePeriod, "appliedmanifestwork-eviction-grace-period",
o.AppliedManifestWorkEvictionGracePeriod, "Grace period for appliedmanifestwork eviction")
}

// RunWorkloadAgent starts the controllers on agent to process work from hub.
func (o *WorkloadAgentOptions) RunWorkloadAgent(ctx context.Context, controllerContext *controllercmd.ControllerContext) error {
func (o *WorkAgentConfig) RunWorkloadAgent(ctx context.Context, controllerContext *controllercmd.ControllerContext) error {
// build hub client and informer
hubRestConfig, err := clientcmd.BuildConfigFromFlags("" /* leave masterurl as empty */, o.HubKubeconfigFile)
hubRestConfig, err := clientcmd.BuildConfigFromFlags("" /* leave masterurl as empty */, o.agentOptions.HubKubeconfigFile)
if err != nil {
return err
}
hubhash := helper.HubHash(hubRestConfig.Host)

agentID := o.AgentID
agentID := o.agentOptions.AgentID
if len(agentID) == 0 {
agentID = hubhash
}
@@ -90,11 +71,11 @@ func (o *WorkloadAgentOptions) RunWorkloadAgent(ctx context.Context, controllerC
}
// Only watch the cluster namespace on hub
workInformerFactory := workinformers.NewSharedInformerFactoryWithOptions(hubWorkClient, 5*time.Minute,
workinformers.WithNamespace(o.AgentOptions.SpokeClusterName))
workinformers.WithNamespace(o.agentOptions.SpokeClusterName))

// load spoke client config and create spoke clients,
// the work agent may not running in the spoke/managed cluster.
spokeRestConfig, err := o.AgentOptions.SpokeKubeConfig(controllerContext.KubeConfig)
spokeRestConfig, err := o.agentOptions.SpokeKubeConfig(controllerContext.KubeConfig)
if err != nil {
return err
}
@@ -130,19 +111,19 @@ func (o *WorkloadAgentOptions) RunWorkloadAgent(ctx context.Context, controllerC
spokeRestConfig,
spokeKubeClient,
workInformerFactory.Work().V1().ManifestWorks(),
o.AgentOptions.SpokeClusterName,
o.agentOptions.SpokeClusterName,
controllerContext.EventRecorder,
restMapper,
).NewExecutorValidator(ctx, features.DefaultSpokeWorkMutableFeatureGate.Enabled(ocmfeature.ExecutorValidatingCaches))
).NewExecutorValidator(ctx, features.SpokeMutableFeatureGate.Enabled(ocmfeature.ExecutorValidatingCaches))

manifestWorkController := manifestcontroller.NewManifestWorkController(
controllerContext.EventRecorder,
spokeDynamicClient,
spokeKubeClient,
spokeAPIExtensionClient,
hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName),
hubWorkClient.WorkV1().ManifestWorks(o.agentOptions.SpokeClusterName),
workInformerFactory.Work().V1().ManifestWorks(),
workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.AgentOptions.SpokeClusterName),
workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.agentOptions.SpokeClusterName),
spokeWorkClient.WorkV1().AppliedManifestWorks(),
spokeWorkInformerFactory.Work().V1().AppliedManifestWorks(),
hubhash, agentID,
@@ -151,9 +132,9 @@ func (o *WorkloadAgentOptions) RunWorkloadAgent(ctx context.Context, controllerC
)
addFinalizerController := finalizercontroller.NewAddFinalizerController(
controllerContext.EventRecorder,
hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName),
hubWorkClient.WorkV1().ManifestWorks(o.agentOptions.SpokeClusterName),
workInformerFactory.Work().V1().ManifestWorks(),
workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.AgentOptions.SpokeClusterName),
workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.agentOptions.SpokeClusterName),
)
appliedManifestWorkFinalizeController := finalizercontroller.NewAppliedManifestWorkFinalizeController(
controllerContext.EventRecorder,
@@ -164,9 +145,9 @@ func (o *WorkloadAgentOptions) RunWorkloadAgent(ctx context.Context, controllerC
)
manifestWorkFinalizeController := finalizercontroller.NewManifestWorkFinalizeController(
controllerContext.EventRecorder,
hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName),
hubWorkClient.WorkV1().ManifestWorks(o.agentOptions.SpokeClusterName),
workInformerFactory.Work().V1().ManifestWorks(),
workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.AgentOptions.SpokeClusterName),
workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.agentOptions.SpokeClusterName),
spokeWorkClient.WorkV1().AppliedManifestWorks(),
spokeWorkInformerFactory.Work().V1().AppliedManifestWorks(),
hubhash,
@@ -174,17 +155,17 @@ func (o *WorkloadAgentOptions) RunWorkloadAgent(ctx context.Context, controllerC
unmanagedAppliedManifestWorkController := finalizercontroller.NewUnManagedAppliedWorkController(
controllerContext.EventRecorder,
workInformerFactory.Work().V1().ManifestWorks(),
workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.AgentOptions.SpokeClusterName),
workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.agentOptions.SpokeClusterName),
spokeWorkClient.WorkV1().AppliedManifestWorks(),
spokeWorkInformerFactory.Work().V1().AppliedManifestWorks(),
o.AppliedManifestWorkEvictionGracePeriod,
o.workOptions.AppliedManifestWorkEvictionGracePeriod,
hubhash, agentID,
)
appliedManifestWorkController := appliedmanifestcontroller.NewAppliedManifestWorkController(
controllerContext.EventRecorder,
spokeDynamicClient,
workInformerFactory.Work().V1().ManifestWorks(),
workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.AgentOptions.SpokeClusterName),
workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.agentOptions.SpokeClusterName),
spokeWorkClient.WorkV1().AppliedManifestWorks(),
spokeWorkInformerFactory.Work().V1().AppliedManifestWorks(),
hubhash,
@@ -192,10 +173,10 @@ func (o *WorkloadAgentOptions) RunWorkloadAgent(ctx context.Context, controllerC
availableStatusController := statuscontroller.NewAvailableStatusController(
controllerContext.EventRecorder,
spokeDynamicClient,
hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName),
hubWorkClient.WorkV1().ManifestWorks(o.agentOptions.SpokeClusterName),
workInformerFactory.Work().V1().ManifestWorks(),
workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.AgentOptions.SpokeClusterName),
o.StatusSyncInterval,
workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.agentOptions.SpokeClusterName),
o.workOptions.StatusSyncInterval,
)

go workInformerFactory.Start(ctx.Done())

@@ -142,7 +142,7 @@ func getValueByJsonPath(name, path string, obj *unstructured.Unstructured) (*wor
Value: fieldValue,
}, nil
default:
if features.DefaultSpokeWorkMutableFeatureGate.Enabled(ocmfeature.RawFeedbackJsonString) {
if features.SpokeMutableFeatureGate.Enabled(ocmfeature.RawFeedbackJsonString) {
jsonRaw, err := json.Marshal(&t)
if err != nil {
return nil, fmt.Errorf("failed to parse the resource to json string for name %s: %v", name, err)

@@ -6,6 +6,7 @@ import (

apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/utils/pointer"

ocmfeature "open-cluster-management.io/api/feature"
@@ -126,6 +127,7 @@ func unstrctureObject(data string) *unstructured.Unstructured {
}

func TestStatusReader(t *testing.T) {
utilruntime.Must(features.SpokeMutableFeatureGate.Add(ocmfeature.DefaultSpokeWorkFeatureGates))
cases := []struct {
name string
object *unstructured.Unstructured
@@ -338,7 +340,7 @@ func TestStatusReader(t *testing.T) {
reader := NewStatusReader()
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
err := features.DefaultSpokeWorkMutableFeatureGate.Set(fmt.Sprintf("%s=%t", ocmfeature.RawFeedbackJsonString, c.enableRaw))
err := features.SpokeMutableFeatureGate.Set(fmt.Sprintf("%s=%t", ocmfeature.RawFeedbackJsonString, c.enableRaw))
if err != nil {
t.Fatal(err)
}

@@ -33,7 +33,7 @@ test-e2e: deploy-hub deploy-spoke-operator run-e2e

run-e2e: cluster-ip bootstrap-secret
go test -c ./test/e2e
./e2e.test -test.v -ginkgo.v -deploy-klusterlet=true -nil-executor-validating=true -registration-image=$(REGISTRATION_IMAGE) -work-image=$(WORK_IMAGE) -klusterlet-deploy-mode=$(KLUSTERLET_DEPLOY_MODE)
./e2e.test -test.v -ginkgo.v -deploy-klusterlet=true -nil-executor-validating=true -registration-image=$(REGISTRATION_IMAGE) -work-image=$(WORK_IMAGE) -singleton-image=$(OPERATOR_IMAGE_NAME) -klusterlet-deploy-mode=$(KLUSTERLET_DEPLOY_MODE)

clean-hub: clean-hub-cr clean-hub-operator

@@ -60,7 +60,7 @@ deploy-spoke-operator: ensure-kustomize

apply-spoke-cr: bootstrap-secret
$(KUSTOMIZE) build deploy/klusterlet/config/samples \
| $(SED_CMD) -e "s,quay.io/open-cluster-management/registration,$(REGISTRATION_IMAGE)," -e "s,quay.io/open-cluster-management/work,$(WORK_IMAGE)," -e "s,cluster1,$(MANAGED_CLUSTER_NAME)," \
| $(SED_CMD) -e "s,quay.io/open-cluster-management/registration,$(REGISTRATION_IMAGE)," -e "s,quay.io/open-cluster-management/work,$(WORK_IMAGE)," -e "s,quay.io/open-cluster-management/registration-operator,$(OPERATOR_IMAGE_NAME)," -e "s,cluster1,$(MANAGED_CLUSTER_NAME)," \
| $(KUBECTL) apply -f -

clean-hub-cr:

@@ -75,12 +75,13 @@ type Tester struct {
klusterletOperator string
registrationImage string
workImage string
singletonImage string
}

// kubeconfigPath is the path of kubeconfig file, will be get from env "KUBECONFIG" by default.
// bootstrapHubSecret is the bootstrap hub kubeconfig secret, and the format is "namespace/secretName".
// Default of bootstrapHubSecret is helpers.KlusterletDefaultNamespace/helpers.BootstrapHubKubeConfig.
func NewTester(hubKubeConfigPath, spokeKubeConfigPath, registrationImage, workImage string, timeout time.Duration) *Tester {
func NewTester(hubKubeConfigPath, spokeKubeConfigPath, registrationImage, workImage, singletonImage string, timeout time.Duration) *Tester {
var tester = Tester{
hubKubeConfigPath: hubKubeConfigPath,
spokeKubeConfigPath: spokeKubeConfigPath,
@@ -92,6 +93,7 @@ func NewTester(hubKubeConfigPath, spokeKubeConfigPath, registrationImage, workIm
klusterletOperator: "klusterlet",
registrationImage: registrationImage,
workImage: workImage,
singletonImage: singletonImage,
}

return &tester
@@ -224,6 +226,7 @@ func (t *Tester) CreateKlusterlet(name, clusterName, klusterletNamespace string,
Spec: operatorapiv1.KlusterletSpec{
RegistrationImagePullSpec: t.registrationImage,
WorkImagePullSpec: t.workImage,
ImagePullSpec: t.singletonImage,
ExternalServerURLs: []operatorapiv1.ServerURL{
{
URL: "https://localhost",

@@ -26,6 +26,7 @@ var (
eventuallyTimeout time.Duration
registrationImage string
workImage string
singletonImage string
klusterletDeployMode string
)

@@ -38,11 +39,12 @@ func init() {
flag.DurationVar(&eventuallyTimeout, "eventually-timeout", 60*time.Second, "The timeout of Gomega's Eventually (default 60 seconds)")
flag.StringVar(&registrationImage, "registration-image", "", "The image of the registration")
flag.StringVar(&workImage, "work-image", "", "The image of the work")
flag.StringVar(&singletonImage, "singleton-image", "", "The image of the klusterlet agent")
flag.StringVar(&klusterletDeployMode, "klusterlet-deploy-mode", string(operatorapiv1.InstallModeDefault), "The image of the work")
}

func TestE2E(tt *testing.T) {
t = NewTester(hubKubeconfig, managedKubeconfig, registrationImage, workImage, eventuallyTimeout)
t = NewTester(hubKubeconfig, managedKubeconfig, registrationImage, workImage, singletonImage, eventuallyTimeout)

OutputFail := func(message string, callerSkip ...int) {
t.OutputDebugLogs()

214
test/integration/operator/klusterlet_singleton_test.go
Normal file
@@ -0,0 +1,214 @@
|
||||
package operator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/onsi/ginkgo/v2"
|
||||
"github.com/onsi/gomega"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/rand"
|
||||
|
||||
operatorapiv1 "open-cluster-management.io/api/operator/v1"
|
||||
|
||||
"open-cluster-management.io/ocm/pkg/operator/helpers"
|
||||
"open-cluster-management.io/ocm/test/integration/util"
|
||||
)
|
||||
|
||||
var _ = ginkgo.Describe("Klusterlet Singleton mode", func() {
|
||||
var cancel context.CancelFunc
|
||||
var klusterlet *operatorapiv1.Klusterlet
|
||||
var klusterletNamespace string
|
||||
var agentNamespace string
|
||||
var registrationManagementRoleName string
|
||||
var registrationManagedRoleName string
|
||||
var deploymentName string
|
||||
var saName string
|
||||
var workManagementRoleName string
|
||||
var workManagedRoleName string
|
||||
|
||||
ginkgo.BeforeEach(func() {
|
||||
var ctx context.Context
|
||||
klusterlet = &operatorapiv1.Klusterlet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("klusterlet-%s", rand.String(6)),
|
||||
},
|
||||
Spec: operatorapiv1.KlusterletSpec{
|
||||
ImagePullSpec: "quay.io/open-cluster-management/registration-operator",
|
||||
ExternalServerURLs: []operatorapiv1.ServerURL{
|
||||
{
|
||||
URL: "https://localhost",
|
||||
},
|
||||
},
|
||||
ClusterName: "testcluster",
|
||||
DeployOption: operatorapiv1.KlusterletDeployOption{
|
||||
Mode: operatorapiv1.InstallModeSingleton,
|
||||
},
|
||||
},
|
||||
}
|
||||
klusterletNamespace = helpers.KlusterletNamespace(klusterlet)
|
||||
agentNamespace = helpers.AgentNamespace(klusterlet)
|
||||
ns := &corev1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: agentNamespace,
|
||||
},
|
||||
}
|
||||
_, err := kubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
ctx, cancel = context.WithCancel(context.Background())
|
||||
go startKlusterletOperator(ctx)
|
||||
})
|
||||
|
||||
ginkgo.AfterEach(func() {
|
||||
err := kubeClient.CoreV1().Namespaces().Delete(context.Background(), agentNamespace, metav1.DeleteOptions{})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
if cancel != nil {
|
||||
cancel()
|
||||
}
|
||||
})
|
||||
|
||||
ginkgo.Context("Deploy and clean klusterlet component", func() {
|
||||
ginkgo.BeforeEach(func() {
|
||||
deploymentName = fmt.Sprintf("%s-agent", klusterlet.Name)
|
||||
registrationManagementRoleName = fmt.Sprintf("open-cluster-management:management:%s-registration:agent", klusterlet.Name)
|
||||
workManagementRoleName = fmt.Sprintf("open-cluster-management:management:%s-work:agent", klusterlet.Name)
|
||||
registrationManagedRoleName = fmt.Sprintf("open-cluster-management:%s-registration:agent", klusterlet.Name)
|
||||
workManagedRoleName = fmt.Sprintf("open-cluster-management:%s-work:agent", klusterlet.Name)
|
||||
saName = fmt.Sprintf("%s-agent-sa", klusterlet.Name)
|
||||
})
|
||||
|
||||
ginkgo.AfterEach(func() {
|
||||
gomega.Expect(operatorClient.OperatorV1().Klusterlets().Delete(context.Background(), klusterlet.Name, metav1.DeleteOptions{})).To(gomega.BeNil())
|
||||
})
|
||||
|
||||
ginkgo.It("should have expected resource created successfully", func() {
|
||||
_, err := operatorClient.OperatorV1().Klusterlets().Create(context.Background(), klusterlet, metav1.CreateOptions{})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Check if relatedResources are correct
|
||||
gomega.Eventually(func() error {
|
||||
actual, err := operatorClient.OperatorV1().Klusterlets().Get(context.Background(), klusterlet.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Printf("related resources are %v\n", actual.Status.RelatedResources)
|
||||
|
||||
// 10 managed static manifests + 9 management static manifests + 2CRDs + 1 deployments
|
||||
if len(actual.Status.RelatedResources) != 22 {
|
||||
return fmt.Errorf("should get 22 relatedResources, actual got %v", len(actual.Status.RelatedResources))
|
||||
}
|
||||
return nil
|
||||
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
|
||||
|
||||
// Check CRDs
|
||||
gomega.Eventually(func() bool {
|
||||
if _, err := apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.Background(), "appliedmanifestworks.work.open-cluster-management.io", metav1.GetOptions{}); err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
|
||||
gomega.Eventually(func() bool {
|
||||
if _, err := apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.Background(), "clusterclaims.cluster.open-cluster-management.io", metav1.GetOptions{}); err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
|
||||
|
||||
// Check clusterrole/clusterrolebinding
|
||||
gomega.Eventually(func() bool {
|
||||
if _, err := kubeClient.RbacV1().ClusterRoles().Get(context.Background(), registrationManagedRoleName, metav1.GetOptions{}); err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
|
||||
gomega.Eventually(func() bool {
|
||||
if _, err := kubeClient.RbacV1().ClusterRoles().Get(context.Background(), workManagedRoleName, metav1.GetOptions{}); err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
|
||||
gomega.Eventually(func() bool {
|
||||
if _, err := kubeClient.RbacV1().ClusterRoleBindings().Get(context.Background(), registrationManagedRoleName, metav1.GetOptions{}); err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
|
||||
gomega.Eventually(func() bool {
|
||||
if _, err := kubeClient.RbacV1().ClusterRoleBindings().Get(context.Background(), workManagedRoleName, metav1.GetOptions{}); err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
|
||||
|
||||
// Check role/rolebinding
|
||||
gomega.Eventually(func() bool {
|
||||
if _, err := kubeClient.RbacV1().Roles(agentNamespace).Get(context.Background(), registrationManagementRoleName, metav1.GetOptions{}); err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
|
||||
gomega.Eventually(func() bool {
|
||||
if _, err := kubeClient.RbacV1().Roles(agentNamespace).Get(context.Background(), workManagementRoleName, metav1.GetOptions{}); err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
|
||||
gomega.Eventually(func() bool {
|
||||
if _, err := kubeClient.RbacV1().RoleBindings(agentNamespace).Get(context.Background(), registrationManagementRoleName, metav1.GetOptions{}); err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
|
||||
gomega.Eventually(func() bool {
|
||||
if _, err := kubeClient.RbacV1().RoleBindings(agentNamespace).Get(context.Background(), workManagementRoleName, metav1.GetOptions{}); err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
|
||||
// Check extension apiserver rolebinding
|
||||
gomega.Eventually(func() bool {
|
||||
if _, err := kubeClient.RbacV1().RoleBindings("kube-system").Get(context.Background(), registrationManagementRoleName, metav1.GetOptions{}); err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
|
||||
gomega.Eventually(func() bool {
|
||||
if _, err := kubeClient.RbacV1().RoleBindings("kube-system").Get(context.Background(), workManagementRoleName, metav1.GetOptions{}); err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
|
||||
|
||||
// Check service account
|
||||
gomega.Eventually(func() bool {
|
||||
if _, err := kubeClient.CoreV1().ServiceAccounts(agentNamespace).Get(context.Background(), saName, metav1.GetOptions{}); err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
|
||||
|
||||
// Check deployment
|
||||
gomega.Eventually(func() bool {
|
||||
if _, err := kubeClient.AppsV1().Deployments(agentNamespace).Get(context.Background(), deploymentName, metav1.GetOptions{}); err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
|
||||
|
||||
// Check addon namespace
|
||||
addonNamespace := fmt.Sprintf("%s-addon", klusterletNamespace)
|
||||
gomega.Eventually(func() bool {
|
||||
if _, err := kubeClient.CoreV1().Namespaces().Get(context.Background(), addonNamespace, metav1.GetOptions{}); err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
|
||||
|
||||
util.AssertKlusterletCondition(klusterlet.Name, operatorClient, "Applied", "KlusterletApplied", metav1.ConditionTrue)
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -165,20 +165,20 @@ var _ = ginkgo.Describe("Addon Lease Resync", func() {
|
||||
hubKubeconfigDir = path.Join(util.TestDir, fmt.Sprintf("addontest-%s", suffix), "hub-kubeconfig")
|
||||
addOnName = fmt.Sprintf("addon-%s", suffix)
|
||||
|
||||
err := features.DefaultSpokeRegistrationMutableFeatureGate.Set("AddonManagement=true")
|
||||
err := features.SpokeMutableFeatureGate.Set("AddonManagement=true")
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
agentOptions := spoke.SpokeAgentOptions{
|
||||
AgentOptions: commonoptions.NewAgentOptions(),
|
||||
agentOptions := &spoke.SpokeAgentOptions{
|
||||
BootstrapKubeconfig: bootstrapKubeConfigFile,
|
||||
HubKubeconfigSecret: hubKubeconfigSecret,
|
||||
HubKubeconfigDir: hubKubeconfigDir,
|
||||
ClusterHealthCheckPeriod: 1 * time.Minute,
|
||||
}
|
||||
|
||||
agentOptions.AgentOptions.SpokeClusterName = managedClusterName
|
||||
commOptions := commonoptions.NewAgentOptions()
|
||||
commOptions.HubKubeconfigDir = hubKubeconfigDir
|
||||
commOptions.SpokeClusterName = managedClusterName
|
||||
|
||||
cancel = runAgent("addontest", agentOptions, spokeCfg)
|
||||
cancel = runAgent("addontest", agentOptions, commOptions, spokeCfg)
|
||||
})
|
||||
|
||||
ginkgo.AfterEach(func() {
|
||||
|
||||
@@ -39,21 +39,21 @@ var _ = ginkgo.Describe("Addon Registration", func() {
|
||||
hubKubeconfigDir = path.Join(util.TestDir, fmt.Sprintf("addontest-%s", suffix), "hub-kubeconfig")
|
||||
addOnName = fmt.Sprintf("addon-%s", suffix)
|
||||
|
||||
err := features.DefaultSpokeRegistrationMutableFeatureGate.Set("AddonManagement=true")
|
||||
err := features.SpokeMutableFeatureGate.Set("AddonManagement=true")
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
agentOptions := spoke.SpokeAgentOptions{
|
||||
AgentOptions: commonoptions.NewAgentOptions(),
|
||||
agentOptions := &spoke.SpokeAgentOptions{
|
||||
BootstrapKubeconfig: bootstrapKubeConfigFile,
|
||||
HubKubeconfigSecret: hubKubeconfigSecret,
|
||||
HubKubeconfigDir: hubKubeconfigDir,
|
||||
ClusterHealthCheckPeriod: 1 * time.Minute,
|
||||
}
|
||||
|
||||
agentOptions.AgentOptions.SpokeClusterName = managedClusterName
|
||||
commOptions := commonoptions.NewAgentOptions()
|
||||
commOptions.HubKubeconfigDir = hubKubeconfigDir
|
||||
commOptions.SpokeClusterName = managedClusterName
|
||||
|
||||
// run registration agent
|
||||
cancel = runAgent("addontest", agentOptions, spokeCfg)
|
||||
cancel = runAgent("addontest", agentOptions, commOptions, spokeCfg)
|
||||
})
|
||||
|
||||
ginkgo.AfterEach(
|
||||
|
||||
@@ -20,18 +20,18 @@ var _ = ginkgo.Describe("Certificate Rotation", func() {
|
||||
hubKubeconfigSecret := "rotationtest-hub-kubeconfig-secret"
|
||||
hubKubeconfigDir := path.Join(util.TestDir, "rotationtest", "hub-kubeconfig")
|
||||
|
||||
agentOptions := spoke.SpokeAgentOptions{
|
||||
AgentOptions: commonoptions.NewAgentOptions(),
|
||||
agentOptions := &spoke.SpokeAgentOptions{
|
||||
BootstrapKubeconfig: bootstrapKubeConfigFile,
|
||||
HubKubeconfigSecret: hubKubeconfigSecret,
|
||||
HubKubeconfigDir: hubKubeconfigDir,
|
||||
ClusterHealthCheckPeriod: 1 * time.Minute,
|
||||
}
|
||||
|
||||
agentOptions.AgentOptions.SpokeClusterName = managedClusterName
|
||||
commOptions := commonoptions.NewAgentOptions()
|
||||
commOptions.HubKubeconfigDir = hubKubeconfigDir
|
||||
commOptions.SpokeClusterName = managedClusterName
|
||||
|
||||
// run registration agent
|
||||
cancel := runAgent("rotationtest", agentOptions, spokeCfg)
|
||||
cancel := runAgent("rotationtest", agentOptions, commOptions, spokeCfg)
|
||||
defer cancel()
|
||||
|
||||
// after bootstrap the spokecluster and csr should be created
|
||||
|
||||
@@ -89,18 +89,18 @@ var _ = ginkgo.Describe("Disaster Recovery", func() {
|
||||
}
|
||||
|
||||
startRegistrationAgent := func(managedClusterName, bootstrapKubeConfigFile, hubKubeconfigSecret, hubKubeconfigDir string) context.CancelFunc {
|
||||
err := features.DefaultSpokeRegistrationMutableFeatureGate.Set("AddonManagement=true")
|
||||
err := features.SpokeMutableFeatureGate.Set("AddonManagement=true")
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
agentOptions := spoke.SpokeAgentOptions{
|
||||
AgentOptions: commonoptions.NewAgentOptions(),
|
||||
agentOptions := &spoke.SpokeAgentOptions{
|
||||
BootstrapKubeconfig: bootstrapKubeConfigFile,
|
||||
HubKubeconfigSecret: hubKubeconfigSecret,
|
||||
HubKubeconfigDir: hubKubeconfigDir,
|
||||
ClusterHealthCheckPeriod: 1 * time.Minute,
|
||||
}
|
||||
agentOptions.AgentOptions.SpokeClusterName = managedClusterName
|
||||
return runAgent("addontest", agentOptions, spokeCfg)
|
||||
commOptions := commonoptions.NewAgentOptions()
|
||||
commOptions.HubKubeconfigDir = hubKubeconfigDir
|
||||
commOptions.SpokeClusterName = managedClusterName
|
||||
return runAgent("addontest", agentOptions, commOptions, spokeCfg)
|
||||
}
|
||||
|
||||
assertSuccessClusterBootstrap := func(testNamespace, managedClusterName, hubKubeconfigSecret string, hubKubeClient, spokeKubeClient kubernetes.Interface, hubClusterClient clusterclientset.Interface, auth *util.TestAuthn) {
|
||||
|
||||
@@ -23,7 +23,9 @@ import (
|
||||
clusterclientset "open-cluster-management.io/api/client/cluster/clientset/versioned"
|
||||
workclientset "open-cluster-management.io/api/client/work/clientset/versioned"
|
||||
clusterv1 "open-cluster-management.io/api/cluster/v1"
|
||||
ocmfeature "open-cluster-management.io/api/feature"
|
||||
|
||||
commonoptions "open-cluster-management.io/ocm/pkg/common/options"
|
||||
"open-cluster-management.io/ocm/pkg/features"
|
||||
"open-cluster-management.io/ocm/pkg/registration/clientcert"
|
||||
"open-cluster-management.io/ocm/pkg/registration/hub"
|
||||
@@ -68,10 +70,11 @@ var CRDPaths = []string{
|
||||
"./vendor/open-cluster-management.io/api/cluster/v1alpha1/0000_02_clusters.open-cluster-management.io_clusterclaims.crd.yaml",
|
||||
}
|
||||
|
||||
func runAgent(name string, opt spoke.SpokeAgentOptions, cfg *rest.Config) context.CancelFunc {
|
||||
func runAgent(name string, opt *spoke.SpokeAgentOptions, commOption *commonoptions.AgentOptions, cfg *rest.Config) context.CancelFunc {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
go func() {
|
||||
err := opt.RunSpokeAgent(ctx, &controllercmd.ControllerContext{
|
||||
config := spoke.NewSpokeAgentConfig(commOption, opt)
|
||||
err := config.RunSpokeAgent(ctx, &controllercmd.ControllerContext{
|
||||
KubeConfig: cfg,
|
||||
EventRecorder: util.NewIntegrationTestEventRecorder(name),
|
||||
})
|
||||
@@ -126,7 +129,9 @@ var _ = ginkgo.BeforeSuite(func() {
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
gomega.Expect(cfg).ToNot(gomega.BeNil())
|
||||
|
||||
err = clusterv1.AddToScheme(scheme.Scheme)
|
||||
features.SpokeMutableFeatureGate.Add(ocmfeature.DefaultSpokeRegistrationFeatureGates)
|
||||
|
||||
err = clusterv1.Install(scheme.Scheme)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// prepare configs
|
||||
|
||||
@@ -33,15 +33,15 @@ var _ = ginkgo.Describe("Cluster Lease Update", func() {
|
||||
|
||||
ginkgo.It("managed cluster lease should be updated constantly", func() {
|
||||
// run registration agent
|
||||
agentOptions := spoke.SpokeAgentOptions{
|
||||
AgentOptions: commonoptions.NewAgentOptions(),
|
||||
agentOptions := &spoke.SpokeAgentOptions{
|
||||
BootstrapKubeconfig: bootstrapKubeConfigFile,
|
||||
HubKubeconfigSecret: hubKubeconfigSecret,
|
||||
HubKubeconfigDir: hubKubeconfigDir,
|
||||
ClusterHealthCheckPeriod: 1 * time.Minute,
|
||||
}
|
||||
agentOptions.AgentOptions.SpokeClusterName = managedClusterName
|
||||
cancel := runAgent("cluster-leasetest", agentOptions, spokeCfg)
|
||||
commOptions := commonoptions.NewAgentOptions()
|
||||
commOptions.HubKubeconfigDir = hubKubeconfigDir
|
||||
commOptions.SpokeClusterName = managedClusterName
|
||||
cancel := runAgent("cluster-leasetest", agentOptions, commOptions, spokeCfg)
|
||||
defer cancel()
|
||||
|
||||
bootstrapManagedCluster(managedClusterName, hubKubeconfigSecret, util.TestLeaseDurationSeconds)
|
||||
@@ -52,15 +52,15 @@ var _ = ginkgo.Describe("Cluster Lease Update", func() {
|
||||
|
||||
ginkgo.It("managed cluster available condition should be recovered after its lease update is recovered", func() {
|
||||
// run registration agent
|
||||
agentOptions := spoke.SpokeAgentOptions{
|
||||
AgentOptions: commonoptions.NewAgentOptions(),
|
||||
agentOptions := &spoke.SpokeAgentOptions{
|
||||
BootstrapKubeconfig: bootstrapKubeConfigFile,
|
||||
HubKubeconfigSecret: hubKubeconfigSecret,
|
||||
HubKubeconfigDir: hubKubeconfigDir,
|
||||
ClusterHealthCheckPeriod: 1 * time.Minute,
|
||||
}
|
||||
agentOptions.AgentOptions.SpokeClusterName = managedClusterName
|
||||
stop := runAgent("cluster-availabletest", agentOptions, spokeCfg)
|
||||
commOptions := commonoptions.NewAgentOptions()
|
||||
commOptions.HubKubeconfigDir = hubKubeconfigDir
|
||||
commOptions.SpokeClusterName = managedClusterName
|
||||
stop := runAgent("cluster-availabletest", agentOptions, commOptions, spokeCfg)
|
||||
|
||||
bootstrapManagedCluster(managedClusterName, hubKubeconfigSecret, util.TestLeaseDurationSeconds)
|
||||
assertAvailableCondition(managedClusterName, metav1.ConditionTrue, 0)
|
||||
@@ -72,15 +72,15 @@ var _ = ginkgo.Describe("Cluster Lease Update", func() {
|
||||
gracePeriod := 5 * util.TestLeaseDurationSeconds
|
||||
assertAvailableCondition(managedClusterName, metav1.ConditionUnknown, gracePeriod)
|
||||
|
||||
agentOptions = spoke.SpokeAgentOptions{
|
||||
AgentOptions: commonoptions.NewAgentOptions(),
|
||||
agentOptions = &spoke.SpokeAgentOptions{
|
||||
BootstrapKubeconfig: bootstrapKubeConfigFile,
|
||||
HubKubeconfigSecret: hubKubeconfigSecret,
|
||||
HubKubeconfigDir: hubKubeconfigDir,
|
||||
ClusterHealthCheckPeriod: 1 * time.Minute,
|
||||
}
|
||||
agentOptions.AgentOptions.SpokeClusterName = managedClusterName
|
||||
stop = runAgent("cluster-availabletest", agentOptions, spokeCfg)
|
||||
commOptions = commonoptions.NewAgentOptions()
|
||||
commOptions.HubKubeconfigDir = hubKubeconfigDir
|
||||
commOptions.SpokeClusterName = managedClusterName
|
||||
stop = runAgent("cluster-availabletest", agentOptions, commOptions, spokeCfg)
|
||||
defer stop()
|
||||
|
||||
// after one grace period, make sure the managed cluster available condition is recovered
|
||||
@@ -90,15 +90,15 @@ var _ = ginkgo.Describe("Cluster Lease Update", func() {
|
||||
|
||||
ginkgo.It("managed cluster available condition should be recovered after the cluster is restored", func() {
|
||||
// run registration agent
|
||||
agentOptions := spoke.SpokeAgentOptions{
|
||||
AgentOptions: commonoptions.NewAgentOptions(),
|
||||
agentOptions := &spoke.SpokeAgentOptions{
|
||||
BootstrapKubeconfig: bootstrapKubeConfigFile,
|
||||
HubKubeconfigSecret: hubKubeconfigSecret,
|
||||
HubKubeconfigDir: hubKubeconfigDir,
|
||||
ClusterHealthCheckPeriod: 1 * time.Minute,
|
||||
}
|
||||
agentOptions.AgentOptions.SpokeClusterName = managedClusterName
|
||||
cancel := runAgent("cluster-leasetest", agentOptions, spokeCfg)
|
||||
commOptions := commonoptions.NewAgentOptions()
|
||||
commOptions.HubKubeconfigDir = hubKubeconfigDir
|
||||
commOptions.SpokeClusterName = managedClusterName
|
||||
cancel := runAgent("cluster-leasetest", agentOptions, commOptions, spokeCfg)
|
||||
defer cancel()
|
||||
|
||||
bootstrapManagedCluster(managedClusterName, hubKubeconfigSecret, util.TestLeaseDurationSeconds)
|
||||
@@ -141,15 +141,15 @@ var _ = ginkgo.Describe("Cluster Lease Update", func() {
|
||||
|
||||
ginkgo.It("should use a short lease duration", func() {
|
||||
// run registration agent
|
||||
agentOptions := spoke.SpokeAgentOptions{
|
||||
AgentOptions: commonoptions.NewAgentOptions(),
|
||||
agentOptions := &spoke.SpokeAgentOptions{
|
||||
BootstrapKubeconfig: bootstrapKubeConfigFile,
|
||||
HubKubeconfigSecret: hubKubeconfigSecret,
|
||||
HubKubeconfigDir: hubKubeconfigDir,
|
||||
ClusterHealthCheckPeriod: 1 * time.Minute,
|
||||
}
|
||||
agentOptions.AgentOptions.SpokeClusterName = managedClusterName
|
||||
stop := runAgent("cluster-leasetest", agentOptions, spokeCfg)
|
||||
commOptions := commonoptions.NewAgentOptions()
|
||||
commOptions.HubKubeconfigDir = hubKubeconfigDir
|
||||
commOptions.SpokeClusterName = managedClusterName
|
||||
stop := runAgent("cluster-leasetest", agentOptions, commOptions, spokeCfg)
|
||||
|
||||
bootstrapManagedCluster(managedClusterName, hubKubeconfigSecret, 60)
|
||||
assertAvailableCondition(managedClusterName, metav1.ConditionTrue, 0)
|
||||
|
||||
@@ -35,16 +35,16 @@ var _ = ginkgo.Describe("Agent Recovery", func() {
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// run registration agent with an invalid bootstrap kubeconfig
|
||||
agentOptions := spoke.SpokeAgentOptions{
|
||||
AgentOptions: commonoptions.NewAgentOptions(),
|
||||
agentOptions := &spoke.SpokeAgentOptions{
|
||||
BootstrapKubeconfig: bootstrapFile,
|
||||
HubKubeconfigSecret: hubKubeconfigSecret,
|
||||
HubKubeconfigDir: hubKubeconfigDir,
|
||||
ClusterHealthCheckPeriod: 1 * time.Minute,
|
||||
}
|
||||
agentOptions.AgentOptions.SpokeClusterName = managedClusterName
|
||||
commOptions := commonoptions.NewAgentOptions()
|
||||
commOptions.HubKubeconfigDir = hubKubeconfigDir
|
||||
commOptions.SpokeClusterName = managedClusterName
|
||||
|
||||
cancel := runAgent("bootstrap-recoverytest", agentOptions, spokeCfg)
|
||||
cancel := runAgent("bootstrap-recoverytest", agentOptions, commOptions, spokeCfg)
|
||||
defer cancel()
|
||||
|
||||
// the managedcluster should not be created
|
||||
@@ -123,16 +123,16 @@ var _ = ginkgo.Describe("Agent Recovery", func() {
|
||||
hubKubeconfigDir := path.Join(util.TestDir, "hubkubeconfig-recoverytest", "hub-kubeconfig")
|
||||
|
||||
// run registration agent
|
||||
agentOptions := spoke.SpokeAgentOptions{
|
||||
AgentOptions: commonoptions.NewAgentOptions(),
|
||||
agentOptions := &spoke.SpokeAgentOptions{
|
||||
BootstrapKubeconfig: bootstrapKubeConfigFile,
|
||||
HubKubeconfigSecret: hubKubeconfigSecret,
|
||||
HubKubeconfigDir: hubKubeconfigDir,
|
||||
ClusterHealthCheckPeriod: 1 * time.Minute,
|
||||
}
|
||||
agentOptions.AgentOptions.SpokeClusterName = spokeClusterName
|
||||
commOptions := commonoptions.NewAgentOptions()
|
||||
commOptions.HubKubeconfigDir = hubKubeconfigDir
|
||||
commOptions.SpokeClusterName = spokeClusterName
|
||||
|
||||
cancel := runAgent("hubkubeconfig-recoverytest", agentOptions, spokeCfg)
|
||||
cancel := runAgent("hubkubeconfig-recoverytest", agentOptions, commOptions, spokeCfg)
|
||||
defer cancel()
|
||||
|
||||
// after bootstrap the spokecluster and csr should be created
|
||||
|
||||
@@ -34,16 +34,16 @@ var _ = ginkgo.Describe("Agent Restart", func() {
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
ginkgo.By("run registration agent")
|
||||
agentOptions := spoke.SpokeAgentOptions{
|
||||
AgentOptions: commonoptions.NewAgentOptions(),
|
||||
agentOptions := &spoke.SpokeAgentOptions{
|
||||
BootstrapKubeconfig: bootstrapFile,
|
||||
HubKubeconfigSecret: hubKubeconfigSecret,
|
||||
HubKubeconfigDir: hubKubeconfigDir,
|
||||
ClusterHealthCheckPeriod: 1 * time.Minute,
|
||||
}
|
||||
agentOptions.AgentOptions.SpokeClusterName = managedClusterName
|
||||
commOptions := commonoptions.NewAgentOptions()
|
||||
commOptions.HubKubeconfigDir = hubKubeconfigDir
|
||||
commOptions.SpokeClusterName = managedClusterName
|
||||
|
||||
stopAgent := runAgent("restart-test", agentOptions, spokeCfg)
|
||||
stopAgent := runAgent("restart-test", agentOptions, commOptions, spokeCfg)
|
||||
|
||||
ginkgo.By("Check existence of csr and ManagedCluster")
|
||||
// the csr should be created
|
||||
@@ -111,15 +111,15 @@ var _ = ginkgo.Describe("Agent Restart", func() {
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
ginkgo.By("Restart registration agent")
|
||||
agentOptions = spoke.SpokeAgentOptions{
|
||||
AgentOptions: commonoptions.NewAgentOptions(),
|
||||
agentOptions = &spoke.SpokeAgentOptions{
|
||||
BootstrapKubeconfig: bootstrapFile,
|
||||
HubKubeconfigSecret: hubKubeconfigSecret,
|
||||
HubKubeconfigDir: hubKubeconfigDir,
|
||||
ClusterHealthCheckPeriod: 1 * time.Minute,
|
||||
}
|
||||
agentOptions.AgentOptions.SpokeClusterName = managedClusterName
|
||||
stopAgent = runAgent("restart-test", agentOptions, spokeCfg)
|
||||
commOptions = commonoptions.NewAgentOptions()
|
||||
commOptions.HubKubeconfigDir = hubKubeconfigDir
|
||||
commOptions.SpokeClusterName = managedClusterName
|
||||
stopAgent = runAgent("restart-test", agentOptions, commOptions, spokeCfg)
|
||||
defer stopAgent()
|
||||
|
||||
ginkgo.By("Check if ManagedCluster joins the hub")
|
||||
@@ -164,15 +164,15 @@ var _ = ginkgo.Describe("Agent Restart", func() {
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
ginkgo.By("run registration agent")
|
||||
agentOptions := spoke.SpokeAgentOptions{
|
||||
AgentOptions: commonoptions.NewAgentOptions(),
|
||||
agentOptions := &spoke.SpokeAgentOptions{
|
||||
BootstrapKubeconfig: bootstrapFile,
|
||||
HubKubeconfigSecret: hubKubeconfigSecret,
|
||||
HubKubeconfigDir: hubKubeconfigDir,
|
||||
ClusterHealthCheckPeriod: 1 * time.Minute,
|
||||
}
|
||||
agentOptions.AgentOptions.SpokeClusterName = managedClusterName
|
||||
stopAgent := runAgent("restart-test", agentOptions, spokeCfg)
|
||||
commOptions := commonoptions.NewAgentOptions()
|
||||
commOptions.HubKubeconfigDir = hubKubeconfigDir
|
||||
commOptions.SpokeClusterName = managedClusterName
|
||||
stopAgent := runAgent("restart-test", agentOptions, commOptions, spokeCfg)
|
||||
|
||||
ginkgo.By("Check existence of csr and ManagedCluster")
|
||||
// the csr should be created
|
||||
@@ -226,15 +226,15 @@ var _ = ginkgo.Describe("Agent Restart", func() {
|
||||
|
||||
ginkgo.By("Restart registration agent with a new cluster name")
|
||||
managedClusterName = "restart-test-cluster3"
|
||||
agentOptions = spoke.SpokeAgentOptions{
|
||||
AgentOptions: commonoptions.NewAgentOptions(),
|
||||
agentOptions = &spoke.SpokeAgentOptions{
|
||||
BootstrapKubeconfig: bootstrapFile,
|
||||
HubKubeconfigSecret: hubKubeconfigSecret,
|
||||
HubKubeconfigDir: hubKubeconfigDir,
|
||||
ClusterHealthCheckPeriod: 1 * time.Minute,
|
||||
}
|
||||
agentOptions.AgentOptions.SpokeClusterName = managedClusterName
|
||||
stopAgent = runAgent("restart-test", agentOptions, spokeCfg)
|
||||
commOptions = commonoptions.NewAgentOptions()
|
||||
commOptions.HubKubeconfigDir = hubKubeconfigDir
|
||||
commOptions.SpokeClusterName = managedClusterName
|
||||
stopAgent = runAgent("restart-test", agentOptions, commOptions, spokeCfg)
|
||||
defer stopAgent()
|
||||
|
||||
ginkgo.By("Check the existence of csr and the new ManagedCluster")
|
||||
|
||||
@@ -29,17 +29,17 @@ var _ = ginkgo.Describe("Cluster Auto Approval", func() {
|
||||
err = authn.CreateBootstrapKubeConfigWithUser(bootstrapFile, serverCertFile, securePort, util.AutoApprovalBootstrapUser)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
agentOptions := spoke.SpokeAgentOptions{
|
||||
AgentOptions: commonoptions.NewAgentOptions(),
|
||||
agentOptions := &spoke.SpokeAgentOptions{
|
||||
BootstrapKubeconfig: bootstrapFile,
|
||||
HubKubeconfigSecret: hubKubeconfigSecret,
|
||||
HubKubeconfigDir: hubKubeconfigDir,
|
||||
ClusterHealthCheckPeriod: 1 * time.Minute,
|
||||
}
|
||||
agentOptions.AgentOptions.SpokeClusterName = managedClusterName
|
||||
commOptions := commonoptions.NewAgentOptions()
|
||||
commOptions.HubKubeconfigDir = hubKubeconfigDir
|
||||
commOptions.SpokeClusterName = managedClusterName
|
||||
|
||||
// run registration agent
|
||||
cancel := runAgent("autoapprovaltest", agentOptions, spokeCfg)
|
||||
cancel := runAgent("autoapprovaltest", agentOptions, commOptions, spokeCfg)
|
||||
defer cancel()
|
||||
|
||||
// after bootstrap the spokecluster should be accepted and its csr should be auto approved
|
||||
|
||||
@@ -49,16 +49,16 @@ var _ = ginkgo.Describe("Cluster Claim", func() {
|
||||
}
|
||||
|
||||
// run registration agent
|
||||
agentOptions := spoke.SpokeAgentOptions{
|
||||
AgentOptions: commonoptions.NewAgentOptions(),
|
||||
agentOptions := &spoke.SpokeAgentOptions{
|
||||
BootstrapKubeconfig: bootstrapKubeConfigFile,
|
||||
HubKubeconfigSecret: hubKubeconfigSecret,
|
||||
HubKubeconfigDir: hubKubeconfigDir,
|
||||
ClusterHealthCheckPeriod: 1 * time.Minute,
|
||||
MaxCustomClusterClaims: maxCustomClusterClaims,
|
||||
}
|
||||
agentOptions.AgentOptions.SpokeClusterName = managedClusterName
|
||||
cancel = runAgent("claimtest", agentOptions, spokeCfg)
|
||||
commOptions := commonoptions.NewAgentOptions()
|
||||
commOptions.HubKubeconfigDir = hubKubeconfigDir
|
||||
commOptions.SpokeClusterName = managedClusterName
|
||||
cancel = runAgent("claimtest", agentOptions, commOptions, spokeCfg)
|
||||
})
|
||||
|
||||
ginkgo.AfterEach(
|
||||
|
||||
@@ -25,16 +25,16 @@ var _ = ginkgo.Describe("Joining Process", func() {
|
||||
hubKubeconfigDir := path.Join(util.TestDir, "joiningtest", "hub-kubeconfig")
|
||||
|
||||
// run registration agent
|
||||
agentOptions := spoke.SpokeAgentOptions{
|
||||
AgentOptions: commonoptions.NewAgentOptions(),
|
||||
agentOptions := &spoke.SpokeAgentOptions{
|
||||
BootstrapKubeconfig: bootstrapKubeConfigFile,
|
||||
HubKubeconfigSecret: hubKubeconfigSecret,
|
||||
HubKubeconfigDir: hubKubeconfigDir,
|
||||
ClusterHealthCheckPeriod: 1 * time.Minute,
|
||||
}
|
||||
agentOptions.AgentOptions.SpokeClusterName = managedClusterName
|
||||
commOptions := commonoptions.NewAgentOptions()
|
||||
commOptions.HubKubeconfigDir = hubKubeconfigDir
|
||||
commOptions.SpokeClusterName = managedClusterName
|
||||
|
||||
cancel := runAgent("joiningtest", agentOptions, spokeCfg)
|
||||
cancel := runAgent("joiningtest", agentOptions, commOptions, spokeCfg)
|
||||
defer cancel()
|
||||
|
||||
// the spoke cluster and csr should be created after bootstrap
|
||||
|
||||
@@ -31,16 +31,16 @@ var _ = ginkgo.Describe("Collecting Node Resource", func() {
hubKubeconfigDir := path.Join(util.TestDir, "resorucetest", "hub-kubeconfig")

// run registration agent
agentOptions := spoke.SpokeAgentOptions{
AgentOptions: commonoptions.NewAgentOptions(),
agentOptions := &spoke.SpokeAgentOptions{
BootstrapKubeconfig: bootstrapKubeConfigFile,
HubKubeconfigSecret: hubKubeconfigSecret,
HubKubeconfigDir: hubKubeconfigDir,
ClusterHealthCheckPeriod: 1 * time.Minute,
}
agentOptions.AgentOptions.SpokeClusterName = managedClusterName
commOptions := commonoptions.NewAgentOptions()
commOptions.HubKubeconfigDir = hubKubeconfigDir
commOptions.SpokeClusterName = managedClusterName

cancel := runAgent("resorucetest", agentOptions, spokeCfg)
cancel := runAgent("resorucetest", agentOptions, commOptions, spokeCfg)
defer cancel()

// the spoke cluster and csr should be created after bootstrap

@@ -37,15 +37,17 @@ var _ = ginkgo.Describe("ManagedCluster Taints Update", func() {
ctx, stop := context.WithCancel(context.Background())
// run registration agent
go func() {
agentOptions := spoke.SpokeAgentOptions{
AgentOptions: commonoptions.NewAgentOptions(),
agentOptions := &spoke.SpokeAgentOptions{
BootstrapKubeconfig: bootstrapKubeConfigFile,
HubKubeconfigSecret: hubKubeconfigSecret,
HubKubeconfigDir: hubKubeconfigDir,
ClusterHealthCheckPeriod: 1 * time.Minute,
}
agentOptions.AgentOptions.SpokeClusterName = managedClusterName
err := agentOptions.RunSpokeAgent(ctx, &controllercmd.ControllerContext{
commOptions := commonoptions.NewAgentOptions()
commOptions.HubKubeconfigDir = hubKubeconfigDir
commOptions.SpokeClusterName = managedClusterName

agentCfg := spoke.NewSpokeAgentConfig(commOptions, agentOptions)
err := agentCfg.RunSpokeAgent(ctx, &controllercmd.ControllerContext{
KubeConfig: spokeCfg,
EventRecorder: util.NewIntegrationTestEventRecorder("cluster-tainttest"),
})

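Read together, the added lines in the hunk above show the new construction pattern for the registration agent in these tests: the cluster name and hub kubeconfig location move onto commonoptions.AgentOptions, the two option structs are combined with spoke.NewSpokeAgentConfig, and RunSpokeAgent is then called on the resulting config rather than on SpokeAgentOptions itself. A condensed sketch follows; whether HubKubeconfigDir also stays on SpokeAgentOptions cannot be read off this stripped diff, so the field placement below is an assumption:

// Condensed sketch of the new-side lines of the hunk above; field placement
// between the two option structs is partly assumed.
agentOptions := &spoke.SpokeAgentOptions{
	BootstrapKubeconfig:      bootstrapKubeConfigFile,
	HubKubeconfigSecret:      hubKubeconfigSecret,
	ClusterHealthCheckPeriod: 1 * time.Minute,
}
commOptions := commonoptions.NewAgentOptions()
commOptions.HubKubeconfigDir = hubKubeconfigDir
commOptions.SpokeClusterName = managedClusterName

agentCfg := spoke.NewSpokeAgentConfig(commOptions, agentOptions)
err := agentCfg.RunSpokeAgent(ctx, &controllercmd.ControllerContext{
	KubeConfig:    spokeCfg,
	EventRecorder: util.NewIntegrationTestEventRecorder("cluster-tainttest"),
})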
@@ -21,6 +21,7 @@ import (

var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
var o *spoke.WorkloadAgentOptions
var commOptions *commonoptions.AgentOptions
var cancel context.CancelFunc

var work *workapiv1.ManifestWork
@@ -31,26 +32,27 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {

ginkgo.BeforeEach(func() {
o = spoke.NewWorkloadAgentOptions()
o.HubKubeconfigFile = hubKubeconfigFileName
o.AgentOptions = commonoptions.NewAgentOptions()
o.AgentOptions.SpokeClusterName = utilrand.String(5)
o.StatusSyncInterval = 3 * time.Second

commOptions = commonoptions.NewAgentOptions()
commOptions.HubKubeconfigFile = hubKubeconfigFileName
commOptions.SpokeClusterName = utilrand.String(5)

ns := &corev1.Namespace{}
ns.Name = o.AgentOptions.SpokeClusterName
ns.Name = commOptions.SpokeClusterName
_, err := spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

var ctx context.Context
ctx, cancel = context.WithCancel(context.Background())
go startWorkAgent(ctx, o)
go startWorkAgent(ctx, o, commOptions)

// reset manifests
manifests = nil
})

ginkgo.JustBeforeEach(func() {
work = util.NewManifestWork(o.AgentOptions.SpokeClusterName, "", manifests)
work = util.NewManifestWork(commOptions.SpokeClusterName, "", manifests)
gomega.Expect(err).ToNot(gomega.HaveOccurred())
})

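The same split applies on the work-agent side: WorkloadAgentOptions keeps only work-specific settings such as StatusSyncInterval, while the hub kubeconfig file and spoke cluster name move to commonoptions.AgentOptions, and both structs are handed to the suite's startWorkAgent helper. A brief usage sketch assembled from the added lines above (the helper itself is defined elsewhere in the integration test suite):

// Usage sketch assembled from the new-side lines of the hunk above.
o := spoke.NewWorkloadAgentOptions()
o.StatusSyncInterval = 3 * time.Second

commOptions := commonoptions.NewAgentOptions()
commOptions.HubKubeconfigFile = hubKubeconfigFileName
commOptions.SpokeClusterName = utilrand.String(5)

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

// startWorkAgent is the integration-suite helper; it now takes both option structs.
go startWorkAgent(ctx, o, commOptions)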
@@ -58,7 +60,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
if cancel != nil {
cancel()
}
err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), o.AgentOptions.SpokeClusterName, metav1.DeleteOptions{})
err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), commOptions.SpokeClusterName, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
})

@@ -67,29 +69,29 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
var anotherAppliedManifestWorkName string
ginkgo.BeforeEach(func() {
manifests = []workapiv1.Manifest{
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{})),
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{})),
}
// Create another manifestworks with one shared resource.
anotherWork = util.NewManifestWork(o.AgentOptions.SpokeClusterName, "sharing-resource-work", []workapiv1.Manifest{manifests[0]})
anotherWork = util.NewManifestWork(commOptions.SpokeClusterName, "sharing-resource-work", []workapiv1.Manifest{manifests[0]})
})

ginkgo.JustBeforeEach(func() {
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

appliedManifestWorkName = fmt.Sprintf("%s-%s", hubHash, work.Name)

util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)

anotherWork, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), anotherWork, metav1.CreateOptions{})
anotherWork, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), anotherWork, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
util.AssertWorkCondition(anotherWork.Namespace, anotherWork.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
util.AssertWorkCondition(anotherWork.Namespace, anotherWork.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
util.AssertWorkCondition(anotherWork.Namespace, anotherWork.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
util.AssertWorkCondition(anotherWork.Namespace, anotherWork.Name, hubWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)

anotherAppliedManifestWorkName = fmt.Sprintf("%s-%s", hubHash, anotherWork.Name)
@@ -98,91 +100,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
ginkgo.It("shared resource between the manifestwork should be kept when one manifestwork is deleted", func() {
|
||||
// ensure configmap exists and get its uid
|
||||
util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
|
||||
curentConfigMap, err := spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
currentUID := curentConfigMap.UID
|
||||
|
||||
// Ensure that uid recorded in the appliedmanifestwork and anotherappliedmanifestwork is correct.
|
||||
gomega.Eventually(func() error {
|
||||
appliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), appliedManifestWorkName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, appliedResource := range appliedManifestWork.Status.AppliedResources {
|
||||
if appliedResource.Name == "cm1" && appliedResource.UID == string(currentUID) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("Resource name or uid in appliedmanifestwork does not match")
|
||||
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
|
||||
|
||||
gomega.Eventually(func() error {
|
||||
anotherAppliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), anotherAppliedManifestWorkName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, appliedResource := range anotherAppliedManifestWork.Status.AppliedResources {
|
||||
if appliedResource.Name == "cm1" && appliedResource.UID == string(currentUID) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("Resource name or uid in appliedmanifestwork does not match")
|
||||
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
|
||||
|
||||
// Delete one manifestwork
|
||||
err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
// Ensure the appliedmanifestwork of deleted manifestwork is removed so it won't try to delete shared resource
|
||||
gomega.Eventually(func() error {
|
||||
appliedWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), appliedManifestWorkName, metav1.GetOptions{})
|
||||
if errors.IsNotFound(err) {
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return fmt.Errorf("appliedmanifestwork should not exist: %v", appliedWork.DeletionTimestamp)
|
||||
}, eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed())
|
||||
|
||||
// Ensure the configmap is kept and tracked by anotherappliedmanifestwork.
|
||||
gomega.Eventually(func() error {
|
||||
configMap, err := spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if currentUID != configMap.UID {
|
||||
return fmt.Errorf("UID should be equal")
|
||||
}
|
||||
|
||||
anotherappliedmanifestwork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), anotherAppliedManifestWorkName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, appliedResource := range anotherappliedmanifestwork.Status.AppliedResources {
|
||||
if appliedResource.Name != "cm1" {
|
||||
return fmt.Errorf("Resource Name should be cm1")
|
||||
}
|
||||
|
||||
if appliedResource.UID != string(currentUID) {
|
||||
return fmt.Errorf("UID should be equal")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
|
||||
})
|
||||
|
||||
ginkgo.It("shared resource between the manifestwork should be kept when the shared resource is removed from one manifestwork", func() {
|
||||
// ensure configmap exists and get its uid
|
||||
util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
|
||||
curentConfigMap, err := spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
|
||||
curentConfigMap, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
currentUID := curentConfigMap.UID
|
||||
|
||||
@@ -214,14 +132,98 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("Resource name or uid in appliedmanifestwork does not match")
|
||||
return fmt.Errorf("resource name or uid in appliedmanifestwork does not match")
|
||||
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
|
||||
|
||||
// Delete one manifestwork
|
||||
err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
// Ensure the appliedmanifestwork of deleted manifestwork is removed so it won't try to delete shared resource
|
||||
gomega.Eventually(func() error {
|
||||
appliedWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), appliedManifestWorkName, metav1.GetOptions{})
|
||||
if errors.IsNotFound(err) {
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return fmt.Errorf("appliedmanifestwork should not exist: %v", appliedWork.DeletionTimestamp)
|
||||
}, eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed())
|
||||
|
||||
// Ensure the configmap is kept and tracked by anotherappliedmanifestwork.
|
||||
gomega.Eventually(func() error {
|
||||
configMap, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if currentUID != configMap.UID {
|
||||
return fmt.Errorf("UID should be equal")
|
||||
}
|
||||
|
||||
anotherappliedmanifestwork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), anotherAppliedManifestWorkName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, appliedResource := range anotherappliedmanifestwork.Status.AppliedResources {
|
||||
if appliedResource.Name != "cm1" {
|
||||
return fmt.Errorf("resource Name should be cm1")
|
||||
}
|
||||
|
||||
if appliedResource.UID != string(currentUID) {
|
||||
return fmt.Errorf("UID should be equal")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
|
||||
})
|
||||
|
||||
ginkgo.It("shared resource between the manifestwork should be kept when the shared resource is removed from one manifestwork", func() {
|
||||
// ensure configmap exists and get its uid
|
||||
util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
|
||||
curentConfigMap, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
currentUID := curentConfigMap.UID
|
||||
|
||||
// Ensure that uid recorded in the appliedmanifestwork and anotherappliedmanifestwork is correct.
|
||||
gomega.Eventually(func() error {
|
||||
appliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), appliedManifestWorkName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, appliedResource := range appliedManifestWork.Status.AppliedResources {
|
||||
if appliedResource.Name == "cm1" && appliedResource.UID == string(currentUID) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("resource name or uid in appliedmanifestwork does not match")
|
||||
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
|
||||
|
||||
gomega.Eventually(func() error {
|
||||
anotherAppliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), anotherAppliedManifestWorkName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, appliedResource := range anotherAppliedManifestWork.Status.AppliedResources {
|
||||
if appliedResource.Name == "cm1" && appliedResource.UID == string(currentUID) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("resource name or uid in appliedmanifestwork does not match")
|
||||
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
|
||||
|
||||
// Update one manifestwork to remove the shared resource
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
work.Spec.Workload.Manifests = []workapiv1.Manifest{manifests[1]}
|
||||
_, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
|
||||
_, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
// Ensure the resource is not tracked by the appliedmanifestwork.
|
||||
@@ -242,7 +244,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
|
||||
// Ensure the configmap is kept and tracked by anotherappliedmanifestwork
|
||||
gomega.Eventually(func() error {
|
||||
configMap, err := spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
|
||||
configMap, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -258,7 +260,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
|
||||
for _, appliedResource := range anotherAppliedManifestWork.Status.AppliedResources {
|
||||
if appliedResource.Name != "cm1" {
|
||||
return fmt.Errorf("Resource Name should be cm1")
|
||||
return fmt.Errorf("resource Name should be cm1")
|
||||
}
|
||||
|
||||
if appliedResource.UID != string(currentUID) {
|
||||
@@ -275,8 +277,8 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
ginkgo.Context("Delete options", func() {
|
||||
ginkgo.BeforeEach(func() {
|
||||
manifests = []workapiv1.Manifest{
|
||||
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
|
||||
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{})),
|
||||
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
|
||||
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{})),
|
||||
}
|
||||
})
|
||||
|
||||
@@ -285,14 +287,14 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
PropagationPolicy: workapiv1.DeletePropagationPolicyTypeOrphan,
|
||||
}
|
||||
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
appliedManifestWorkName = fmt.Sprintf("%s-%s", hubHash, work.Name)
|
||||
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
// Ensure configmap exists
|
||||
@@ -300,38 +302,38 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
|
||||
// Ensure ownership of configmap is updated
|
||||
gomega.Eventually(func() error {
|
||||
cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
|
||||
cm, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(cm.OwnerReferences) != 0 {
|
||||
return fmt.Errorf("Owner reference are not correctly updated, current ownerrefs are %v", cm.OwnerReferences)
|
||||
return fmt.Errorf("owner reference are not correctly updated, current ownerrefs are %v", cm.OwnerReferences)
|
||||
}
|
||||
|
||||
return nil
|
||||
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
|
||||
|
||||
gomega.Eventually(func() error {
|
||||
cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm2", metav1.GetOptions{})
|
||||
cm, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), "cm2", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(cm.OwnerReferences) != 0 {
|
||||
return fmt.Errorf("Owner reference are not correctly updated, current ownerrefs are %v", cm.OwnerReferences)
|
||||
return fmt.Errorf("owner reference are not correctly updated, current ownerrefs are %v", cm.OwnerReferences)
|
||||
}
|
||||
|
||||
return nil
|
||||
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
|
||||
|
||||
// Delete the work
|
||||
err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
|
||||
err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
// Wait for deletion of manifest work
|
||||
gomega.Eventually(func() bool {
|
||||
_, err := hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
_, err := hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
return errors.IsNotFound(err)
|
||||
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
|
||||
|
||||
@@ -347,21 +349,21 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
{
|
||||
Group: "",
|
||||
Resource: "configmaps",
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
Name: "cm1",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
appliedManifestWorkName = fmt.Sprintf("%s-%s", hubHash, work.Name)
|
||||
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
// Ensure configmap exists
|
||||
@@ -369,34 +371,34 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
|
||||
// Ensure ownership of configmap is updated
|
||||
gomega.Eventually(func() error {
|
||||
cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
|
||||
cm, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(cm.OwnerReferences) != 0 {
|
||||
return fmt.Errorf("Owner reference are not correctly updated, current ownerrefs are %v", cm.OwnerReferences)
|
||||
return fmt.Errorf("owner reference are not correctly updated, current ownerrefs are %v", cm.OwnerReferences)
|
||||
}
|
||||
|
||||
return nil
|
||||
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
|
||||
|
||||
// Delete the work
|
||||
err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
|
||||
err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
// Wait for deletion of manifest work
|
||||
gomega.Eventually(func() bool {
|
||||
_, err := hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
_, err := hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
return errors.IsNotFound(err)
|
||||
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
|
||||
|
||||
// One of the resource should be deleted.
|
||||
_, err = spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm2", metav1.GetOptions{})
|
||||
_, err = spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), "cm2", metav1.GetOptions{})
|
||||
gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue())
|
||||
|
||||
// One of the resource should be kept
|
||||
_, err = spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
|
||||
_, err = spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
})
|
||||
|
||||
@@ -408,21 +410,21 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
{
|
||||
Group: "",
|
||||
Resource: "configmaps",
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
Name: "cm1",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
appliedManifestWorkName = fmt.Sprintf("%s-%s", hubHash, work.Name)
|
||||
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
// Ensure configmap exists
|
||||
@@ -430,13 +432,13 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
|
||||
// Ensure ownership of configmap is updated
|
||||
gomega.Eventually(func() error {
|
||||
cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
|
||||
cm, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(cm.OwnerReferences) != 0 {
|
||||
return fmt.Errorf("Owner reference are not correctly updated, current ownerrefs are %v", cm.OwnerReferences)
|
||||
return fmt.Errorf("owner reference are not correctly updated, current ownerrefs are %v", cm.OwnerReferences)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -444,26 +446,26 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
|
||||
// Remove the resource from the manifests
|
||||
gomega.Eventually(func() error {
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
work.Spec.Workload.Manifests = []workapiv1.Manifest{
|
||||
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{})),
|
||||
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{})),
|
||||
}
|
||||
_, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
|
||||
_, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
|
||||
return err
|
||||
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
|
||||
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
// Sleep 5 second and check the resource should be kept
|
||||
time.Sleep(5 * time.Second)
|
||||
_, err = spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
|
||||
_, err = spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
})
|
||||
|
||||
@@ -475,21 +477,21 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
{
|
||||
Group: "",
|
||||
Resource: "configmaps",
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
Name: "cm1",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
appliedManifestWorkName = fmt.Sprintf("%s-%s", hubHash, work.Name)
|
||||
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
// Ensure configmap exists
|
||||
@@ -497,13 +499,13 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
|
||||
// Ensure ownership of configmap is updated
|
||||
gomega.Eventually(func() error {
|
||||
cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
|
||||
cm, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(cm.OwnerReferences) != 0 {
|
||||
return fmt.Errorf("Owner reference are not correctly updated, current ownerrefs are %v", cm.OwnerReferences)
|
||||
return fmt.Errorf("owner reference are not correctly updated, current ownerrefs are %v", cm.OwnerReferences)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -511,44 +513,44 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
|
||||
// Remove the delete option
|
||||
gomega.Eventually(func() error {
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
work.Spec.DeleteOption = nil
|
||||
_, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
|
||||
_, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
|
||||
return err
|
||||
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
|
||||
|
||||
// Ensure ownership of configmap is updated
|
||||
gomega.Eventually(func() error {
|
||||
cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
|
||||
cm, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(cm.OwnerReferences) != 1 {
|
||||
return fmt.Errorf("Owner reference are not correctly updated, current ownerrefs are %v", cm.OwnerReferences)
|
||||
return fmt.Errorf("owner reference are not correctly updated, current ownerrefs are %v", cm.OwnerReferences)
|
||||
}
|
||||
|
||||
return nil
|
||||
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
|
||||
|
||||
// Delete the work
|
||||
err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
|
||||
err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
// Wait for deletion of manifest work
|
||||
gomega.Eventually(func() bool {
|
||||
_, err := hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
_, err := hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
return errors.IsNotFound(err)
|
||||
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
|
||||
|
||||
// All of the resource should be deleted.
|
||||
_, err = spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm2", metav1.GetOptions{})
|
||||
_, err = spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), "cm2", metav1.GetOptions{})
|
||||
gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue())
|
||||
_, err = spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
|
||||
_, err = spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
|
||||
gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue())
|
||||
})
|
||||
})
|
||||
|
||||
@@ -25,6 +25,7 @@ import (
|
||||
|
||||
var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
var o *spoke.WorkloadAgentOptions
|
||||
var commOptions *commonoptions.AgentOptions
|
||||
var cancel context.CancelFunc
|
||||
|
||||
var work *workapiv1.ManifestWork
|
||||
@@ -35,21 +36,22 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
|
||||
ginkgo.BeforeEach(func() {
|
||||
o = spoke.NewWorkloadAgentOptions()
|
||||
o.HubKubeconfigFile = hubKubeconfigFileName
|
||||
o.AgentOptions = commonoptions.NewAgentOptions()
|
||||
o.AgentOptions.SpokeClusterName = utilrand.String(5)
|
||||
o.StatusSyncInterval = 3 * time.Second
|
||||
err := features.DefaultSpokeWorkMutableFeatureGate.Set("ExecutorValidatingCaches=true")
|
||||
err := features.SpokeMutableFeatureGate.Set("ExecutorValidatingCaches=true")
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
commOptions = commonoptions.NewAgentOptions()
|
||||
commOptions.HubKubeconfigFile = hubKubeconfigFileName
|
||||
commOptions.SpokeClusterName = utilrand.String(5)
|
||||
|
||||
ns := &corev1.Namespace{}
|
||||
ns.Name = o.AgentOptions.SpokeClusterName
|
||||
ns.Name = commOptions.SpokeClusterName
|
||||
_, err = spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
var ctx context.Context
|
||||
ctx, cancel = context.WithCancel(context.Background())
|
||||
go startWorkAgent(ctx, o)
|
||||
go startWorkAgent(ctx, o, commOptions)
|
||||
|
||||
// reset manifests
|
||||
manifests = nil
|
||||
@@ -57,7 +59,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
})
|
||||
|
||||
ginkgo.JustBeforeEach(func() {
|
||||
work = util.NewManifestWork(o.AgentOptions.SpokeClusterName, "", manifests)
|
||||
work = util.NewManifestWork(commOptions.SpokeClusterName, "", manifests)
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
work.Spec.Executor = executor
|
||||
})
|
||||
@@ -67,7 +69,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
cancel()
|
||||
}
|
||||
err := spokeKubeClient.CoreV1().Namespaces().Delete(
|
||||
context.Background(), o.AgentOptions.SpokeClusterName, metav1.DeleteOptions{})
|
||||
context.Background(), commOptions.SpokeClusterName, metav1.DeleteOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
})
|
||||
|
||||
@@ -75,14 +77,14 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
executorName := "test-executor"
|
||||
ginkgo.BeforeEach(func() {
|
||||
manifests = []workapiv1.Manifest{
|
||||
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
|
||||
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{})),
|
||||
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
|
||||
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{})),
|
||||
}
|
||||
executor = &workapiv1.ManifestWorkExecutor{
|
||||
Subject: workapiv1.ManifestWorkExecutorSubject{
|
||||
Type: workapiv1.ExecutorSubjectTypeServiceAccount,
|
||||
ServiceAccount: &workapiv1.ManifestWorkSubjectServiceAccount{
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
Name: executorName,
|
||||
},
|
||||
},
|
||||
@@ -90,14 +92,14 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
})
|
||||
|
||||
ginkgo.It("Executor does not have permission", func() {
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(
|
||||
context.Background(), work, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied),
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied,
|
||||
metav1.ConditionFalse, []metav1.ConditionStatus{metav1.ConditionFalse, metav1.ConditionFalse},
|
||||
eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable),
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable,
|
||||
metav1.ConditionFalse, []metav1.ConditionStatus{metav1.ConditionFalse, metav1.ConditionFalse},
|
||||
eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
@@ -107,10 +109,10 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
|
||||
ginkgo.It("Executor does not have permission to partial resources", func() {
|
||||
roleName := "role1"
|
||||
_, err = spokeKubeClient.RbacV1().Roles(o.AgentOptions.SpokeClusterName).Create(
|
||||
_, err = spokeKubeClient.RbacV1().Roles(commOptions.SpokeClusterName).Create(
|
||||
context.TODO(), &rbacv1.Role{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
Name: roleName,
|
||||
},
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
@@ -123,16 +125,16 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
},
|
||||
}, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
_, err = spokeKubeClient.RbacV1().RoleBindings(o.AgentOptions.SpokeClusterName).Create(
|
||||
_, err = spokeKubeClient.RbacV1().RoleBindings(commOptions.SpokeClusterName).Create(
|
||||
context.TODO(), &rbacv1.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
Name: roleName,
|
||||
},
|
||||
Subjects: []rbacv1.Subject{
|
||||
{
|
||||
Kind: "ServiceAccount",
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
Name: executorName,
|
||||
},
|
||||
},
|
||||
@@ -144,34 +146,34 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
}, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(
|
||||
context.Background(), work, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied),
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied,
|
||||
metav1.ConditionFalse, []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionFalse},
|
||||
eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable),
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable,
|
||||
metav1.ConditionFalse, []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionFalse},
|
||||
eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
// ensure configmap cm1 exist and cm2 not exist
|
||||
util.AssertExistenceOfConfigMaps(
|
||||
[]workapiv1.Manifest{
|
||||
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
|
||||
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
|
||||
}, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertNonexistenceOfConfigMaps(
|
||||
[]workapiv1.Manifest{
|
||||
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm2", map[string]string{"a": "b"}, []string{})),
|
||||
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm2", map[string]string{"a": "b"}, []string{})),
|
||||
}, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
|
||||
})
|
||||
|
||||
ginkgo.It("Executor has permission for all resources", func() {
|
||||
roleName := "role1"
|
||||
_, err = spokeKubeClient.RbacV1().Roles(o.AgentOptions.SpokeClusterName).Create(
|
||||
_, err = spokeKubeClient.RbacV1().Roles(commOptions.SpokeClusterName).Create(
|
||||
context.TODO(), &rbacv1.Role{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
Name: roleName,
|
||||
},
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
@@ -184,16 +186,16 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
},
|
||||
}, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
_, err = spokeKubeClient.RbacV1().RoleBindings(o.AgentOptions.SpokeClusterName).Create(
|
||||
_, err = spokeKubeClient.RbacV1().RoleBindings(commOptions.SpokeClusterName).Create(
|
||||
context.TODO(), &rbacv1.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
Name: roleName,
|
||||
},
|
||||
Subjects: []rbacv1.Subject{
|
||||
{
|
||||
Kind: "ServiceAccount",
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
Name: executorName,
|
||||
},
|
||||
},
|
||||
@@ -205,14 +207,14 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
}, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(
|
||||
context.Background(), work, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied),
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied,
|
||||
metav1.ConditionTrue, []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue},
|
||||
eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable),
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable,
|
||||
metav1.ConditionTrue, []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue},
|
||||
eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
@@ -225,14 +227,14 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
executorName := "test-executor"
|
||||
ginkgo.BeforeEach(func() {
|
||||
manifests = []workapiv1.Manifest{
|
||||
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
|
||||
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{})),
|
||||
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
|
||||
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{})),
|
||||
}
|
||||
executor = &workapiv1.ManifestWorkExecutor{
|
||||
Subject: workapiv1.ManifestWorkExecutorSubject{
|
||||
Type: workapiv1.ExecutorSubjectTypeServiceAccount,
|
||||
ServiceAccount: &workapiv1.ManifestWorkSubjectServiceAccount{
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
Name: executorName,
|
||||
},
|
||||
},
|
||||
@@ -241,10 +243,10 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
|
||||
ginkgo.It("Executor does not have delete permission and delete option is foreground", func() {
|
||||
roleName := "role1"
|
||||
_, err = spokeKubeClient.RbacV1().Roles(o.AgentOptions.SpokeClusterName).Create(
|
||||
_, err = spokeKubeClient.RbacV1().Roles(commOptions.SpokeClusterName).Create(
|
||||
context.TODO(), &rbacv1.Role{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
Name: roleName,
|
||||
},
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
@@ -257,16 +259,16 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
},
|
||||
}, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
_, err = spokeKubeClient.RbacV1().RoleBindings(o.AgentOptions.SpokeClusterName).Create(
|
||||
_, err = spokeKubeClient.RbacV1().RoleBindings(commOptions.SpokeClusterName).Create(
|
||||
context.TODO(), &rbacv1.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
Name: roleName,
|
||||
},
|
||||
Subjects: []rbacv1.Subject{
|
||||
{
|
||||
Kind: "ServiceAccount",
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
Name: executorName,
|
||||
},
|
||||
},
|
||||
@@ -278,14 +280,14 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
}, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(
|
||||
context.Background(), work, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied),
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied,
|
||||
metav1.ConditionFalse, []metav1.ConditionStatus{metav1.ConditionFalse, metav1.ConditionFalse},
|
||||
eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable),
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable,
|
||||
metav1.ConditionFalse, []metav1.ConditionStatus{metav1.ConditionFalse, metav1.ConditionFalse},
|
||||
eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
@@ -295,10 +297,10 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
|
||||
ginkgo.It("Executor does not have delete permission and delete option is orphan", func() {
|
||||
roleName := "role1"
|
||||
_, err = spokeKubeClient.RbacV1().Roles(o.AgentOptions.SpokeClusterName).Create(
|
||||
_, err = spokeKubeClient.RbacV1().Roles(commOptions.SpokeClusterName).Create(
|
||||
context.TODO(), &rbacv1.Role{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
Name: roleName,
|
||||
},
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
@@ -311,16 +313,16 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
},
|
||||
}, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
_, err = spokeKubeClient.RbacV1().RoleBindings(o.AgentOptions.SpokeClusterName).Create(
|
||||
_, err = spokeKubeClient.RbacV1().RoleBindings(commOptions.SpokeClusterName).Create(
|
||||
context.TODO(), &rbacv1.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
Name: roleName,
|
||||
},
|
||||
Subjects: []rbacv1.Subject{
|
||||
{
|
||||
Kind: "ServiceAccount",
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
Name: executorName,
|
||||
},
|
||||
},
|
||||
@@ -335,14 +337,14 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
work.Spec.DeleteOption = &workapiv1.DeleteOption{
|
||||
PropagationPolicy: workapiv1.DeletePropagationPolicyTypeOrphan,
|
||||
}
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(
|
||||
context.Background(), work, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied),
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied,
|
||||
metav1.ConditionTrue, []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue},
|
||||
eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable),
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable,
|
||||
metav1.ConditionTrue, []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue},
|
||||
eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
@@ -352,10 +354,10 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
|
||||
ginkgo.It("Executor does not have delete permission and delete option is selectively orphan", func() {
|
||||
roleName := "role1"
|
||||
_, err = spokeKubeClient.RbacV1().Roles(o.AgentOptions.SpokeClusterName).Create(
|
||||
_, err = spokeKubeClient.RbacV1().Roles(commOptions.SpokeClusterName).Create(
|
||||
context.TODO(), &rbacv1.Role{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
Name: roleName,
|
||||
},
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
@@ -368,16 +370,16 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
},
|
||||
}, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
_, err = spokeKubeClient.RbacV1().RoleBindings(o.AgentOptions.SpokeClusterName).Create(
|
||||
_, err = spokeKubeClient.RbacV1().RoleBindings(commOptions.SpokeClusterName).Create(
|
||||
context.TODO(), &rbacv1.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
Name: roleName,
|
||||
},
|
||||
Subjects: []rbacv1.Subject{
|
||||
{
|
||||
Kind: "ServiceAccount",
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
Name: executorName,
|
||||
},
|
||||
},
|
||||
@@ -395,31 +397,31 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
OrphaningRules: []workapiv1.OrphaningRule{
|
||||
{
|
||||
Resource: "configmaps",
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
Name: "cm1",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(
|
||||
context.Background(), work, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied),
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied,
|
||||
metav1.ConditionFalse, []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionFalse},
|
||||
eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable),
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable,
|
||||
metav1.ConditionFalse, []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionFalse},
|
||||
eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
// ensure configmap cm1 exist and cm2 not exist
|
||||
util.AssertExistenceOfConfigMaps(
|
||||
[]workapiv1.Manifest{
|
||||
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
|
||||
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
|
||||
}, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertNonexistenceOfConfigMaps(
|
||||
[]workapiv1.Manifest{
|
||||
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm2", map[string]string{"a": "b"}, []string{})),
|
||||
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm2", map[string]string{"a": "b"}, []string{})),
|
||||
}, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
|
||||
})
|
||||
})
|
||||
@@ -428,20 +430,20 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
executorName := "test-executor"
|
||||
ginkgo.BeforeEach(func() {
|
||||
manifests = []workapiv1.Manifest{
|
||||
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
|
||||
util.ToManifest(util.NewRoleForManifest(o.AgentOptions.SpokeClusterName, "role-cm-creator", rbacv1.PolicyRule{
|
||||
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
|
||||
util.ToManifest(util.NewRoleForManifest(commOptions.SpokeClusterName, "role-cm-creator", rbacv1.PolicyRule{
|
||||
Verbs: []string{"create", "update", "patch", "get", "list", "delete"},
|
||||
APIGroups: []string{""},
|
||||
Resources: []string{"configmaps"},
|
||||
})),
|
||||
util.ToManifest(util.NewRoleBindingForManifest(o.AgentOptions.SpokeClusterName, "role-cm-creator-binding",
|
||||
util.ToManifest(util.NewRoleBindingForManifest(commOptions.SpokeClusterName, "role-cm-creator-binding",
|
||||
rbacv1.RoleRef{
|
||||
Kind: "Role",
|
||||
Name: "role-cm-creator",
|
||||
},
|
||||
rbacv1.Subject{
|
||||
Kind: "ServiceAccount",
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
Name: executorName,
|
||||
})),
|
||||
}
|
||||
@@ -449,7 +451,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
Subject: workapiv1.ManifestWorkExecutorSubject{
|
||||
Type: workapiv1.ExecutorSubjectTypeServiceAccount,
|
||||
ServiceAccount: &workapiv1.ManifestWorkSubjectServiceAccount{
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
Name: executorName,
|
||||
},
|
||||
},
|
||||
@@ -458,11 +460,11 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
|
||||
ginkgo.It("no permission", func() {
|
||||
roleName := "role1"
|
||||
_, err = spokeKubeClient.RbacV1().Roles(o.AgentOptions.SpokeClusterName).Create(
|
||||
_, err = spokeKubeClient.RbacV1().Roles(commOptions.SpokeClusterName).Create(
|
||||
context.TODO(), &rbacv1.Role{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: roleName,
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
},
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
{
|
||||
@@ -474,16 +476,16 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
},
|
||||
}, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
_, err = spokeKubeClient.RbacV1().RoleBindings(o.AgentOptions.SpokeClusterName).Create(
|
||||
_, err = spokeKubeClient.RbacV1().RoleBindings(commOptions.SpokeClusterName).Create(
|
||||
context.TODO(), &rbacv1.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: roleName,
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
},
|
||||
Subjects: []rbacv1.Subject{
|
||||
{
|
||||
Kind: "ServiceAccount",
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
Name: executorName,
|
||||
},
|
||||
},
|
||||
@@ -495,15 +497,15 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
}, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(
|
||||
context.Background(), work, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied),
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied,
|
||||
metav1.ConditionFalse,
|
||||
[]metav1.ConditionStatus{metav1.ConditionFalse, metav1.ConditionFalse, metav1.ConditionFalse},
|
||||
eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable),
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable,
|
||||
metav1.ConditionFalse,
|
||||
[]metav1.ConditionStatus{metav1.ConditionFalse, metav1.ConditionFalse, metav1.ConditionFalse},
|
||||
eventuallyTimeout, eventuallyInterval)
|
||||
@@ -511,17 +513,17 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
// ensure configmap not exist
|
||||
util.AssertNonexistenceOfConfigMaps(
|
||||
[]workapiv1.Manifest{
|
||||
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
|
||||
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
|
||||
}, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
|
||||
})
|
||||
|
||||
ginkgo.It("no permission for already existing resource", func() {
|
||||
roleName := "role1"
|
||||
_, err = spokeKubeClient.RbacV1().Roles(o.AgentOptions.SpokeClusterName).Create(
|
||||
_, err = spokeKubeClient.RbacV1().Roles(commOptions.SpokeClusterName).Create(
|
||||
context.TODO(), &rbacv1.Role{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: roleName,
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
},
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
{
|
||||
@@ -533,16 +535,16 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
},
|
||||
}, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
_, err = spokeKubeClient.RbacV1().RoleBindings(o.AgentOptions.SpokeClusterName).Create(
|
||||
_, err = spokeKubeClient.RbacV1().RoleBindings(commOptions.SpokeClusterName).Create(
|
||||
context.TODO(), &rbacv1.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: roleName,
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
},
|
||||
Subjects: []rbacv1.Subject{
|
||||
{
|
||||
Kind: "ServiceAccount",
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
Name: executorName,
|
||||
},
|
||||
},
|
||||
@@ -555,11 +557,11 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
// make the role exist with lower permission
|
||||
_, err = spokeKubeClient.RbacV1().Roles(o.AgentOptions.SpokeClusterName).Create(
|
||||
_, err = spokeKubeClient.RbacV1().Roles(commOptions.SpokeClusterName).Create(
|
||||
context.TODO(), &rbacv1.Role{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "role-cm-creator",
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
},
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
{
|
||||
@@ -571,15 +573,15 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
}, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(
|
||||
context.Background(), work, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied),
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied,
|
||||
metav1.ConditionFalse,
|
||||
[]metav1.ConditionStatus{metav1.ConditionFalse, metav1.ConditionFalse, metav1.ConditionFalse},
|
||||
eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable),
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable,
|
||||
metav1.ConditionFalse,
|
||||
// the cluster role already exists, so the available status is true even if the applied status is false
[]metav1.ConditionStatus{metav1.ConditionFalse, metav1.ConditionTrue, metav1.ConditionFalse},
|
||||
@@ -588,17 +590,17 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
// ensure configmap not exist
|
||||
util.AssertNonexistenceOfConfigMaps(
|
||||
[]workapiv1.Manifest{
|
||||
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
|
||||
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
|
||||
}, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
|
||||
})
|
||||
|
||||
ginkgo.It("with permission", func() {
|
||||
roleName := "role1"
|
||||
_, err = spokeKubeClient.RbacV1().Roles(o.AgentOptions.SpokeClusterName).Create(
|
||||
_, err = spokeKubeClient.RbacV1().Roles(commOptions.SpokeClusterName).Create(
|
||||
context.TODO(), &rbacv1.Role{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: roleName,
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
},
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
{
|
||||
@@ -615,16 +617,16 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
},
|
||||
}, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
_, err = spokeKubeClient.RbacV1().RoleBindings(o.AgentOptions.SpokeClusterName).Create(
|
||||
_, err = spokeKubeClient.RbacV1().RoleBindings(commOptions.SpokeClusterName).Create(
|
||||
context.TODO(), &rbacv1.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: roleName,
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
},
|
||||
Subjects: []rbacv1.Subject{
|
||||
{
|
||||
Kind: "ServiceAccount",
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
Name: executorName,
|
||||
},
|
||||
},
|
||||
@@ -636,15 +638,15 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
}, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(
|
||||
context.Background(), work, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied),
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied,
|
||||
metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue},
|
||||
eventuallyTimeout*3, eventuallyInterval)
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable),
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable,
|
||||
metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue},
|
||||
eventuallyTimeout, eventuallyInterval)
|
||||
@@ -652,17 +654,17 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
// ensure configmaps exist
|
||||
util.AssertExistenceOfConfigMaps(
|
||||
[]workapiv1.Manifest{
|
||||
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
|
||||
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
|
||||
}, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
|
||||
})
|
||||
|
||||
ginkgo.It("with permission for already exist resource", func() {
|
||||
roleName := "role1"
|
||||
_, err = spokeKubeClient.RbacV1().Roles(o.AgentOptions.SpokeClusterName).Create(
|
||||
_, err = spokeKubeClient.RbacV1().Roles(commOptions.SpokeClusterName).Create(
|
||||
context.TODO(), &rbacv1.Role{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: roleName,
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
},
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
{
|
||||
@@ -679,16 +681,16 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
},
|
||||
}, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
_, err = spokeKubeClient.RbacV1().RoleBindings(o.AgentOptions.SpokeClusterName).Create(
|
||||
_, err = spokeKubeClient.RbacV1().RoleBindings(commOptions.SpokeClusterName).Create(
|
||||
context.TODO(), &rbacv1.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: roleName,
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
},
|
||||
Subjects: []rbacv1.Subject{
|
||||
{
|
||||
Kind: "ServiceAccount",
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
Name: executorName,
|
||||
},
|
||||
},
|
||||
@@ -701,11 +703,11 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
// make the role exist with lower permission
|
||||
_, err = spokeKubeClient.RbacV1().Roles(o.AgentOptions.SpokeClusterName).Create(
|
||||
_, err = spokeKubeClient.RbacV1().Roles(commOptions.SpokeClusterName).Create(
|
||||
context.TODO(), &rbacv1.Role{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "role-cm-creator",
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
},
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
{
|
||||
@@ -717,15 +719,15 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
}, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(
|
||||
context.Background(), work, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied),
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied,
|
||||
metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue},
|
||||
eventuallyTimeout*3, eventuallyInterval)
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable),
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable,
|
||||
metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue},
|
||||
eventuallyTimeout, eventuallyInterval)
|
||||
@@ -733,7 +735,7 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
// ensure configmaps exist
|
||||
util.AssertExistenceOfConfigMaps(
|
||||
[]workapiv1.Manifest{
|
||||
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
|
||||
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
|
||||
}, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
|
||||
})
|
||||
})
|
||||
@@ -789,13 +791,13 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
}
|
||||
ginkgo.BeforeEach(func() {
|
||||
manifests = []workapiv1.Manifest{
|
||||
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
|
||||
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
|
||||
}
|
||||
executor = &workapiv1.ManifestWorkExecutor{
|
||||
Subject: workapiv1.ManifestWorkExecutorSubject{
|
||||
Type: workapiv1.ExecutorSubjectTypeServiceAccount,
|
||||
ServiceAccount: &workapiv1.ManifestWorkSubjectServiceAccount{
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
Name: executorName,
|
||||
},
|
||||
},
|
||||
@@ -803,53 +805,53 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() {
|
||||
})
|
||||
|
||||
ginkgo.It("Permission change", func() {
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(
|
||||
context.Background(), work, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied),
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied,
|
||||
metav1.ConditionFalse, []metav1.ConditionStatus{metav1.ConditionFalse},
|
||||
eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable),
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable,
|
||||
metav1.ConditionFalse, []metav1.ConditionStatus{metav1.ConditionFalse},
|
||||
eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
ginkgo.By("ensure configmaps do not exist")
|
||||
util.AssertNonexistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
createRBAC(o.AgentOptions.SpokeClusterName, executorName)
|
||||
addConfigMapToManifestWork(hubWorkClient, work.Name, o.AgentOptions.SpokeClusterName, "cm2")
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied),
|
||||
createRBAC(commOptions.SpokeClusterName, executorName)
|
||||
addConfigMapToManifestWork(hubWorkClient, work.Name, commOptions.SpokeClusterName, "cm2")
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied,
|
||||
metav1.ConditionTrue, []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue},
|
||||
eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable),
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable,
|
||||
metav1.ConditionTrue, []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue},
|
||||
eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
ginkgo.By("ensure configmaps cm1 and cm2 exist")
|
||||
util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
deleteRBAC(o.AgentOptions.SpokeClusterName, executorName)
|
||||
addConfigMapToManifestWork(hubWorkClient, work.Name, o.AgentOptions.SpokeClusterName, "cm3")
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied),
|
||||
deleteRBAC(commOptions.SpokeClusterName, executorName)
|
||||
addConfigMapToManifestWork(hubWorkClient, work.Name, commOptions.SpokeClusterName, "cm3")
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied,
|
||||
metav1.ConditionFalse, []metav1.ConditionStatus{metav1.ConditionFalse, metav1.ConditionFalse,
|
||||
metav1.ConditionFalse}, eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable),
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable,
|
||||
metav1.ConditionFalse, []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue,
|
||||
metav1.ConditionFalse}, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
ginkgo.By("ensure configmap cm1 cm2 exist(will not delete the applied resource even the permison is revoked) but cm3 does not exist")
|
||||
util.AssertExistenceOfConfigMaps(
|
||||
[]workapiv1.Manifest{
|
||||
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, nil)),
|
||||
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, nil)),
|
||||
}, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertExistenceOfConfigMaps(
|
||||
[]workapiv1.Manifest{
|
||||
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm2", map[string]string{"a": "b"}, nil)),
|
||||
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm2", map[string]string{"a": "b"}, nil)),
|
||||
}, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertNonexistenceOfConfigMaps(
|
||||
[]workapiv1.Manifest{
|
||||
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm3", map[string]string{"a": "b"}, nil)),
|
||||
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm3", map[string]string{"a": "b"}, nil)),
|
||||
}, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
|
||||
})
|
||||
})
|
||||
|
||||
@@ -213,7 +213,7 @@ func assertWorksByReplicaSet(clusterNames sets.Set[string], mwrs *workapiv1alpha
}

if len(works.Items) != clusterNames.Len() {
return fmt.Errorf("The number of applied works should equal to %d, but got %d", clusterNames.Len(), len(works.Items))
return fmt.Errorf("the number of applied works should equal to %d, but got %d", clusterNames.Len(), len(works.Items))
}

for _, work := range works.Items {

@@ -25,6 +25,7 @@ import (

var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
var o *spoke.WorkloadAgentOptions
var commOptions *commonoptions.AgentOptions
var cancel context.CancelFunc

var work *workapiv1.ManifestWork
@@ -34,13 +35,14 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {

ginkgo.BeforeEach(func() {
o = spoke.NewWorkloadAgentOptions()
o.HubKubeconfigFile = hubKubeconfigFileName
o.AgentOptions = commonoptions.NewAgentOptions()
o.AgentOptions.SpokeClusterName = utilrand.String(5)
o.StatusSyncInterval = 3 * time.Second

commOptions = commonoptions.NewAgentOptions()
commOptions.HubKubeconfigFile = hubKubeconfigFileName
commOptions.SpokeClusterName = utilrand.String(5)

ns := &corev1.Namespace{}
ns.Name = o.AgentOptions.SpokeClusterName
ns.Name = commOptions.SpokeClusterName
_, err = spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

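The hunk above splits the agent configuration: work-specific settings stay on spoke.WorkloadAgentOptions, while the hub kubeconfig and spoke cluster name move to a separate commonoptions.AgentOptions passed alongside it. A minimal sketch of that wiring follows; it assumes the suite's startWorkAgent helper accepts both structs, as the later go startWorkAgent(ctx, o, commOptions) calls suggest, and the helper name below is otherwise hypothetical.

// Sketch, not the repository's code: restates the wiring pattern introduced
// above, with the start function injected to avoid assuming its package API.
package workintegration

import (
	"context"
	"time"

	utilrand "k8s.io/apimachinery/pkg/util/rand"

	commonoptions "open-cluster-management.io/ocm/pkg/common/options"
	"open-cluster-management.io/ocm/pkg/work/spoke"
)

func startAgentForTest(hubKubeconfigFile string,
	start func(context.Context, *spoke.WorkloadAgentOptions, *commonoptions.AgentOptions)) (string, context.CancelFunc) {
	o := spoke.NewWorkloadAgentOptions()
	o.StatusSyncInterval = 3 * time.Second // work-specific knobs stay on WorkloadAgentOptions

	commOptions := commonoptions.NewAgentOptions() // hub connection and cluster identity live here now
	commOptions.HubKubeconfigFile = hubKubeconfigFile
	commOptions.SpokeClusterName = utilrand.String(5)

	ctx, cancel := context.WithCancel(context.Background())
	go start(ctx, o, commOptions)
	return commOptions.SpokeClusterName, cancel
}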
@@ -49,24 +51,24 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
})
|
||||
|
||||
ginkgo.JustBeforeEach(func() {
|
||||
work = util.NewManifestWork(o.AgentOptions.SpokeClusterName, "", manifests)
|
||||
work = util.NewManifestWork(commOptions.SpokeClusterName, "", manifests)
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
})
|
||||
|
||||
ginkgo.AfterEach(func() {
|
||||
err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), o.AgentOptions.SpokeClusterName, metav1.DeleteOptions{})
|
||||
err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), commOptions.SpokeClusterName, metav1.DeleteOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
})
|
||||
|
||||
ginkgo.Context("Deployment Status feedback", func() {
|
||||
ginkgo.BeforeEach(func() {
|
||||
u, _, err := util.NewDeployment(o.AgentOptions.SpokeClusterName, "deploy1", "sa")
|
||||
u, _, err := util.NewDeployment(commOptions.SpokeClusterName, "deploy1", "sa")
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
manifests = append(manifests, util.ToManifest(u))
|
||||
|
||||
var ctx context.Context
|
||||
ctx, cancel = context.WithCancel(context.Background())
|
||||
go startWorkAgent(ctx, o)
|
||||
go startWorkAgent(ctx, o, commOptions)
|
||||
})
|
||||
|
||||
ginkgo.AfterEach(func() {
|
||||
@@ -81,7 +83,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
ResourceIdentifier: workapiv1.ResourceIdentifier{
|
||||
Group: "apps",
|
||||
Resource: "deployments",
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
Name: "deploy1",
|
||||
},
|
||||
FeedbackRules: []workapiv1.FeedbackRule{
|
||||
@@ -92,7 +94,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
},
|
||||
}
|
||||
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
|
||||
@@ -102,7 +104,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
|
||||
// Update Deployment status on spoke
|
||||
gomega.Eventually(func() error {
|
||||
deploy, err := spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
|
||||
deploy, err := spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -111,19 +113,19 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
deploy.Status.Replicas = 3
|
||||
deploy.Status.ReadyReplicas = 2
|
||||
|
||||
_, err = spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{})
|
||||
_, err = spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{})
|
||||
return err
|
||||
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
|
||||
|
||||
// Check if we get status of deployment on work api
|
||||
gomega.Eventually(func() error {
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(work.Status.ResourceStatus.Manifests) != 1 {
|
||||
return fmt.Errorf("The size of resource status is not correct, expect to be 1 but got %d", len(work.Status.ResourceStatus.Manifests))
|
||||
return fmt.Errorf("the size of resource status is not correct, expect to be 1 but got %d", len(work.Status.ResourceStatus.Manifests))
|
||||
}
|
||||
|
||||
values := work.Status.ResourceStatus.Manifests[0].StatusFeedbacks.Values
|
||||
@@ -152,11 +154,11 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
},
|
||||
}
|
||||
if !apiequality.Semantic.DeepEqual(values, expectedValues) {
|
||||
return fmt.Errorf("Status feedback values are not correct, we got %v", values)
|
||||
return fmt.Errorf("status feedback values are not correct, we got %v", values)
|
||||
}
|
||||
|
||||
if !util.HaveManifestCondition(work.Status.ResourceStatus.Manifests, "StatusFeedbackSynced", []metav1.ConditionStatus{metav1.ConditionTrue}) {
|
||||
return fmt.Errorf("Status sync condition should be True")
|
||||
return fmt.Errorf("status sync condition should be True")
|
||||
}
|
||||
|
||||
return err
|
||||
@@ -164,7 +166,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
|
||||
// Update replica of deployment
|
||||
gomega.Eventually(func() error {
|
||||
deploy, err := spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
|
||||
deploy, err := spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -173,19 +175,19 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
deploy.Status.Replicas = 3
|
||||
deploy.Status.ReadyReplicas = 3
|
||||
|
||||
_, err = spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{})
|
||||
_, err = spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{})
|
||||
return err
|
||||
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
|
||||
|
||||
// Check if the status of deployment is synced on work api
|
||||
gomega.Eventually(func() error {
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(work.Status.ResourceStatus.Manifests) != 1 {
|
||||
return fmt.Errorf("The size of resource status is not correct, expect to be 1 but got %d", len(work.Status.ResourceStatus.Manifests))
|
||||
return fmt.Errorf("the size of resource status is not correct, expect to be 1 but got %d", len(work.Status.ResourceStatus.Manifests))
|
||||
}
|
||||
|
||||
values := work.Status.ResourceStatus.Manifests[0].StatusFeedbacks.Values
|
||||
@@ -214,11 +216,11 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
},
|
||||
}
|
||||
if !apiequality.Semantic.DeepEqual(values, expectedValues) {
|
||||
return fmt.Errorf("Status feedback values are not correct, we got %v", values)
|
||||
return fmt.Errorf("status feedback values are not correct, we got %v", values)
|
||||
}
|
||||
|
||||
if !util.HaveManifestCondition(work.Status.ResourceStatus.Manifests, "StatusFeedbackSynced", []metav1.ConditionStatus{metav1.ConditionTrue}) {
|
||||
return fmt.Errorf("Status sync condition should be True")
|
||||
return fmt.Errorf("status sync condition should be True")
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -231,7 +233,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
ResourceIdentifier: workapiv1.ResourceIdentifier{
|
||||
Group: "apps",
|
||||
Resource: "deployments",
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
Name: "deploy1",
|
||||
},
|
||||
FeedbackRules: []workapiv1.FeedbackRule{
|
||||
@@ -252,16 +254,16 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
},
|
||||
}
|
||||
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
gomega.Eventually(func() error {
|
||||
deploy, err := spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
|
||||
deploy, err := spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -273,19 +275,19 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
},
|
||||
}
|
||||
|
||||
_, err = spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{})
|
||||
_, err = spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{})
|
||||
return err
|
||||
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
|
||||
|
||||
// Check if we get status of deployment on work api
|
||||
gomega.Eventually(func() error {
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(work.Status.ResourceStatus.Manifests) != 1 {
|
||||
return fmt.Errorf("The size of resource status is not correct, expect to be 1 but got %d", len(work.Status.ResourceStatus.Manifests))
|
||||
return fmt.Errorf("the size of resource status is not correct, expect to be 1 but got %d", len(work.Status.ResourceStatus.Manifests))
|
||||
}
|
||||
|
||||
values := work.Status.ResourceStatus.Manifests[0].StatusFeedbacks.Values
|
||||
@@ -300,11 +302,11 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
},
|
||||
}
|
||||
if !apiequality.Semantic.DeepEqual(values, expectedValues) {
|
||||
return fmt.Errorf("Status feedback values are not correct, we got %v", values)
|
||||
return fmt.Errorf("status feedback values are not correct, we got %v", values)
|
||||
}
|
||||
|
||||
if !util.HaveManifestCondition(work.Status.ResourceStatus.Manifests, "StatusFeedbackSynced", []metav1.ConditionStatus{metav1.ConditionFalse}) {
|
||||
return fmt.Errorf("Status sync condition should be False")
|
||||
return fmt.Errorf("status sync condition should be False")
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -312,7 +314,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
})
|
||||
|
||||
ginkgo.It("should return none for resources with no wellKnowne status", func() {
|
||||
sa, _ := util.NewServiceAccount(o.AgentOptions.SpokeClusterName, "sa")
|
||||
sa, _ := util.NewServiceAccount(commOptions.SpokeClusterName, "sa")
|
||||
work.Spec.Workload.Manifests = append(work.Spec.Workload.Manifests, util.ToManifest(sa))
|
||||
|
||||
work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{
|
||||
@@ -320,7 +322,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
ResourceIdentifier: workapiv1.ResourceIdentifier{
|
||||
Group: "apps",
|
||||
Resource: "deployments",
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
Name: "deploy1",
|
||||
},
|
||||
FeedbackRules: []workapiv1.FeedbackRule{
|
||||
@@ -333,7 +335,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
ResourceIdentifier: workapiv1.ResourceIdentifier{
|
||||
Group: "",
|
||||
Resource: "serviceaccounts",
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
Name: "sa",
|
||||
},
|
||||
FeedbackRules: []workapiv1.FeedbackRule{
|
||||
@@ -344,17 +346,17 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
},
|
||||
}
|
||||
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
// Update Deployment status on spoke
|
||||
gomega.Eventually(func() error {
|
||||
deploy, err := spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
|
||||
deploy, err := spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -363,19 +365,19 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
deploy.Status.Replicas = 3
|
||||
deploy.Status.ReadyReplicas = 2
|
||||
|
||||
_, err = spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{})
|
||||
_, err = spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{})
|
||||
return err
|
||||
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
|
||||
|
||||
// Check if we get status of deployment on work api
|
||||
gomega.Eventually(func() error {
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(work.Status.ResourceStatus.Manifests) != 2 {
|
||||
return fmt.Errorf("The size of resource status is not correct, expect to be 2 but got %d", len(work.Status.ResourceStatus.Manifests))
|
||||
return fmt.Errorf("the size of resource status is not correct, expect to be 2 but got %d", len(work.Status.ResourceStatus.Manifests))
|
||||
}
|
||||
|
||||
values := work.Status.ResourceStatus.Manifests[0].StatusFeedbacks.Values
|
||||
@@ -404,15 +406,15 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
},
|
||||
}
|
||||
if !apiequality.Semantic.DeepEqual(values, expectedValues) {
|
||||
return fmt.Errorf("Status feedback values are not correct, we got %v", values)
|
||||
return fmt.Errorf("status feedback values are not correct, we got %v", values)
|
||||
}
|
||||
|
||||
if len(work.Status.ResourceStatus.Manifests[1].StatusFeedbacks.Values) != 0 {
|
||||
return fmt.Errorf("Status feedback values are not correct, we got %v", work.Status.ResourceStatus.Manifests[1].StatusFeedbacks.Values)
|
||||
return fmt.Errorf("status feedback values are not correct, we got %v", work.Status.ResourceStatus.Manifests[1].StatusFeedbacks.Values)
|
||||
}
|
||||
|
||||
if !util.HaveManifestCondition(work.Status.ResourceStatus.Manifests, "StatusFeedbackSynced", []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionFalse}) {
|
||||
return fmt.Errorf("Status sync condition should be True")
|
||||
return fmt.Errorf("status sync condition should be True")
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -425,7 +427,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
ResourceIdentifier: workapiv1.ResourceIdentifier{
|
||||
Group: "apps",
|
||||
Resource: "deployments",
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
Name: "deploy1",
|
||||
},
|
||||
FeedbackRules: []workapiv1.FeedbackRule{
|
||||
@@ -442,27 +444,27 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
},
|
||||
}
|
||||
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
})
|
||||
})
|
||||
|
||||
ginkgo.Context("Deployment Status feedback with RawJsonString enabled", func() {
|
||||
ginkgo.BeforeEach(func() {
|
||||
u, _, err := util.NewDeployment(o.AgentOptions.SpokeClusterName, "deploy1", "sa")
|
||||
u, _, err := util.NewDeployment(commOptions.SpokeClusterName, "deploy1", "sa")
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
manifests = append(manifests, util.ToManifest(u))
|
||||
|
||||
err = features.DefaultSpokeWorkMutableFeatureGate.Set(fmt.Sprintf("%s=true", ocmfeature.RawFeedbackJsonString))
|
||||
err = features.SpokeMutableFeatureGate.Set(fmt.Sprintf("%s=true", ocmfeature.RawFeedbackJsonString))
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
var ctx context.Context
|
||||
ctx, cancel = context.WithCancel(context.Background())
|
||||
go startWorkAgent(ctx, o)
|
||||
go startWorkAgent(ctx, o, commOptions)
|
||||
})
|
||||
|
||||
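The hunk above moves the gate from features.DefaultSpokeWorkMutableFeatureGate to the shared features.SpokeMutableFeatureGate before the agent is started. A small sketch of toggling such a gate in a test follows; it assumes only the MutableFeatureGate behaviour that the Set call above already relies on.

// Sketch under the assumption that SpokeMutableFeatureGate is a standard
// k8s.io mutable feature gate, which the Set("<gate>=true") call implies.
package workintegration

import (
	"fmt"

	ocmfeature "open-cluster-management.io/api/feature"

	"open-cluster-management.io/ocm/pkg/features"
)

func enableRawFeedbackJSON() error {
	// Flip the gate for this process; the integration tests do this in a
	// BeforeEach so the work agent started afterwards observes the new value.
	if err := features.SpokeMutableFeatureGate.Set(fmt.Sprintf("%s=true", ocmfeature.RawFeedbackJsonString)); err != nil {
		return err
	}
	if !features.SpokeMutableFeatureGate.Enabled(ocmfeature.RawFeedbackJsonString) {
		return fmt.Errorf("expected %s to be enabled", ocmfeature.RawFeedbackJsonString)
	}
	return nil
}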
ginkgo.AfterEach(func() {
|
||||
@@ -477,7 +479,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
ResourceIdentifier: workapiv1.ResourceIdentifier{
|
||||
Group: "apps",
|
||||
Resource: "deployments",
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
Name: "deploy1",
|
||||
},
|
||||
FeedbackRules: []workapiv1.FeedbackRule{
|
||||
@@ -494,16 +496,16 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
},
|
||||
}
|
||||
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
gomega.Eventually(func() error {
|
||||
deploy, err := spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
|
||||
deploy, err := spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -515,19 +517,19 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
},
|
||||
}
|
||||
|
||||
_, err = spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{})
|
||||
_, err = spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{})
|
||||
return err
|
||||
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
|
||||
|
||||
// Check if we get status of deployment on work api
|
||||
gomega.Eventually(func() error {
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(work.Status.ResourceStatus.Manifests) != 1 {
|
||||
return fmt.Errorf("The size of resource status is not correct, expect to be 1 but got %d", len(work.Status.ResourceStatus.Manifests))
|
||||
return fmt.Errorf("the size of resource status is not correct, expect to be 1 but got %d", len(work.Status.ResourceStatus.Manifests))
|
||||
}
|
||||
|
||||
values := work.Status.ResourceStatus.Manifests[0].StatusFeedbacks.Values
|
||||
@@ -543,13 +545,13 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() {
|
||||
}
|
||||
if !apiequality.Semantic.DeepEqual(values, expectedValues) {
|
||||
if len(values) > 0 {
|
||||
return fmt.Errorf("Status feedback values are not correct, we got %v", *values[0].Value.JsonRaw)
|
||||
return fmt.Errorf("status feedback values are not correct, we got %v", *values[0].Value.JsonRaw)
|
||||
}
|
||||
return fmt.Errorf("Status feedback values are not correct, we got %v", values)
|
||||
return fmt.Errorf("status feedback values are not correct, we got %v", values)
|
||||
}
|
||||
|
||||
if !util.HaveManifestCondition(work.Status.ResourceStatus.Manifests, "StatusFeedbackSynced", []metav1.ConditionStatus{metav1.ConditionTrue}) {
|
||||
return fmt.Errorf("Status sync condition should be True")
|
||||
return fmt.Errorf("status sync condition should be True")
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
@@ -18,8 +18,10 @@ import (

clusterclientset "open-cluster-management.io/api/client/cluster/clientset/versioned"
workclientset "open-cluster-management.io/api/client/work/clientset/versioned"
ocmfeature "open-cluster-management.io/api/feature"
workapiv1 "open-cluster-management.io/api/work/v1"

"open-cluster-management.io/ocm/pkg/features"
"open-cluster-management.io/ocm/pkg/work/helper"
"open-cluster-management.io/ocm/pkg/work/hub"
"open-cluster-management.io/ocm/test/integration/util"
@@ -79,9 +81,11 @@ var _ = ginkgo.BeforeSuite(func() {
err = util.CreateKubeconfigFile(cfg, hubKubeconfigFileName)
gomega.Expect(err).ToNot(gomega.HaveOccurred())

err = workapiv1.AddToScheme(scheme.Scheme)
err = workapiv1.Install(scheme.Scheme)
gomega.Expect(err).NotTo(gomega.HaveOccurred())

features.SpokeMutableFeatureGate.Add(ocmfeature.DefaultSpokeWorkFeatureGates)

spokeRestConfig = cfg
hubHash = helper.HubHash(spokeRestConfig.Host)
spokeKubeClient, err = kubernetes.NewForConfig(cfg)

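In the BeforeSuite hunk above, the default spoke work feature gates are registered on the shared mutable gate and the returned error is discarded. A hedged sketch of the stricter variant, assuming the usual MutableFeatureGate.Add signature, is:

// Sketch only: the same registration as above, but failing fast if Add
// returns an error (for example when a gate is registered twice).
package workintegration

import (
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"

	ocmfeature "open-cluster-management.io/api/feature"

	"open-cluster-management.io/ocm/pkg/features"
)

func registerSpokeWorkFeatureGates() {
	utilruntime.Must(features.SpokeMutableFeatureGate.Add(ocmfeature.DefaultSpokeWorkFeatureGates))
}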
@@ -21,11 +21,12 @@ import (

commonoptions "open-cluster-management.io/ocm/pkg/common/options"
"open-cluster-management.io/ocm/pkg/work/spoke"
util "open-cluster-management.io/ocm/test/integration/util"
"open-cluster-management.io/ocm/test/integration/util"
)

var _ = ginkgo.Describe("Unmanaged ApplieManifestWork", func() {
var o *spoke.WorkloadAgentOptions
var commOptions *commonoptions.AgentOptions
var cancel context.CancelFunc
var work *workapiv1.ManifestWork
var manifests []workapiv1.Manifest
@@ -35,28 +36,29 @@ var _ = ginkgo.Describe("Unmanaged ApplieManifestWork", func() {
|
||||
|
||||
ginkgo.BeforeEach(func() {
|
||||
o = spoke.NewWorkloadAgentOptions()
|
||||
o.HubKubeconfigFile = hubKubeconfigFileName
|
||||
o.AgentOptions = commonoptions.NewAgentOptions()
|
||||
o.AgentOptions.SpokeClusterName = utilrand.String(5)
|
||||
o.StatusSyncInterval = 3 * time.Second
|
||||
o.AgentID = utilrand.String(5)
|
||||
o.AppliedManifestWorkEvictionGracePeriod = 10 * time.Second
|
||||
|
||||
commOptions = commonoptions.NewAgentOptions()
|
||||
commOptions.HubKubeconfigFile = hubKubeconfigFileName
|
||||
commOptions.SpokeClusterName = utilrand.String(5)
|
||||
commOptions.AgentID = utilrand.String(5)
|
||||
|
||||
ns = &corev1.Namespace{}
|
||||
ns.Name = o.AgentOptions.SpokeClusterName
|
||||
ns.Name = commOptions.SpokeClusterName
|
||||
_, err := spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
var ctx context.Context
|
||||
ctx, cancel = context.WithCancel(context.Background())
|
||||
go startWorkAgent(ctx, o)
|
||||
go startWorkAgent(ctx, o, commOptions)
|
||||
|
||||
manifests = []workapiv1.Manifest{
|
||||
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, nil)),
|
||||
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, nil)),
|
||||
}
|
||||
|
||||
work = util.NewManifestWork(o.AgentOptions.SpokeClusterName, "unmanaged-appliedwork", manifests)
|
||||
_, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
|
||||
work = util.NewManifestWork(commOptions.SpokeClusterName, "unmanaged-appliedwork", manifests)
|
||||
_, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
appliedManifestWorkName = fmt.Sprintf("%s-%s", hubHash, work.Name)
|
||||
@@ -66,7 +68,7 @@ var _ = ginkgo.Describe("Unmanaged ApplieManifestWork", func() {
|
||||
if cancel != nil {
|
||||
cancel()
|
||||
}
|
||||
err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), o.AgentOptions.SpokeClusterName, metav1.DeleteOptions{})
|
||||
err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), commOptions.SpokeClusterName, metav1.DeleteOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
})
|
||||
|
||||
@@ -118,9 +120,9 @@ var _ = ginkgo.Describe("Unmanaged ApplieManifestWork", func() {
|
||||
ginkgo.It("should keep old appliemanifestwork with different agent id", func() {
|
||||
util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
// stop the agent and make it connect to the new hub
|
||||
@@ -129,27 +131,29 @@ var _ = ginkgo.Describe("Unmanaged ApplieManifestWork", func() {
|
||||
}
|
||||
|
||||
newOption := spoke.NewWorkloadAgentOptions()
|
||||
newOption.HubKubeconfigFile = newHubKubeConfigFile
|
||||
newOption.AgentOptions.SpokeClusterName = o.AgentOptions.SpokeClusterName
|
||||
newOption.AgentID = utilrand.String(5)
|
||||
newOption.AppliedManifestWorkEvictionGracePeriod = 5 * time.Second
|
||||
|
||||
newCommonOptions := commonoptions.NewAgentOptions()
|
||||
newCommonOptions.HubKubeconfigFile = newHubKubeConfigFile
|
||||
newCommonOptions.SpokeClusterName = commOptions.SpokeClusterName
|
||||
newCommonOptions.AgentID = utilrand.String(5)
|
||||
|
||||
var ctx context.Context
|
||||
ctx, cancel = context.WithCancel(context.Background())
|
||||
go startWorkAgent(ctx, newOption)
|
||||
go startWorkAgent(ctx, newOption, newCommonOptions)
|
||||
|
||||
// Create the same manifestwork with the same name on new hub.
|
||||
work, err = newWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
|
||||
work, err = newWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, newWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, newWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, newWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, newWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
// ensure the resource has two ownerrefs
|
||||
gomega.Eventually(func() error {
|
||||
cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.TODO(), "cm1", metav1.GetOptions{})
|
||||
cm, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.TODO(), "cm1", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -163,9 +167,9 @@ var _ = ginkgo.Describe("Unmanaged ApplieManifestWork", func() {
|
||||
ginkgo.It("should remove old appliemanifestwork if applied again on new hub", func() {
|
||||
util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
// stop the agent and make it connect to the new hub
|
||||
@@ -174,22 +178,24 @@ var _ = ginkgo.Describe("Unmanaged ApplieManifestWork", func() {
|
||||
}
|
||||
|
||||
newOption := spoke.NewWorkloadAgentOptions()
|
||||
newOption.HubKubeconfigFile = newHubKubeConfigFile
|
||||
newOption.AgentOptions.SpokeClusterName = o.AgentOptions.SpokeClusterName
|
||||
newOption.AgentID = o.AgentID
|
||||
newOption.AppliedManifestWorkEvictionGracePeriod = 5 * time.Second
|
||||
|
||||
newCommonOptions := commonoptions.NewAgentOptions()
|
||||
newCommonOptions.HubKubeconfigFile = newHubKubeConfigFile
|
||||
newCommonOptions.SpokeClusterName = commOptions.SpokeClusterName
|
||||
newCommonOptions.AgentID = commOptions.AgentID
|
||||
|
||||
var ctx context.Context
|
||||
ctx, cancel = context.WithCancel(context.Background())
|
||||
go startWorkAgent(ctx, newOption)
|
||||
go startWorkAgent(ctx, newOption, newCommonOptions)
|
||||
|
||||
// Create the same manifestwork with the same name.
|
||||
work, err = newWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
|
||||
work, err = newWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, newWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, newWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, newWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, newWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
// ensure the old manifestwork is removed.
|
||||
@@ -206,7 +212,7 @@ var _ = ginkgo.Describe("Unmanaged ApplieManifestWork", func() {
|
||||
|
||||
// ensure the resource has only one ownerref
|
||||
gomega.Eventually(func() error {
|
||||
cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.TODO(), "cm1", metav1.GetOptions{})
|
||||
cm, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.TODO(), "cm1", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -224,9 +230,9 @@ var _ = ginkgo.Describe("Unmanaged ApplieManifestWork", func() {
|
||||
ginkgo.Context("Should evict applied work when its manifestwork is missing on the hub", func() {
|
||||
ginkgo.BeforeEach(func() {
|
||||
util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
// stop the agent
|
||||
@@ -243,7 +249,7 @@ var _ = ginkgo.Describe("Unmanaged ApplieManifestWork", func() {
|
||||
// restart the work agent
|
||||
var ctx context.Context
|
||||
ctx, cancel = context.WithCancel(context.Background())
|
||||
go startWorkAgent(ctx, o)
|
||||
go startWorkAgent(ctx, o, commOptions)
|
||||
|
||||
// ensure the manifestwork is removed.
|
||||
gomega.Eventually(func() error {
|
||||
@@ -262,10 +268,10 @@ var _ = ginkgo.Describe("Unmanaged ApplieManifestWork", func() {
|
||||
// restart the work agent
|
||||
var ctx context.Context
|
||||
ctx, cancel = context.WithCancel(context.Background())
|
||||
go startWorkAgent(ctx, o)
|
||||
go startWorkAgent(ctx, o, commOptions)
|
||||
|
||||
// recreate the work on the hub
|
||||
_, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
|
||||
_, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
// ensure the appliedmanifestwork eviction is stopped
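// Recreating the work on the hub must stop the pending appliedmanifestwork
// eviction. One way such a check can be written, assuming the
// AppliedManifestWork status records an EvictionStartTime that is cleared once
// the ManifestWork reappears; the client and variable names here are
// assumptions, not necessarily what this suite uses.
gomega.Eventually(func() error {
	appliedWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(
		context.Background(), appliedManifestWorkName, metav1.GetOptions{})
	if err != nil {
		return err
	}
	if appliedWork.Status.EvictionStartTime != nil {
		return fmt.Errorf("appliedmanifestwork %s is still marked for eviction", appliedWork.Name)
	}
	return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())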
@@ -23,6 +23,7 @@ import (
|
||||
|
||||
var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
|
||||
var o *spoke.WorkloadAgentOptions
|
||||
var commOptions *commonoptions.AgentOptions
|
||||
var cancel context.CancelFunc
|
||||
|
||||
var work *workapiv1.ManifestWork
|
||||
@@ -32,33 +33,34 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
|
||||
|
||||
ginkgo.BeforeEach(func() {
|
||||
o = spoke.NewWorkloadAgentOptions()
|
||||
o.HubKubeconfigFile = hubKubeconfigFileName
|
||||
o.AgentOptions = commonoptions.NewAgentOptions()
|
||||
o.AgentOptions.SpokeClusterName = utilrand.String(5)
|
||||
o.StatusSyncInterval = 3 * time.Second
|
||||
|
||||
commOptions = commonoptions.NewAgentOptions()
|
||||
commOptions.HubKubeconfigFile = hubKubeconfigFileName
|
||||
commOptions.SpokeClusterName = utilrand.String(5)
|
||||
|
||||
ns := &corev1.Namespace{}
|
||||
ns.Name = o.AgentOptions.SpokeClusterName
|
||||
ns.Name = commOptions.SpokeClusterName
|
||||
_, err := spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
var ctx context.Context
|
||||
ctx, cancel = context.WithCancel(context.Background())
|
||||
go startWorkAgent(ctx, o)
|
||||
go startWorkAgent(ctx, o, commOptions)
|
||||
|
||||
// reset manifests
|
||||
manifests = nil
|
||||
})
|
||||
|
||||
ginkgo.JustBeforeEach(func() {
|
||||
work = util.NewManifestWork(o.AgentOptions.SpokeClusterName, "", manifests)
|
||||
work = util.NewManifestWork(commOptions.SpokeClusterName, "", manifests)
|
||||
})
|
||||
|
||||
ginkgo.AfterEach(func() {
|
||||
if cancel != nil {
|
||||
cancel()
|
||||
}
|
||||
err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), o.AgentOptions.SpokeClusterName, metav1.DeleteOptions{})
|
||||
err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), commOptions.SpokeClusterName, metav1.DeleteOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
})
|
||||
|
||||
@@ -66,7 +68,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
|
||||
var object *unstructured.Unstructured
|
||||
|
||||
ginkgo.BeforeEach(func() {
|
||||
object, _, err = util.NewDeployment(o.AgentOptions.SpokeClusterName, "deploy1", "sa")
|
||||
object, _, err = util.NewDeployment(commOptions.SpokeClusterName, "deploy1", "sa")
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
manifests = append(manifests, util.ToManifest(object))
|
||||
})
|
||||
@@ -77,7 +79,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
|
||||
ResourceIdentifier: workapiv1.ResourceIdentifier{
|
||||
Group: "apps",
|
||||
Resource: "deployments",
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
Name: "deploy1",
|
||||
},
|
||||
UpdateStrategy: &workapiv1.UpdateStrategy{
|
||||
@@ -86,37 +88,37 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
|
||||
},
|
||||
}
|
||||
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
// update work
|
||||
err = unstructured.SetNestedField(object.Object, int64(3), "spec", "replicas")
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
gomega.Eventually(func() error {
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
work.Spec.Workload.Manifests[0] = util.ToManifest(object)
|
||||
_, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
|
||||
_, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
|
||||
return err
|
||||
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
|
||||
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
gomega.Eventually(func() error {
|
||||
deploy, err := spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
|
||||
deploy, err := spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if *deploy.Spec.Replicas != 1 {
|
||||
return fmt.Errorf("Replicas should not be changed")
|
||||
return fmt.Errorf("replicas should not be changed")
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -128,7 +130,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
|
||||
var object *unstructured.Unstructured
|
||||
|
||||
ginkgo.BeforeEach(func() {
|
||||
object, _, err = util.NewDeployment(o.AgentOptions.SpokeClusterName, "deploy1", "sa")
|
||||
object, _, err = util.NewDeployment(commOptions.SpokeClusterName, "deploy1", "sa")
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
manifests = append(manifests, util.ToManifest(object))
|
||||
})
|
||||
@@ -139,7 +141,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
|
||||
ResourceIdentifier: workapiv1.ResourceIdentifier{
|
||||
Group: "apps",
|
||||
Resource: "deployments",
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
Name: "deploy1",
|
||||
},
|
||||
UpdateStrategy: &workapiv1.UpdateStrategy{
|
||||
@@ -148,33 +150,33 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
|
||||
},
|
||||
}
|
||||
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
// update work
|
||||
err = unstructured.SetNestedField(object.Object, int64(3), "spec", "replicas")
|
||||
gomega.Eventually(func() error {
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
work.Spec.Workload.Manifests[0] = util.ToManifest(object)
|
||||
_, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
|
||||
_, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
|
||||
return err
|
||||
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
|
||||
|
||||
gomega.Eventually(func() error {
|
||||
deploy, err := spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
|
||||
deploy, err := spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if *deploy.Spec.Replicas != 3 {
|
||||
return fmt.Errorf("Replicas should be updated to 3 but got %d", *deploy.Spec.Replicas)
|
||||
return fmt.Errorf("replicas should be updated to 3 but got %d", *deploy.Spec.Replicas)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -187,7 +189,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
|
||||
ResourceIdentifier: workapiv1.ResourceIdentifier{
|
||||
Group: "apps",
|
||||
Resource: "deployments",
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
Name: "deploy1",
|
||||
},
|
||||
UpdateStrategy: &workapiv1.UpdateStrategy{
|
||||
@@ -196,10 +198,10 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
|
||||
},
|
||||
}
|
||||
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
// update deployment with another field manager
|
||||
@@ -207,42 +209,42 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
patch, err := object.MarshalJSON()
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
_, err = spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).Patch(
|
||||
context.Background(), "deploy1", types.ApplyPatchType, []byte(patch), metav1.PatchOptions{Force: pointer.Bool(true), FieldManager: "test-integration"})
|
||||
_, err = spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Patch(
|
||||
context.Background(), "deploy1", types.ApplyPatchType, patch, metav1.PatchOptions{Force: pointer.Bool(true), FieldManager: "test-integration"})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
// Update deployment by work
|
||||
err = unstructured.SetNestedField(object.Object, int64(3), "spec", "replicas")
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
gomega.Eventually(func() error {
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
work.Spec.Workload.Manifests[0] = util.ToManifest(object)
|
||||
_, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
|
||||
_, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
|
||||
return err
|
||||
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
|
||||
|
||||
// Failed to apply due to conflict
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionFalse,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionFalse,
|
||||
[]metav1.ConditionStatus{metav1.ConditionFalse}, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
// remove the replica field and the apply should work
|
||||
// remove the replica field and apply should work
|
||||
unstructured.RemoveNestedField(object.Object, "spec", "replicas")
|
||||
gomega.Eventually(func() error {
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
work.Spec.Workload.Manifests[0] = util.ToManifest(object)
|
||||
_, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
|
||||
_, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
|
||||
return err
|
||||
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
|
||||
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
})
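// The conflict cases above rely on a server-side-apply update strategy without
// force: once the "test-integration" field manager takes over spec.replicas,
// the agent's apply is rejected and Applied goes False until the work stops
// claiming that field. A sketch of the ManifestConfigOption these cases
// exercise; field names follow the work/v1 API as I understand it and are
// illustrative rather than copied from the hidden part of the hunk.
work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{
	{
		ResourceIdentifier: workapiv1.ResourceIdentifier{
			Group:     "apps",
			Resource:  "deployments",
			Namespace: commOptions.SpokeClusterName,
			Name:      "deploy1",
		},
		UpdateStrategy: &workapiv1.UpdateStrategy{
			Type: workapiv1.UpdateStrategyTypeServerSideApply,
			// Without Force, a field owned by another manager (spec.replicas here)
			// makes the apply fail with a conflict, which is what the
			// ConditionFalse assertion above verifies.
			ServerSideApply: &workapiv1.ServerSideApplyConfig{Force: false},
		},
	},
}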
@@ -252,7 +254,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
|
||||
ResourceIdentifier: workapiv1.ResourceIdentifier{
|
||||
Group: "apps",
|
||||
Resource: "deployments",
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
Name: "deploy1",
|
||||
},
|
||||
UpdateStrategy: &workapiv1.UpdateStrategy{
|
||||
@@ -261,23 +263,23 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
|
||||
},
|
||||
}
|
||||
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
// Create another work with a different field manager
|
||||
objCopy := object.DeepCopy()
|
||||
// work1 does not want to own the replicas field
|
||||
unstructured.RemoveNestedField(objCopy.Object, "spec", "replicas")
|
||||
work1 := util.NewManifestWork(o.AgentOptions.SpokeClusterName, "another", []workapiv1.Manifest{util.ToManifest(objCopy)})
|
||||
work1 := util.NewManifestWork(commOptions.SpokeClusterName, "another", []workapiv1.Manifest{util.ToManifest(objCopy)})
|
||||
work1.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{
|
||||
{
|
||||
ResourceIdentifier: workapiv1.ResourceIdentifier{
|
||||
Group: "apps",
|
||||
Resource: "deployments",
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
Name: "deploy1",
|
||||
},
|
||||
UpdateStrategy: &workapiv1.UpdateStrategy{
|
||||
@@ -290,32 +292,32 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
|
||||
},
|
||||
}
|
||||
|
||||
_, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work1, metav1.CreateOptions{})
|
||||
_, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work1, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
util.AssertWorkCondition(work1.Namespace, work1.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work1.Namespace, work1.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
// Updating the deployment replicas through this work should succeed since the work still owns the replicas field
|
||||
err = unstructured.SetNestedField(object.Object, int64(3), "spec", "replicas")
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
gomega.Eventually(func() error {
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
work.Spec.Workload.Manifests[0] = util.ToManifest(object)
|
||||
_, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
|
||||
_, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
|
||||
return err
|
||||
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
|
||||
|
||||
// This should work since this work still owns the replicas field
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
gomega.Eventually(func() error {
|
||||
deploy, err := spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
|
||||
deploy, err := spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -331,18 +333,18 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
|
||||
err = unstructured.SetNestedField(object.Object, "another-sa", "spec", "template", "spec", "serviceAccountName")
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
gomega.Eventually(func() error {
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
work.Spec.Workload.Manifests[0] = util.ToManifest(object)
|
||||
_, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
|
||||
_, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
|
||||
return err
|
||||
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
|
||||
|
||||
// This should fail since the serviceAccountName field is also owned by the other work's field manager
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionFalse,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionFalse,
|
||||
[]metav1.ConditionStatus{metav1.ConditionFalse}, eventuallyTimeout, eventuallyInterval)
|
||||
})
|
||||
|
||||
@@ -352,7 +354,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
|
||||
ResourceIdentifier: workapiv1.ResourceIdentifier{
|
||||
Group: "apps",
|
||||
Resource: "deployments",
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
Name: "deploy1",
|
||||
},
|
||||
UpdateStrategy: &workapiv1.UpdateStrategy{
|
||||
@@ -361,23 +363,23 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
|
||||
},
|
||||
}
|
||||
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
// Create another work with a different field manager
|
||||
objCopy := object.DeepCopy()
|
||||
// work1 does not want to own the replicas field
|
||||
unstructured.RemoveNestedField(objCopy.Object, "spec", "replicas")
|
||||
work1 := util.NewManifestWork(o.AgentOptions.SpokeClusterName, "another", []workapiv1.Manifest{util.ToManifest(objCopy)})
|
||||
work1 := util.NewManifestWork(commOptions.SpokeClusterName, "another", []workapiv1.Manifest{util.ToManifest(objCopy)})
|
||||
work1.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{
|
||||
{
|
||||
ResourceIdentifier: workapiv1.ResourceIdentifier{
|
||||
Group: "apps",
|
||||
Resource: "deployments",
|
||||
Namespace: o.AgentOptions.SpokeClusterName,
|
||||
Namespace: commOptions.SpokeClusterName,
|
||||
Name: "deploy1",
|
||||
},
|
||||
UpdateStrategy: &workapiv1.UpdateStrategy{
|
||||
@@ -390,14 +392,14 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
|
||||
},
|
||||
}
|
||||
|
||||
_, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work1, metav1.CreateOptions{})
|
||||
_, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work1, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
util.AssertWorkCondition(work1.Namespace, work1.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work1.Namespace, work1.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
gomega.Eventually(func() error {
|
||||
deploy, err := spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
|
||||
deploy, err := spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -411,18 +413,18 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() {
|
||||
|
||||
// update deleteOption of the first work
|
||||
gomega.Eventually(func() error {
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
work.Spec.DeleteOption = &workapiv1.DeleteOption{PropagationPolicy: workapiv1.DeletePropagationPolicyTypeOrphan}
|
||||
_, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
|
||||
_, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
|
||||
return err
|
||||
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
|
||||
|
||||
gomega.Eventually(func() error {
|
||||
deploy, err := spokeKubeClient.AppsV1().Deployments(o.AgentOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
|
||||
deploy, err := spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -24,8 +24,9 @@ import (
"open-cluster-management.io/ocm/test/integration/util"
)

func startWorkAgent(ctx context.Context, o *spoke.WorkloadAgentOptions) {
err := o.RunWorkloadAgent(ctx, &controllercmd.ControllerContext{
func startWorkAgent(ctx context.Context, o *spoke.WorkloadAgentOptions, commOption *commonoptions.AgentOptions) {
agentConfig := spoke.NewWorkAgentConfig(commOption, o)
err := agentConfig.RunWorkloadAgent(ctx, &controllercmd.ControllerContext{
KubeConfig: spokeRestConfig,
EventRecorder: util.NewIntegrationTestEventRecorder("integration"),
})
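// With the new signature above, a caller builds the shared agent options and
// the workload options separately and passes both to startWorkAgent, which
// wires them together through spoke.NewWorkAgentConfig. A minimal sketch of
// how the suites in this diff start the agent; the cluster name here is
// illustrative.
commOptions := commonoptions.NewAgentOptions()
commOptions.HubKubeconfigFile = hubKubeconfigFileName
commOptions.SpokeClusterName = "cluster1"

workOptions := spoke.NewWorkloadAgentOptions()
workOptions.StatusSyncInterval = 3 * time.Second

ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go startWorkAgent(ctx, workOptions, commOptions)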
@@ -34,6 +35,7 @@ func startWorkAgent(ctx context.Context, o *spoke.WorkloadAgentOptions) {
|
||||
|
||||
var _ = ginkgo.Describe("ManifestWork", func() {
|
||||
var o *spoke.WorkloadAgentOptions
|
||||
var commOptions *commonoptions.AgentOptions
|
||||
var cancel context.CancelFunc
|
||||
|
||||
var work *workapiv1.ManifestWork
|
||||
@@ -44,50 +46,51 @@ var _ = ginkgo.Describe("ManifestWork", func() {
|
||||
|
||||
ginkgo.BeforeEach(func() {
|
||||
o = spoke.NewWorkloadAgentOptions()
|
||||
o.HubKubeconfigFile = hubKubeconfigFileName
|
||||
o.AgentOptions = commonoptions.NewAgentOptions()
|
||||
o.AgentOptions.SpokeClusterName = utilrand.String(5)
|
||||
o.StatusSyncInterval = 3 * time.Second
|
||||
o.AppliedManifestWorkEvictionGracePeriod = 5 * time.Second
|
||||
|
||||
commOptions = commonoptions.NewAgentOptions()
|
||||
commOptions.HubKubeconfigFile = hubKubeconfigFileName
|
||||
commOptions.SpokeClusterName = utilrand.String(5)
|
||||
|
||||
ns := &corev1.Namespace{}
|
||||
ns.Name = o.AgentOptions.SpokeClusterName
|
||||
ns.Name = commOptions.SpokeClusterName
|
||||
_, err := spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
var ctx context.Context
|
||||
ctx, cancel = context.WithCancel(context.Background())
|
||||
go startWorkAgent(ctx, o)
|
||||
go startWorkAgent(ctx, o, commOptions)
|
||||
|
||||
// reset manifests
|
||||
manifests = nil
|
||||
})
|
||||
|
||||
ginkgo.JustBeforeEach(func() {
|
||||
work = util.NewManifestWork(o.AgentOptions.SpokeClusterName, "", manifests)
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
|
||||
work = util.NewManifestWork(commOptions.SpokeClusterName, "", manifests)
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
|
||||
appliedManifestWorkName = fmt.Sprintf("%s-%s", hubHash, work.Name)
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
})
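// The JustBeforeEach above names the expected AppliedManifestWork
// "<hubHash>-<work name>", so the same ManifestWork applied from two different
// hubs maps to two distinct AppliedManifestWorks on the spoke cluster. A sketch
// of how that name can be used to wait for the spoke-side resource;
// spokeWorkClient is an assumed work clientset for the spoke, not necessarily
// what this suite calls it.
gomega.Eventually(func() error {
	_, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(
		context.Background(), appliedManifestWorkName, metav1.GetOptions{})
	return err
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())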
ginkgo.AfterEach(func() {
|
||||
err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
|
||||
err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
|
||||
if !errors.IsNotFound(err) {
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
}
|
||||
|
||||
gomega.Eventually(func() error {
|
||||
_, err := hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
_, err := hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
if errors.IsNotFound(err) {
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return fmt.Errorf("work %s in namespace %s still exists", work.Name, o.AgentOptions.SpokeClusterName)
|
||||
return fmt.Errorf("work %s in namespace %s still exists", work.Name, commOptions.SpokeClusterName)
|
||||
}, eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed())
|
||||
|
||||
err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), o.AgentOptions.SpokeClusterName, metav1.DeleteOptions{})
|
||||
err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), commOptions.SpokeClusterName, metav1.DeleteOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
if cancel != nil {
|
||||
@@ -98,33 +101,33 @@ var _ = ginkgo.Describe("ManifestWork", func() {
|
||||
ginkgo.Context("With a single manifest", func() {
|
||||
ginkgo.BeforeEach(func() {
|
||||
manifests = []workapiv1.Manifest{
|
||||
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, nil)),
|
||||
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, nil)),
|
||||
}
|
||||
})
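// A minimal sketch of helpers like util.NewConfigmap and util.ToManifest used
// above; the repository's actual helpers may serialize differently. Assumes
// corev1 = "k8s.io/api/core/v1" and runtime = "k8s.io/apimachinery/pkg/runtime".
func NewConfigmap(namespace, name string, data map[string]string, finalizers []string) *corev1.ConfigMap {
	return &corev1.ConfigMap{
		TypeMeta:   metav1.TypeMeta{APIVersion: "v1", Kind: "ConfigMap"},
		ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name, Finalizers: finalizers},
		Data:       data,
	}
}

func ToManifest(object runtime.Object) workapiv1.Manifest {
	manifest := workapiv1.Manifest{}
	// Manifest embeds runtime.RawExtension; keeping the GVK in TypeMeta is what
	// lets the work agent decode and apply the object on the spoke cluster.
	manifest.Object = object
	return manifest
}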
ginkgo.It("should create work and then apply it successfully", func() {
|
||||
util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
})
|
||||
|
||||
ginkgo.It("should update work and then apply it successfully", func() {
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
newManifests := []workapiv1.Manifest{
|
||||
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm2", map[string]string{"x": "y"}, nil)),
|
||||
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm2", map[string]string{"x": "y"}, nil)),
|
||||
}
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
work.Spec.Workload.Manifests = newManifests
|
||||
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
util.AssertExistenceOfConfigMaps(newManifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
|
||||
@@ -146,14 +149,14 @@ var _ = ginkgo.Describe("ManifestWork", func() {
|
||||
return nil
|
||||
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
|
||||
|
||||
_, err = spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
|
||||
_, err = spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
|
||||
gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue())
|
||||
})
|
||||
|
||||
ginkgo.It("should delete work successfully", func() {
|
||||
util.AssertFinalizerAdded(work.Namespace, work.Name, hubWorkClient, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
|
||||
err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
util.AssertWorkDeleted(work.Namespace, work.Name, hubHash, manifests, hubWorkClient, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
|
||||
@@ -164,44 +167,44 @@ var _ = ginkgo.Describe("ManifestWork", func() {
|
||||
ginkgo.BeforeEach(func() {
|
||||
manifests = []workapiv1.Manifest{
|
||||
util.ToManifest(util.NewConfigmap("non-existent-namespace", "cm1", map[string]string{"a": "b"}, nil)),
|
||||
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm2", map[string]string{"c": "d"}, nil)),
|
||||
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm3", map[string]string{"e": "f"}, nil)),
|
||||
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm2", map[string]string{"c": "d"}, nil)),
|
||||
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm3", map[string]string{"e": "f"}, nil)),
|
||||
}
|
||||
})
|
||||
|
||||
ginkgo.It("should create work and then apply it successfully", func() {
|
||||
util.AssertExistenceOfConfigMaps(manifests[1:], spokeKubeClient, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionFalse,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionFalse,
|
||||
[]metav1.ConditionStatus{metav1.ConditionFalse, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionFalse,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable, metav1.ConditionFalse,
|
||||
[]metav1.ConditionStatus{metav1.ConditionFalse, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
})
|
||||
|
||||
ginkgo.It("should update work and then apply it successfully", func() {
|
||||
util.AssertExistenceOfConfigMaps(manifests[1:], spokeKubeClient, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionFalse,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionFalse,
|
||||
[]metav1.ConditionStatus{metav1.ConditionFalse, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionFalse,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable, metav1.ConditionFalse,
|
||||
[]metav1.ConditionStatus{metav1.ConditionFalse, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
newManifests := []workapiv1.Manifest{
|
||||
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, nil)),
|
||||
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm2", map[string]string{"x": "y"}, nil)),
|
||||
util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm4", map[string]string{"e": "f"}, nil)),
|
||||
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, nil)),
|
||||
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm2", map[string]string{"x": "y"}, nil)),
|
||||
util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm4", map[string]string{"e": "f"}, nil)),
|
||||
}
|
||||
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
work.Spec.Workload.Manifests = newManifests
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
|
||||
work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
util.AssertExistenceOfConfigMaps(newManifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
// check whether the Available status is updated
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
// check that the resource created by the stale manifest is deleted once it is removed from the applied resource list
|
||||
@@ -220,14 +223,14 @@ var _ = ginkgo.Describe("ManifestWork", func() {
|
||||
return nil
|
||||
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
|
||||
|
||||
_, err = spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm3", metav1.GetOptions{})
|
||||
_, err = spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), "cm3", metav1.GetOptions{})
|
||||
gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue())
|
||||
})
|
||||
|
||||
ginkgo.It("should delete work successfully", func() {
|
||||
util.AssertFinalizerAdded(work.Namespace, work.Name, hubWorkClient, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
|
||||
err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
util.AssertWorkDeleted(work.Namespace, work.Name, hubHash, manifests, hubWorkClient, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
|
||||
@@ -251,7 +254,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
|
||||
objects = append(objects, obj)
|
||||
|
||||
// cr
|
||||
obj, gvr, err = util.GuestbookCr(o.AgentOptions.SpokeClusterName, "guestbook1")
|
||||
obj, gvr, err = util.GuestbookCr(commOptions.SpokeClusterName, "guestbook1")
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
gvrs = append(gvrs, gvr)
|
||||
objects = append(objects, obj)
|
||||
@@ -262,9 +265,9 @@ var _ = ginkgo.Describe("ManifestWork", func() {
|
||||
})
|
||||
|
||||
ginkgo.It("should create CRD and CR successfully", func() {
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
var namespaces, names []string
|
||||
@@ -278,9 +281,9 @@ var _ = ginkgo.Describe("ManifestWork", func() {
|
||||
})
|
||||
|
||||
ginkgo.It("should merge annotation of existing CR", func() {
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
var namespaces, names []string
|
||||
@@ -293,7 +296,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
|
||||
util.AssertAppliedResources(hubHash, work.Name, gvrs, namespaces, names, hubWorkClient, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
// update object label
|
||||
obj, gvr, err := util.GuestbookCr(o.AgentOptions.SpokeClusterName, "guestbook1")
|
||||
obj, gvr, err := util.GuestbookCr(commOptions.SpokeClusterName, "guestbook1")
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
cr, err := util.GetResource(obj.GetNamespace(), obj.GetName(), gvr, spokeDynamicClient)
|
||||
@@ -328,9 +331,9 @@ var _ = ginkgo.Describe("ManifestWork", func() {
|
||||
})
|
||||
|
||||
ginkgo.It("should keep the finalizer unchanged of existing CR", func() {
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
var namespaces, names []string
|
||||
@@ -343,7 +346,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
|
||||
util.AssertAppliedResources(hubHash, work.Name, gvrs, namespaces, names, hubWorkClient, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
// update object finalizer
|
||||
obj, gvr, err := util.GuestbookCr(o.AgentOptions.SpokeClusterName, "guestbook1")
|
||||
obj, gvr, err := util.GuestbookCr(commOptions.SpokeClusterName, "guestbook1")
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
cr, err := util.GetResource(obj.GetNamespace(), obj.GetName(), gvr, spokeDynamicClient)
|
||||
@@ -392,9 +395,9 @@ var _ = ginkgo.Describe("ManifestWork", func() {
|
||||
})
|
||||
|
||||
ginkgo.It("should delete CRD and CR successfully", func() {
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
var namespaces, names []string
|
||||
@@ -407,17 +410,17 @@ var _ = ginkgo.Describe("ManifestWork", func() {
|
||||
util.AssertAppliedResources(hubHash, work.Name, gvrs, namespaces, names, hubWorkClient, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
// delete manifest work
|
||||
err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
|
||||
err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
|
||||
// wait for deletion of manifest work
|
||||
gomega.Eventually(func() bool {
|
||||
_, err := hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
_, err := hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
|
||||
return errors.IsNotFound(err)
|
||||
}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
|
||||
|
||||
// Once the manifestwork is not found, its related appliedmanifestwork will be evicted and, finally,
|
||||
// all CRs/CRD should been deleted too
|
||||
// all CRs/CRD should be deleted too
|
||||
gomega.Eventually(func() error {
|
||||
for i := range gvrs {
|
||||
_, err := util.GetResource(namespaces[i], names[i], gvrs[i], spokeDynamicClient)
|
||||
@@ -446,19 +449,19 @@ var _ = ginkgo.Describe("ManifestWork", func() {
|
||||
gvrs = nil
|
||||
objects = nil
|
||||
|
||||
u, gvr := util.NewServiceAccount(o.AgentOptions.SpokeClusterName, "sa")
|
||||
u, gvr := util.NewServiceAccount(commOptions.SpokeClusterName, "sa")
|
||||
gvrs = append(gvrs, gvr)
|
||||
objects = append(objects, u)
|
||||
|
||||
u, gvr = util.NewRole(o.AgentOptions.SpokeClusterName, "role1")
|
||||
u, gvr = util.NewRole(commOptions.SpokeClusterName, "role1")
|
||||
gvrs = append(gvrs, gvr)
|
||||
objects = append(objects, u)
|
||||
|
||||
u, gvr = util.NewRoleBinding(o.AgentOptions.SpokeClusterName, "rolebinding1", "sa", "role1")
|
||||
u, gvr = util.NewRoleBinding(commOptions.SpokeClusterName, "rolebinding1", "sa", "role1")
|
||||
gvrs = append(gvrs, gvr)
|
||||
objects = append(objects, u)
|
||||
|
||||
u, gvr, err = util.NewDeployment(o.AgentOptions.SpokeClusterName, "deploy1", "sa")
|
||||
u, gvr, err = util.NewDeployment(commOptions.SpokeClusterName, "deploy1", "sa")
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
gvrs = append(gvrs, gvr)
|
||||
objects = append(objects, u)
|
||||
@@ -469,10 +472,10 @@ var _ = ginkgo.Describe("ManifestWork", func() {
|
||||
})
|
||||
|
||||
ginkgo.It("should create Service Account, Role, RoleBinding and Deployment successfully", func() {
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue},
|
||||
eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue},
|
||||
eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
@@ -488,15 +491,15 @@ var _ = ginkgo.Describe("ManifestWork", func() {
|
||||
|
||||
ginkgo.It("should update Service Account and Deployment successfully", func() {
|
||||
ginkgo.By("check condition status in work status")
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue},
|
||||
eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
|
||||
util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue,
|
||||
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue},
|
||||
eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
util.AssertWorkGeneration(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertWorkGeneration(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertWorkGeneration(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, eventuallyTimeout, eventuallyInterval)
|
||||
util.AssertWorkGeneration(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable, eventuallyTimeout, eventuallyInterval)
|
||||
|
||||
ginkgo.By("check existence of all maintained resources")
|
||||
var namespaces, names []string
|
||||
@@ -513,13 +516,13 @@ var _ = ginkgo.Describe("ManifestWork", func() {
|
||||
ginkgo.By("update manifests in work")
|
||||
oldServiceAccount := objects[0]
|
||||
gvrs[0], gvrs[3] = gvrs[3], gvrs[0]
|
||||
u, _ := util.NewServiceAccount(o.AgentOptions.SpokeClusterName, "admin")
|
||||
u, _ := util.NewServiceAccount(commOptions.SpokeClusterName, "admin")
|
||||
objects[3] = u
|
||||
u, _, err = util.NewDeployment(o.AgentOptions.SpokeClusterName, "deploy1", "admin")
|
||||
u, _, err = util.NewDeployment(commOptions.SpokeClusterName, "deploy1", "admin")
|
||||
gomega.Expect(err).ToNot(gomega.HaveOccurred())
|
||||
objects[0] = u
|
||||
|
||||
newManifests := []workapiv1.Manifest{}
|
||||
var newManifests []workapiv1.Manifest
|
||||
for _, obj := range objects {
|
||||
newManifests = append(newManifests, util.ToManifest(obj))
|
||||
}
|
||||
@@ -529,10 +532,10 @@ var _ = ginkgo.Describe("ManifestWork", func() {
updateTime := metav1.Now()
time.Sleep(1 * time.Second)

-work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
+work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
work.Spec.Workload.Manifests = newManifests
-work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
+work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

ginkgo.By("check existence of all maintained resources")
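Throughout this file the test now reads the cluster name from commOptions.SpokeClusterName instead of o.AgentOptions.SpokeClusterName, consistent with the commit's goal of running registration and work as one agent: settings both agents need appear to have moved into a shared options value. The sketch below shows one plausible shape for such shared options; the package name, the extra field, and the flag names are assumptions, since only SpokeClusterName is visible in this diff.

// Sketch only: a possible layout for agent options shared by the registration
// and work agents when they run in a single process. Everything other than
// SpokeClusterName is an illustrative assumption.
package commonoptions

import "github.com/spf13/pflag"

// AgentOptions holds settings that both embedded agents need.
type AgentOptions struct {
	SpokeClusterName string // name of the managed (spoke) cluster
	HubKubeconfigDir string // assumed: directory holding the hub kubeconfig/certs
}

func NewAgentOptions() *AgentOptions {
	return &AgentOptions{}
}

// AddFlags registers the shared settings once on the singleton agent's flag
// set instead of once per sub-agent.
func (o *AgentOptions) AddFlags(fs *pflag.FlagSet) {
	fs.StringVar(&o.SpokeClusterName, "spoke-cluster-name", o.SpokeClusterName,
		"Name of the managed (spoke) cluster.")
	fs.StringVar(&o.HubKubeconfigDir, "hub-kubeconfig-dir", o.HubKubeconfigDir,
		"Directory containing the kubeconfig used to talk to the hub.")
}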
@@ -546,7 +549,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {

ginkgo.By("check if deployment is updated")
gomega.Eventually(func() error {
-u, err := util.GetResource(o.AgentOptions.SpokeClusterName, objects[0].GetName(), gvrs[0], spokeDynamicClient)
+u, err := util.GetResource(commOptions.SpokeClusterName, objects[0].GetName(), gvrs[0], spokeDynamicClient)
if err != nil {
return err
}
@@ -560,7 +563,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {

ginkgo.By("check if LastTransitionTime is updated")
gomega.Eventually(func() error {
-work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
+work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
if err != nil {
return err
}
@@ -589,8 +592,8 @@ var _ = ginkgo.Describe("ManifestWork", func() {
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())

-util.AssertWorkGeneration(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), eventuallyTimeout, eventuallyInterval)
-util.AssertWorkGeneration(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), eventuallyTimeout, eventuallyInterval)
+util.AssertWorkGeneration(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, eventuallyTimeout, eventuallyInterval)
+util.AssertWorkGeneration(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable, eventuallyTimeout, eventuallyInterval)

ginkgo.By("check if applied resources in status are updated")
util.AssertAppliedResources(hubHash, work.Name, gvrs, namespaces, names, hubWorkClient, eventuallyTimeout, eventuallyInterval)
@@ -604,29 +607,29 @@ var _ = ginkgo.Describe("ManifestWork", func() {
var finalizer = "cluster.open-cluster-management.io/testing"
ginkgo.BeforeEach(func() {
manifests = []workapiv1.Manifest{
-util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{finalizer})),
-util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{finalizer})),
-util.ToManifest(util.NewConfigmap(o.AgentOptions.SpokeClusterName, "cm3", map[string]string{"e": "f"}, []string{finalizer})),
+util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{finalizer})),
+util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{finalizer})),
+util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm3", map[string]string{"e": "f"}, []string{finalizer})),
}
})

ginkgo.AfterEach(func() {
-err = util.RemoveConfigmapFinalizers(spokeKubeClient, o.AgentOptions.SpokeClusterName, "cm1", "cm2", "cm3")
+err = util.RemoveConfigmapFinalizers(spokeKubeClient, commOptions.SpokeClusterName, "cm1", "cm2", "cm3")
gomega.Expect(err).ToNot(gomega.HaveOccurred())
})

ginkgo.It("should remove applied resource for stale manifest from list once the resource is gone", func() {
util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)

-util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
+util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
-util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
+util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)

-work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
+work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
work.Spec.Workload.Manifests = manifests[1:]
-work, err = hubWorkClient.WorkV1().ManifestWorks(o.AgentOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
+work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

util.AssertExistenceOfConfigMaps(manifests[1:], spokeKubeClient, eventuallyTimeout, eventuallyInterval)
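The BeforeEach above builds ConfigMap manifests that carry a custom finalizer, so the objects stay in Terminating until the test (or the AfterEach) clears it. A hedged sketch of a constructor matching the util.NewConfigmap call sites; only the argument order comes from the diff, the body is an assumption.

// Sketch only: a ConfigMap constructor matching the call sites
// NewConfigmap(namespace, name, data, finalizers) seen above.
package util

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func NewConfigmap(namespace, name string, data map[string]string, finalizers []string) *corev1.ConfigMap {
	return &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Namespace:  namespace,
			Name:       name,
			Finalizers: finalizers, // keeps the object around until the test clears them
		},
		Data: data,
	}
}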
@@ -638,7 +641,7 @@ var _ = ginkgo.Describe("ManifestWork", func() {
go func() {
time.Sleep(2 * time.Second)
// remove finalizers of cm1
-_ = util.RemoveConfigmapFinalizers(spokeKubeClient, o.AgentOptions.SpokeClusterName, "cm1")
+_ = util.RemoveConfigmapFinalizers(spokeKubeClient, commOptions.SpokeClusterName, "cm1")
}()

// check if resource created by stale manifest is deleted once it is removed from applied resource list
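The goroutine above clears cm1's finalizer after a short delay, which is what finally lets the stale ConfigMap be deleted. A hedged sketch of how a helper like util.RemoveConfigmapFinalizers might do that with a plain get-and-update loop; the real implementation is not shown in this diff, and a production version would likely retry on update conflicts.

// Sketch only: clear finalizers from the named ConfigMaps so that pending
// deletions can complete. The real util.RemoveConfigmapFinalizers is not shown here.
package util

import (
	"context"

	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func RemoveConfigmapFinalizers(client kubernetes.Interface, namespace string, names ...string) error {
	for _, name := range names {
		cm, err := client.CoreV1().ConfigMaps(namespace).Get(context.Background(), name, metav1.GetOptions{})
		if errors.IsNotFound(err) {
			continue // already gone, nothing to clean up
		}
		if err != nil {
			return err
		}
		cm.Finalizers = nil
		if _, err := client.CoreV1().ConfigMaps(namespace).Update(context.Background(), cm, metav1.UpdateOptions{}); err != nil {
			return err
		}
	}
	return nil
}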
@@ -657,16 +660,16 @@ var _ = ginkgo.Describe("ManifestWork", func() {
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())

-_, err = spokeKubeClient.CoreV1().ConfigMaps(o.AgentOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
+_, err = spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue())
})

ginkgo.It("should delete manifest work eventually after all applied resources are gone", func() {
util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)

-util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
+util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
-util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
+util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue,
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)

err := hubWorkClient.WorkV1().ManifestWorks(work.Namespace).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
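The last test deletes the ManifestWork and, in the portion of the file not shown in this hunk, waits for it to disappear once all applied resources are gone. Below is a fragment in the style of the surrounding test showing how that wait could look, using only client calls already visible in this diff; the exact assertion in the real test is an assumption, and the fragment relies on the test file's existing imports (fmt, errors, gomega).

// Sketch: wait until the deleted ManifestWork is fully removed, which only
// happens after its applied resources (the finalizer-protected ConfigMaps) are cleaned up.
gomega.Eventually(func() error {
	_, err := hubWorkClient.WorkV1().ManifestWorks(work.Namespace).Get(context.Background(), work.Name, metav1.GetOptions{})
	if errors.IsNotFound(err) {
		return nil
	}
	if err != nil {
		return err
	}
	return fmt.Errorf("manifestwork %s/%s still exists", work.Namespace, work.Name)
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())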