From 399a6b9a73271b0a0b43aac2f086466d874ed290 Mon Sep 17 00:00:00 2001 From: "Michael D. Elder" Date: Wed, 3 Jun 2020 19:25:14 -0400 Subject: [PATCH] Adopt ManagedCluster & Klusterlet API in nucleus --- cmd/nucleus/main.go | 2 +- ...ter-management.io_clustermanagers.crd.yaml | 4 +- ...ter-management.io_clustermanagers.crd.yaml | 4 +- ...cluster-management.io_klusterlets.crd.yaml | 29 +- ...rd.yaml~Adopt new API updates for operator | 126 ++++++ ...l~eeb95e939f074ff44e80dff94ef9973d34d8d28a | 126 ++++++ ...cluster-management.io_klusterlets.crd.yaml | 29 +- ...rd.yaml~Adopt new API updates for operator | 126 ++++++ ...l~eeb95e939f074ff44e80dff94ef9973d34d8d28a | 126 ++++++ .../klusterlet.clusterserviceversion.yaml | 3 + go.mod | 2 +- go.sum | 4 +- ...er-management.io_managedclusters.crd.yaml} | 82 ++-- ...uster-management.io_manifestworks.crd.yaml | 52 +-- ...ster-manager-registration-clusterrole.yaml | 6 +- ...ager-registration-webhook-clusterrole.yaml | 4 +- ...ation-webhook-validatingconfiguration.yaml | 8 +- .../klusterlet-registration-deployment.yaml | 2 +- .../klusterlet-work-deployment.yaml | 2 +- pkg/cmd/operator/spoke.go | 6 +- pkg/helpers/helpers_test.go | 18 +- .../clustermanager/bindata/bindata.go | 230 +++++------ pkg/operators/clustermanager/controller.go | 4 +- pkg/operators/klusterlet/bindata/bindata.go | 4 +- pkg/operators/klusterlet/controller.go | 50 +-- pkg/operators/klusterlet/controller_test.go | 34 +- pkg/operators/manager.go | 4 +- test/integration/doc.go | 4 +- test/integration/hub_test.go | 2 +- test/integration/integration_suite_test.go | 2 +- ...er-management.io_managedclusters.crd.yaml} | 82 ++-- .../api/cluster/v1/generated.pb.go | 371 +++++++++--------- .../api/cluster/v1/generated.proto | 97 ++--- .../api/cluster/v1/register.go | 4 +- .../api/cluster/v1/types.go | 121 +++--- .../api/cluster/v1/zz_generated.deepcopy.go | 94 ++--- .../v1/zz_generated.swagger_doc_generated.go | 70 ++-- ...cluster-management.io_klusterlets.crd.yaml | 29 +- ...ter-management.io_clustermanagers.crd.yaml | 4 +- .../api/operator/v1/generated.proto | 40 +- .../api/operator/v1/types.go | 40 +- .../v1/zz_generated.swagger_doc_generated.go | 30 +- ...uster-management.io_manifestworks.crd.yaml | 52 +-- .../api/work/v1/generated.proto | 46 +-- .../api/work/v1/types.go | 60 +-- .../v1/zz_generated.swagger_doc_generated.go | 34 +- vendor/modules.txt | 2 +- 47 files changed, 1395 insertions(+), 876 deletions(-) create mode 100644 deploy/klusterlet/crds/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml~Adopt new API updates for operator create mode 100644 deploy/klusterlet/crds/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml~eeb95e939f074ff44e80dff94ef9973d34d8d28a create mode 100644 deploy/klusterlet/olm-catalog/klusterlet/manifests/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml~Adopt new API updates for operator create mode 100644 deploy/klusterlet/olm-catalog/klusterlet/manifests/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml~eeb95e939f074ff44e80dff94ef9973d34d8d28a rename manifests/cluster-manager/{0000_00_clusters.open-cluster-management.io_spokeclusters.crd.yaml => 0000_00_clusters.open-cluster-management.io_managedclusters.crd.yaml} (60%) rename vendor/github.com/open-cluster-management/api/cluster/v1/{0000_00_clusters.open-cluster-management.io_spokeclusters.crd.yaml => 0000_00_clusters.open-cluster-management.io_managedclusters.crd.yaml} (60%) diff --git a/cmd/nucleus/main.go b/cmd/nucleus/main.go index 
7c81f9851..27d03ae54 100644 --- a/cmd/nucleus/main.go +++ b/cmd/nucleus/main.go @@ -50,7 +50,7 @@ func newNucleusCommand() *cobra.Command { } cmd.AddCommand(operator.NewHubOperatorCmd()) - cmd.AddCommand(operator.NewSpokeOperatorCmd()) + cmd.AddCommand(operator.NewKlusterletOperatorCmd()) return cmd } diff --git a/deploy/cluster-manager/crds/0000_01_operator.open-cluster-management.io_clustermanagers.crd.yaml b/deploy/cluster-manager/crds/0000_01_operator.open-cluster-management.io_clustermanagers.crd.yaml index d4612e13e..b623aa556 100644 --- a/deploy/cluster-manager/crds/0000_01_operator.open-cluster-management.io_clustermanagers.crd.yaml +++ b/deploy/cluster-manager/crds/0000_01_operator.open-cluster-management.io_clustermanagers.crd.yaml @@ -16,7 +16,7 @@ spec: validation: openAPIV3Schema: description: ClusterManager configures the controllers on the hub that govern - registration and work distribution for attached klusterlets. ClusterManager + registration and work distribution for attached Klusterlets. ClusterManager will be only deployed in open-cluster-management-hub namespace. type: object properties: @@ -34,7 +34,7 @@ spec: type: object spec: description: Spec represents a desired deployment configuration of controllers - that govern registration and work distribution for attached klusterlets. + that govern registration and work distribution for attached Klusterlets. type: object properties: registrationImagePullSpec: diff --git a/deploy/cluster-manager/olm-catalog/cluster-manager/manifests/0000_01_operator.open-cluster-management.io_clustermanagers.crd.yaml b/deploy/cluster-manager/olm-catalog/cluster-manager/manifests/0000_01_operator.open-cluster-management.io_clustermanagers.crd.yaml index d4612e13e..b623aa556 100644 --- a/deploy/cluster-manager/olm-catalog/cluster-manager/manifests/0000_01_operator.open-cluster-management.io_clustermanagers.crd.yaml +++ b/deploy/cluster-manager/olm-catalog/cluster-manager/manifests/0000_01_operator.open-cluster-management.io_clustermanagers.crd.yaml @@ -16,7 +16,7 @@ spec: validation: openAPIV3Schema: description: ClusterManager configures the controllers on the hub that govern - registration and work distribution for attached klusterlets. ClusterManager + registration and work distribution for attached Klusterlets. ClusterManager will be only deployed in open-cluster-management-hub namespace. type: object properties: @@ -34,7 +34,7 @@ spec: type: object spec: description: Spec represents a desired deployment configuration of controllers - that govern registration and work distribution for attached klusterlets. + that govern registration and work distribution for attached Klusterlets. type: object properties: registrationImagePullSpec: diff --git a/deploy/klusterlet/crds/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml b/deploy/klusterlet/crds/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml index 7a2cc5cde..5bc3e8ad1 100644 --- a/deploy/klusterlet/crds/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml +++ b/deploy/klusterlet/crds/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml @@ -34,18 +34,18 @@ spec: metadata: type: object spec: - description: Spec represents the desired deployment configuration of klusterlet + description: Spec represents the desired deployment configuration of Klusterlet agent. type: object properties: clusterName: - description: ClusterName is the name of the spoke cluster to be created - on hub. 
The spoke agent generates a random name if it is not set, - or discovers the appropriate cluster name on openshift. + description: ClusterName is the name of the managed cluster to be created + on hub. The Klusterlet agent generates a random name if it is not + set, or discovers the appropriate cluster name on openshift. type: string externalServerURLs: description: ExternalServerURLs represents the a list of apiserver urls - and ca bundles that is accessible externally If it is set empty, spoke + and ca bundles that is accessible externally If it is set empty, managed cluster has no externally accessible url that hub cluster can visit. type: array items: @@ -55,17 +55,17 @@ spec: properties: caBundle: description: CABundle is the ca bundle to connect to apiserver - of the spoke cluster. System certs are used if it is not set. + of the managed cluster. System certs are used if it is not set. type: string format: byte url: - description: URL is the url of apiserver endpoint of the spoke + description: URL is the url of apiserver endpoint of the managed cluster. type: string namespace: description: Namespace is the namespace to deploy the agent. The namespace must have a prefix of "open-cluster-management-", and if it is not - set, the namespace of "open-cluster-management-spoke" is used to deploy + set, the namespace of "open-cluster-management-agent" is used to deploy agent. type: string registrationImagePullSpec: @@ -77,16 +77,17 @@ spec: of work agent. type: string status: - description: Status represents the current status of klusterlet agent. + description: Status represents the current status of Klusterlet agent. type: object properties: conditions: description: 'Conditions contain the different condition statuses for - this spokecore. Valid condition types are: Applied: components in - spoke is applied. Available: components in spoke are available and - ready to serve. Progressing: components in spoke are in a transitioning - state. Degraded: components in spoke do not match the desired configuration - and only provide degraded service.' + this Klusterlet. Valid condition types are: Applied: components have + been applied in the managed cluster. Available: components in the + managed cluster are available and ready to serve. Progressing: components + in the managed cluster are in a transitioning state. Degraded: components + in the managed cluster do not match the desired configuration and + only provide degraded service.' type: array items: description: StatusCondition contains condition information. diff --git a/deploy/klusterlet/crds/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml~Adopt new API updates for operator b/deploy/klusterlet/crds/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml~Adopt new API updates for operator new file mode 100644 index 000000000..7a2cc5cde --- /dev/null +++ b/deploy/klusterlet/crds/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml~Adopt new API updates for operator @@ -0,0 +1,126 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: klusterlets.operator.open-cluster-management.io +spec: + group: operator.open-cluster-management.io + names: + kind: Klusterlet + listKind: KlusterletList + plural: klusterlets + singular: klusterlet + scope: Cluster + subresources: + status: {} + validation: + openAPIV3Schema: + description: Klusterlet represents controllers on the managed cluster. 
When + configured, the Klusterlet requires a secret named of bootstrap-hub-kubeconfig + in the same namespace to allow API requests to the hub for the registration + protocol. + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec represents the desired deployment configuration of klusterlet + agent. + type: object + properties: + clusterName: + description: ClusterName is the name of the spoke cluster to be created + on hub. The spoke agent generates a random name if it is not set, + or discovers the appropriate cluster name on openshift. + type: string + externalServerURLs: + description: ExternalServerURLs represents the a list of apiserver urls + and ca bundles that is accessible externally If it is set empty, spoke + cluster has no externally accessible url that hub cluster can visit. + type: array + items: + description: ServerURL represents the apiserver url and ca bundle + that is accessible externally + type: object + properties: + caBundle: + description: CABundle is the ca bundle to connect to apiserver + of the spoke cluster. System certs are used if it is not set. + type: string + format: byte + url: + description: URL is the url of apiserver endpoint of the spoke + cluster. + type: string + namespace: + description: Namespace is the namespace to deploy the agent. The namespace + must have a prefix of "open-cluster-management-", and if it is not + set, the namespace of "open-cluster-management-spoke" is used to deploy + agent. + type: string + registrationImagePullSpec: + description: RegistrationImagePullSpec represents the desired image + configuration of registration agent. + type: string + workImagePullSpec: + description: WorkImagePullSpec represents the desired image configuration + of work agent. + type: string + status: + description: Status represents the current status of klusterlet agent. + type: object + properties: + conditions: + description: 'Conditions contain the different condition statuses for + this spokecore. Valid condition types are: Applied: components in + spoke is applied. Available: components in spoke are available and + ready to serve. Progressing: components in spoke are in a transitioning + state. Degraded: components in spoke do not match the desired configuration + and only provide degraded service.' + type: array + items: + description: StatusCondition contains condition information. + type: object + properties: + lastTransitionTime: + description: LastTransitionTime is the last time the condition + changed from one status to another. + type: string + format: date-time + message: + description: Message is a human-readable message indicating details + about the last status change. + type: string + reason: + description: Reason is a (brief) reason for the condition's last + status change. 
+ type: string + status: + description: Status is the status of the condition. One of True, + False, Unknown. + type: string + type: + description: Type is the type of the cluster condition. + type: string + version: v1 + versions: + - name: v1 + served: true + storage: true + preserveUnknownFields: false +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/deploy/klusterlet/crds/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml~eeb95e939f074ff44e80dff94ef9973d34d8d28a b/deploy/klusterlet/crds/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml~eeb95e939f074ff44e80dff94ef9973d34d8d28a new file mode 100644 index 000000000..7a2cc5cde --- /dev/null +++ b/deploy/klusterlet/crds/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml~eeb95e939f074ff44e80dff94ef9973d34d8d28a @@ -0,0 +1,126 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: klusterlets.operator.open-cluster-management.io +spec: + group: operator.open-cluster-management.io + names: + kind: Klusterlet + listKind: KlusterletList + plural: klusterlets + singular: klusterlet + scope: Cluster + subresources: + status: {} + validation: + openAPIV3Schema: + description: Klusterlet represents controllers on the managed cluster. When + configured, the Klusterlet requires a secret named of bootstrap-hub-kubeconfig + in the same namespace to allow API requests to the hub for the registration + protocol. + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec represents the desired deployment configuration of klusterlet + agent. + type: object + properties: + clusterName: + description: ClusterName is the name of the spoke cluster to be created + on hub. The spoke agent generates a random name if it is not set, + or discovers the appropriate cluster name on openshift. + type: string + externalServerURLs: + description: ExternalServerURLs represents the a list of apiserver urls + and ca bundles that is accessible externally If it is set empty, spoke + cluster has no externally accessible url that hub cluster can visit. + type: array + items: + description: ServerURL represents the apiserver url and ca bundle + that is accessible externally + type: object + properties: + caBundle: + description: CABundle is the ca bundle to connect to apiserver + of the spoke cluster. System certs are used if it is not set. + type: string + format: byte + url: + description: URL is the url of apiserver endpoint of the spoke + cluster. + type: string + namespace: + description: Namespace is the namespace to deploy the agent. The namespace + must have a prefix of "open-cluster-management-", and if it is not + set, the namespace of "open-cluster-management-spoke" is used to deploy + agent. 
+ type: string + registrationImagePullSpec: + description: RegistrationImagePullSpec represents the desired image + configuration of registration agent. + type: string + workImagePullSpec: + description: WorkImagePullSpec represents the desired image configuration + of work agent. + type: string + status: + description: Status represents the current status of klusterlet agent. + type: object + properties: + conditions: + description: 'Conditions contain the different condition statuses for + this spokecore. Valid condition types are: Applied: components in + spoke is applied. Available: components in spoke are available and + ready to serve. Progressing: components in spoke are in a transitioning + state. Degraded: components in spoke do not match the desired configuration + and only provide degraded service.' + type: array + items: + description: StatusCondition contains condition information. + type: object + properties: + lastTransitionTime: + description: LastTransitionTime is the last time the condition + changed from one status to another. + type: string + format: date-time + message: + description: Message is a human-readable message indicating details + about the last status change. + type: string + reason: + description: Reason is a (brief) reason for the condition's last + status change. + type: string + status: + description: Status is the status of the condition. One of True, + False, Unknown. + type: string + type: + description: Type is the type of the cluster condition. + type: string + version: v1 + versions: + - name: v1 + served: true + storage: true + preserveUnknownFields: false +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/deploy/klusterlet/olm-catalog/klusterlet/manifests/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml b/deploy/klusterlet/olm-catalog/klusterlet/manifests/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml index 7a2cc5cde..5bc3e8ad1 100644 --- a/deploy/klusterlet/olm-catalog/klusterlet/manifests/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml +++ b/deploy/klusterlet/olm-catalog/klusterlet/manifests/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml @@ -34,18 +34,18 @@ spec: metadata: type: object spec: - description: Spec represents the desired deployment configuration of klusterlet + description: Spec represents the desired deployment configuration of Klusterlet agent. type: object properties: clusterName: - description: ClusterName is the name of the spoke cluster to be created - on hub. The spoke agent generates a random name if it is not set, - or discovers the appropriate cluster name on openshift. + description: ClusterName is the name of the managed cluster to be created + on hub. The Klusterlet agent generates a random name if it is not + set, or discovers the appropriate cluster name on openshift. type: string externalServerURLs: description: ExternalServerURLs represents the a list of apiserver urls - and ca bundles that is accessible externally If it is set empty, spoke + and ca bundles that is accessible externally If it is set empty, managed cluster has no externally accessible url that hub cluster can visit. type: array items: @@ -55,17 +55,17 @@ spec: properties: caBundle: description: CABundle is the ca bundle to connect to apiserver - of the spoke cluster. System certs are used if it is not set. + of the managed cluster. System certs are used if it is not set. 
type: string format: byte url: - description: URL is the url of apiserver endpoint of the spoke + description: URL is the url of apiserver endpoint of the managed cluster. type: string namespace: description: Namespace is the namespace to deploy the agent. The namespace must have a prefix of "open-cluster-management-", and if it is not - set, the namespace of "open-cluster-management-spoke" is used to deploy + set, the namespace of "open-cluster-management-agent" is used to deploy agent. type: string registrationImagePullSpec: @@ -77,16 +77,17 @@ spec: of work agent. type: string status: - description: Status represents the current status of klusterlet agent. + description: Status represents the current status of Klusterlet agent. type: object properties: conditions: description: 'Conditions contain the different condition statuses for - this spokecore. Valid condition types are: Applied: components in - spoke is applied. Available: components in spoke are available and - ready to serve. Progressing: components in spoke are in a transitioning - state. Degraded: components in spoke do not match the desired configuration - and only provide degraded service.' + this Klusterlet. Valid condition types are: Applied: components have + been applied in the managed cluster. Available: components in the + managed cluster are available and ready to serve. Progressing: components + in the managed cluster are in a transitioning state. Degraded: components + in the managed cluster do not match the desired configuration and + only provide degraded service.' type: array items: description: StatusCondition contains condition information. diff --git a/deploy/klusterlet/olm-catalog/klusterlet/manifests/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml~Adopt new API updates for operator b/deploy/klusterlet/olm-catalog/klusterlet/manifests/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml~Adopt new API updates for operator new file mode 100644 index 000000000..7a2cc5cde --- /dev/null +++ b/deploy/klusterlet/olm-catalog/klusterlet/manifests/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml~Adopt new API updates for operator @@ -0,0 +1,126 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: klusterlets.operator.open-cluster-management.io +spec: + group: operator.open-cluster-management.io + names: + kind: Klusterlet + listKind: KlusterletList + plural: klusterlets + singular: klusterlet + scope: Cluster + subresources: + status: {} + validation: + openAPIV3Schema: + description: Klusterlet represents controllers on the managed cluster. When + configured, the Klusterlet requires a secret named of bootstrap-hub-kubeconfig + in the same namespace to allow API requests to the hub for the registration + protocol. + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec represents the desired deployment configuration of klusterlet + agent. + type: object + properties: + clusterName: + description: ClusterName is the name of the spoke cluster to be created + on hub. The spoke agent generates a random name if it is not set, + or discovers the appropriate cluster name on openshift. + type: string + externalServerURLs: + description: ExternalServerURLs represents the a list of apiserver urls + and ca bundles that is accessible externally If it is set empty, spoke + cluster has no externally accessible url that hub cluster can visit. + type: array + items: + description: ServerURL represents the apiserver url and ca bundle + that is accessible externally + type: object + properties: + caBundle: + description: CABundle is the ca bundle to connect to apiserver + of the spoke cluster. System certs are used if it is not set. + type: string + format: byte + url: + description: URL is the url of apiserver endpoint of the spoke + cluster. + type: string + namespace: + description: Namespace is the namespace to deploy the agent. The namespace + must have a prefix of "open-cluster-management-", and if it is not + set, the namespace of "open-cluster-management-spoke" is used to deploy + agent. + type: string + registrationImagePullSpec: + description: RegistrationImagePullSpec represents the desired image + configuration of registration agent. + type: string + workImagePullSpec: + description: WorkImagePullSpec represents the desired image configuration + of work agent. + type: string + status: + description: Status represents the current status of klusterlet agent. + type: object + properties: + conditions: + description: 'Conditions contain the different condition statuses for + this spokecore. Valid condition types are: Applied: components in + spoke is applied. Available: components in spoke are available and + ready to serve. Progressing: components in spoke are in a transitioning + state. Degraded: components in spoke do not match the desired configuration + and only provide degraded service.' + type: array + items: + description: StatusCondition contains condition information. + type: object + properties: + lastTransitionTime: + description: LastTransitionTime is the last time the condition + changed from one status to another. + type: string + format: date-time + message: + description: Message is a human-readable message indicating details + about the last status change. + type: string + reason: + description: Reason is a (brief) reason for the condition's last + status change. + type: string + status: + description: Status is the status of the condition. One of True, + False, Unknown. + type: string + type: + description: Type is the type of the cluster condition. 
+ type: string + version: v1 + versions: + - name: v1 + served: true + storage: true + preserveUnknownFields: false +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/deploy/klusterlet/olm-catalog/klusterlet/manifests/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml~eeb95e939f074ff44e80dff94ef9973d34d8d28a b/deploy/klusterlet/olm-catalog/klusterlet/manifests/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml~eeb95e939f074ff44e80dff94ef9973d34d8d28a new file mode 100644 index 000000000..7a2cc5cde --- /dev/null +++ b/deploy/klusterlet/olm-catalog/klusterlet/manifests/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml~eeb95e939f074ff44e80dff94ef9973d34d8d28a @@ -0,0 +1,126 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: klusterlets.operator.open-cluster-management.io +spec: + group: operator.open-cluster-management.io + names: + kind: Klusterlet + listKind: KlusterletList + plural: klusterlets + singular: klusterlet + scope: Cluster + subresources: + status: {} + validation: + openAPIV3Schema: + description: Klusterlet represents controllers on the managed cluster. When + configured, the Klusterlet requires a secret named of bootstrap-hub-kubeconfig + in the same namespace to allow API requests to the hub for the registration + protocol. + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec represents the desired deployment configuration of klusterlet + agent. + type: object + properties: + clusterName: + description: ClusterName is the name of the spoke cluster to be created + on hub. The spoke agent generates a random name if it is not set, + or discovers the appropriate cluster name on openshift. + type: string + externalServerURLs: + description: ExternalServerURLs represents the a list of apiserver urls + and ca bundles that is accessible externally If it is set empty, spoke + cluster has no externally accessible url that hub cluster can visit. + type: array + items: + description: ServerURL represents the apiserver url and ca bundle + that is accessible externally + type: object + properties: + caBundle: + description: CABundle is the ca bundle to connect to apiserver + of the spoke cluster. System certs are used if it is not set. + type: string + format: byte + url: + description: URL is the url of apiserver endpoint of the spoke + cluster. + type: string + namespace: + description: Namespace is the namespace to deploy the agent. The namespace + must have a prefix of "open-cluster-management-", and if it is not + set, the namespace of "open-cluster-management-spoke" is used to deploy + agent. 
+ type: string + registrationImagePullSpec: + description: RegistrationImagePullSpec represents the desired image + configuration of registration agent. + type: string + workImagePullSpec: + description: WorkImagePullSpec represents the desired image configuration + of work agent. + type: string + status: + description: Status represents the current status of klusterlet agent. + type: object + properties: + conditions: + description: 'Conditions contain the different condition statuses for + this spokecore. Valid condition types are: Applied: components in + spoke is applied. Available: components in spoke are available and + ready to serve. Progressing: components in spoke are in a transitioning + state. Degraded: components in spoke do not match the desired configuration + and only provide degraded service.' + type: array + items: + description: StatusCondition contains condition information. + type: object + properties: + lastTransitionTime: + description: LastTransitionTime is the last time the condition + changed from one status to another. + type: string + format: date-time + message: + description: Message is a human-readable message indicating details + about the last status change. + type: string + reason: + description: Reason is a (brief) reason for the condition's last + status change. + type: string + status: + description: Status is the status of the condition. One of True, + False, Unknown. + type: string + type: + description: Type is the type of the cluster condition. + type: string + version: v1 + versions: + - name: v1 + served: true + storage: true + preserveUnknownFields: false +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/deploy/klusterlet/olm-catalog/klusterlet/manifests/klusterlet.clusterserviceversion.yaml b/deploy/klusterlet/olm-catalog/klusterlet/manifests/klusterlet.clusterserviceversion.yaml index f0f2f5349..b69cb963d 100644 --- a/deploy/klusterlet/olm-catalog/klusterlet/manifests/klusterlet.clusterserviceversion.yaml +++ b/deploy/klusterlet/olm-catalog/klusterlet/manifests/klusterlet.clusterserviceversion.yaml @@ -33,6 +33,9 @@ spec: - kind: Klusterlet name: klusterlets.operator.open-cluster-management.io version: v1 + - kind: Klusterlet + name: klusterlets.operator.open-cluster-management.io + version: v1 displayName: Klusterlet icon: - base64data: "" diff --git a/go.mod b/go.mod index 451002b07..fb3688468 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/go-bindata/go-bindata v3.1.2+incompatible github.com/onsi/ginkgo v1.11.0 github.com/onsi/gomega v1.8.1 - github.com/open-cluster-management/api v0.0.0-20200601153054-56b58ce890e1 + github.com/open-cluster-management/api v0.0.0-20200602195039-a516cac2e038 github.com/openshift/api v0.0.0-20200326160804-ecb9283fe820 github.com/openshift/build-machinery-go v0.0.0-20200424080330-082bf86082cc github.com/openshift/library-go v0.0.0-20200414135834-ccc4bb27d032 diff --git a/go.sum b/go.sum index b2cf221b1..dcce5366b 100644 --- a/go.sum +++ b/go.sum @@ -312,8 +312,8 @@ github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGV github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.8.1 h1:C5Dqfs/LeauYDX0jJXIe2SWmwCbGzx9yF8C8xy3Lh34= github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= -github.com/open-cluster-management/api v0.0.0-20200601153054-56b58ce890e1 h1:MCq109q/vwW/5YdliL++J17d/6orA6MDmt2WQOaIpWw= 
-github.com/open-cluster-management/api v0.0.0-20200601153054-56b58ce890e1/go.mod h1:+vUECYB7WkfCb52r0J7rxgD1mseSGAqGi8rTLLRcbgw= +github.com/open-cluster-management/api v0.0.0-20200602195039-a516cac2e038 h1:ZpSgcQERvBBvfwqhdPbxIami9gZpX+RwjNPqwx4+axY= +github.com/open-cluster-management/api v0.0.0-20200602195039-a516cac2e038/go.mod h1:+vUECYB7WkfCb52r0J7rxgD1mseSGAqGi8rTLLRcbgw= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= diff --git a/manifests/cluster-manager/0000_00_clusters.open-cluster-management.io_spokeclusters.crd.yaml b/manifests/cluster-manager/0000_00_clusters.open-cluster-management.io_managedclusters.crd.yaml similarity index 60% rename from manifests/cluster-manager/0000_00_clusters.open-cluster-management.io_spokeclusters.crd.yaml rename to manifests/cluster-manager/0000_00_clusters.open-cluster-management.io_managedclusters.crd.yaml index b49894dc5..befc50b2a 100644 --- a/manifests/cluster-manager/0000_00_clusters.open-cluster-management.io_spokeclusters.crd.yaml +++ b/manifests/cluster-manager/0000_00_clusters.open-cluster-management.io_managedclusters.crd.yaml @@ -1,30 +1,31 @@ apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: - name: spokeclusters.cluster.open-cluster-management.io + name: managedclusters.cluster.open-cluster-management.io spec: group: cluster.open-cluster-management.io names: - kind: SpokeCluster - listKind: SpokeClusterList - plural: spokeclusters - singular: spokecluster + kind: ManagedCluster + listKind: ManagedClusterList + plural: managedclusters + singular: managedcluster scope: "Cluster" subresources: status: {} preserveUnknownFields: false validation: openAPIV3Schema: - description: "SpokeCluster represents the desired state and current status of - spoke cluster. SpokeCluster is a cluster scoped resource. The name is the - cluster UID. \n The cluster join process follows a double opt-in process: - \n 1. agent on spoke cluster creates CSR on hub with cluster UID and agent - name. 2. agent on spoke cluster creates spokecluster on hub. 3. cluster admin - on hub approves the CSR for the spoke's cluster UID and agent name. 4. cluster - admin set spec.acceptSpokeCluster of spokecluster to true. 5. cluster admin - on spoke creates credential of kubeconfig to spoke. \n Once the hub creates - the cluster namespace, the spoke agent pushes the credential to the hub to - use against the spoke's kube-apiserver." + description: "ManagedCluster represents the desired state and current status + of managed cluster. ManagedCluster is a cluster scoped resource. The name + is the cluster UID. \n The cluster join process follows a double opt-in process: + \n 1. agent on managed cluster creates CSR on hub with cluster UID and agent + name. 2. agent on managed cluster creates ManagedCluster on hub. 3. cluster + admin on hub approves the CSR for the ManagedCluster's UID and agent name. + 4. cluster admin sets spec.acceptClient of ManagedCluster to true. 5. cluster + admin on managed cluster creates credential of kubeconfig to hub. \n Once + the hub creates the cluster namespace, the Klusterlet agent on the Managed + Cluster pushes the credential to the hub to use against the managed cluster's + kube-apiserver." 
type: object properties: apiVersion: @@ -41,68 +42,69 @@ spec: type: object spec: description: Spec represents a desired configuration for the agent on the - spoke cluster. + managed cluster. type: object properties: hubAcceptsClient: - description: AcceptSpokeCluster reprsents that hub accepts the join - of spoke agent. Its default value is false, and can only be set true - when the user on hub has an RBAC rule to UPDATE on the virtual subresource - of spokeclusters/accept. When the vaule is set true, a namespace whose - name is same as the name of SpokeCluster is created on hub representing - the spoke cluster, also role/rolebinding is created on the namespace - to grant the permision of access from agent on spoke. When the value - is set false, the namespace representing the spoke cluster is deleted. + description: hubAcceptsClient represents that hub accepts the join of + Klusterlet agent on the managed cluster to the hub. The default value + is false, and can only be set true when the user on hub has an RBAC + rule to UPDATE on the virtual subresource of managedclusters/accept. + When the value is set true, a namespace whose name is same as the + name of ManagedCluster is created on hub representing the managed + cluster, also role/rolebinding is created on the namespace to grant + the permision of access from agent on managed cluster. When the value + is set false, the namespace representing the managed cluster is deleted. type: boolean leaseDurationSeconds: description: LeaseDurationSeconds is used to coordinate the lease update - time of spoke agents. If its value is zero, the spoke agent will update - its lease per 60s by default + time of Klusterlet agents on the managed cluster. If its value is + zero, the Klusterlet agent will update its lease every 60s by default type: integer format: int32 - spokeClientConfigs: - description: SpokeClientConfigs represents a list of the apiserver address - of the spoke cluster. If it is empty, spoke cluster has no accessible - address to be visited from hub. + managedClusterClientConfigs: + description: ManagedClusterClientConfigs represents a list of the apiserver + address of the managed cluster. If it is empty, managed cluster has + no accessible address to be visited from hub. type: array items: description: ClientConfig represents the apiserver address of the - spoke cluster. TODO include credential to connect to spoke cluster + managed cluster. TODO include credential to connect to managed cluster kube-apiserver type: object properties: caBundle: description: CABundle is the ca bundle to connect to apiserver - of the spoke cluster. System certs are used if it is not set. + of the managed cluster. System certs are used if it is not set. type: string format: byte url: - description: URL is the url of apiserver endpoint of the spoke + description: URL is the url of apiserver endpoint of the managed cluster. type: string status: - description: Status represents the current status of joined spoke cluster + description: Status represents the current status of joined managed cluster type: object properties: allocatable: description: Allocatable represents the total allocatable resources - on the spoke cluster. + on the managed cluster. type: object additionalProperties: type: string capacity: description: Capacity represents the total resource capacity from all - nodeStatuses on the spoke cluster. + nodeStatuses on the managed cluster. 
type: object additionalProperties: type: string conditions: description: Conditions contains the different condition statuses for - this spoke cluster. + this managed cluster. type: array items: description: StatusCondition contains condition information for a - spoke cluster. + managed cluster. type: object properties: lastTransitionTime: @@ -126,12 +128,12 @@ spec: description: Type is the type of the cluster condition. type: string version: - description: Version represents the kubernetes version of the spoke + description: Version represents the kubernetes version of the managed cluster. type: object properties: kubernetes: - description: Kubernetes is the kubernetes version of spoke cluster + description: Kubernetes is the kubernetes version of managed cluster. type: string version: v1 versions: diff --git a/manifests/cluster-manager/0000_00_work.open-cluster-management.io_manifestworks.crd.yaml b/manifests/cluster-manager/0000_00_work.open-cluster-management.io_manifestworks.crd.yaml index 0dfcc943f..133c5cc81 100644 --- a/manifests/cluster-manager/0000_00_work.open-cluster-management.io_manifestworks.crd.yaml +++ b/manifests/cluster-manager/0000_00_work.open-cluster-management.io_manifestworks.crd.yaml @@ -17,10 +17,10 @@ spec: validation: openAPIV3Schema: description: ManifestWork represents a manifests workload that hub wants to - deploy on the spoke cluster. A manifest workload is defined as a set of kubernetes - resources. ManifestWork must be created in the cluster namespace on the hub, - so that agent on the corresponding spoke cluster can access this resource - and deploy on the spoke cluster. + deploy on the managed cluster. A manifest workload is defined as a set of + kubernetes resources. ManifestWork must be created in the cluster namespace + on the hub, so that agent on the corresponding managed cluster can access + this resource and deploy on the managed cluster. type: object properties: apiVersion: @@ -37,21 +37,21 @@ spec: type: object spec: description: Spec represents a desired configuration of work to be deployed - on the spoke cluster. + on the managed cluster. type: object properties: workload: description: Workload represents the manifest workload to be deployed - on spoke cluster + on managed cluster type: object properties: manifests: description: Manifests represents a list of kuberenetes resources - to be deployed on the spoke cluster. + to be deployed on the managed cluster. type: array items: description: Manifest represents a resource to be deployed on - spoke cluster + managed cluster type: object x-kubernetes-preserve-unknown-fields: true x-kubernetes-embedded-resource: true @@ -65,7 +65,7 @@ spec: GroupVersionResource, namespace, and name are suitable. An item in this slice is deleted when there is no mapped manifest in manifestwork.Spec or by finalizer. The resource relating to the item will also be removed - from spoke cluster. The deleted resource may still be present until + from managed cluster. The deleted resource may still be present until the finalizers for that resource are finished. However, the resource will not be undeleted, so it can be removed from this list and eventual consistency is preserved. @@ -95,15 +95,15 @@ spec: conditions: description: 'Conditions contains the different condition statuses for this work. Valid condition types are: 1. Applied represents workload - in ManifestWork is applied successfully on spoke cluster. 2. Progressing - represents workload in ManifestWork is being applied on spoke cluster. - 3. 
Available represents workload in ManifestWork exists on the spoke + in ManifestWork is applied successfully on managed cluster. 2. Progressing + represents workload in ManifestWork is being applied on managed cluster. + 3. Available represents workload in ManifestWork exists on the managed cluster. 4. Degraded represents the current state of workload does not match the desired state for a certain period.' type: array items: description: StatusCondition contains condition information for a - spoke work. + ManifestWork applied to a managed cluster. type: object properties: lastTransitionTime: @@ -124,35 +124,35 @@ spec: False, Unknown. type: string type: - description: Type is the type of the spoke work condition. + description: Type is the type of the ManifestWork condition. type: string resourceStatus: description: ResourceStatus represents the status of each resource in - manifestwork deployed on spoke cluster. The agent on spoke cluster - syncs the condition from spoke to the hub. + manifestwork deployed on managed cluster. The Klusterlet agent on + managed cluster syncs the condition from managed to the hub. type: object properties: manifests: description: 'Manifests represents the condition of manifests deployed - on spoke cluster. Valid condition types are: 1. Progressing represents - the resource is being applied on spoke cluster. 2. Applied represents - the resource is applied successfully on spoke cluster. 3. Available - represents the resource exists on the spoke cluster. 4. Degraded - represents the current state of resource does not match the desired - state for a certain period.' + on managed cluster. Valid condition types are: 1. Progressing + represents the resource is being applied on managed cluster. 2. + Applied represents the resource is applied successfully on managed + cluster. 3. Available represents the resource exists on the managed + cluster. 4. Degraded represents the current state of resource + does not match the desired state for a certain period.' type: array items: description: ManifestCondition represents the conditions of the - resources deployed on spoke cluster + resources deployed on managed cluster type: object properties: conditions: description: Conditions represents the conditions of this - resource on spoke cluster + resource on managed cluster type: array items: description: StatusCondition contains condition information - for a spoke work. + for a ManifestWork applied to a managed cluster. type: object properties: lastTransitionTime: @@ -173,7 +173,7 @@ spec: One of True, False, Unknown. type: string type: - description: Type is the type of the spoke work condition. + description: Type is the type of the ManifestWork condition. 
type: string resourceMeta: description: ResourceMeta represents the gvk, name and namespace diff --git a/manifests/cluster-manager/cluster-manager-registration-clusterrole.yaml b/manifests/cluster-manager/cluster-manager-registration-clusterrole.yaml index 3246b9035..0043b34da 100644 --- a/manifests/cluster-manager/cluster-manager-registration-clusterrole.yaml +++ b/manifests/cluster-manager/cluster-manager-registration-clusterrole.yaml @@ -27,10 +27,10 @@ rules: - apiGroups: ["rbac.authorization.k8s.io"] resources: ["clusterroles", "roles"] verbs: ["get", "list", "watch", "create", "update", "patch", "delete", "escalate", "bind"] -# Allow hub to manage spokeclusters +# Allow hub to manage managedclusters - apiGroups: ["cluster.open-cluster-management.io"] - resources: ["spokeclusters"] + resources: ["managedclusters"] verbs: ["get", "list", "watch", "update", "patch"] - apiGroups: ["cluster.open-cluster-management.io"] - resources: ["spokeclusters/status"] + resources: ["managedclusters/status"] verbs: ["update", "patch"] diff --git a/manifests/cluster-manager/cluster-manager-registration-webhook-clusterrole.yaml b/manifests/cluster-manager/cluster-manager-registration-webhook-clusterrole.yaml index f236f3763..f53731aa0 100644 --- a/manifests/cluster-manager/cluster-manager-registration-webhook-clusterrole.yaml +++ b/manifests/cluster-manager/cluster-manager-registration-webhook-clusterrole.yaml @@ -3,11 +3,11 @@ kind: ClusterRole metadata: name: system:open-cluster-management:{{ .ClusterManagerName }}-registration-webhook rules: -# Allow spokecluster admission to get/list/watch configmaps +# Allow managedcluster admission to get/list/watch configmaps - apiGroups: [""] resources: ["configmaps"] verbs: ["get", "list", "watch"] -# Allow spokecluster admission to create subjectaccessreviews +# Allow managedcluster admission to create subjectaccessreviews - apiGroups: ["authorization.k8s.io"] resources: ["subjectaccessreviews"] verbs: ["create"] diff --git a/manifests/cluster-manager/cluster-manager-registration-webhook-validatingconfiguration.yaml b/manifests/cluster-manager/cluster-manager-registration-webhook-validatingconfiguration.yaml index 0dbc67de8..aad194209 100644 --- a/manifests/cluster-manager/cluster-manager-registration-webhook-validatingconfiguration.yaml +++ b/manifests/cluster-manager/cluster-manager-registration-webhook-validatingconfiguration.yaml @@ -1,16 +1,16 @@ apiVersion: admissionregistration.k8s.io/v1 kind: ValidatingWebhookConfiguration metadata: - name: spokeclustervalidators.admission.cluster.open-cluster-management.io + name: managedclustervalidators.admission.cluster.open-cluster-management.io webhooks: -- name: spokeclustervalidators.admission.cluster.open-cluster-management.io +- name: managedclustervalidators.admission.cluster.open-cluster-management.io failurePolicy: Fail clientConfig: service: # reach the webhook via the registered aggregated API namespace: default name: kubernetes - path: /apis/admission.cluster.open-cluster-management.io/v1/spokeclustervalidators + path: /apis/admission.cluster.open-cluster-management.io/v1/managedclustervalidators rules: - operations: - CREATE @@ -20,7 +20,7 @@ webhooks: apiVersions: - "*" resources: - - spokeclusters + - managedclusters admissionReviewVersions: ["v1beta1"] sideEffects: None timeoutSeconds: 3 diff --git a/manifests/klusterlet/klusterlet-registration-deployment.yaml b/manifests/klusterlet/klusterlet-registration-deployment.yaml index ef151fde0..a88f36741 100644 --- 
a/manifests/klusterlet/klusterlet-registration-deployment.yaml +++ b/manifests/klusterlet/klusterlet-registration-deployment.yaml @@ -17,7 +17,7 @@ spec: spec: serviceAccountName: {{ .KlusterletName }}-registration-sa containers: - - name: spoke-agent + - name: registration-controller image: {{ .RegistrationImage }} imagePullPolicy: IfNotPresent args: diff --git a/manifests/klusterlet/klusterlet-work-deployment.yaml b/manifests/klusterlet/klusterlet-work-deployment.yaml index 85e56ae7c..32d12173c 100644 --- a/manifests/klusterlet/klusterlet-work-deployment.yaml +++ b/manifests/klusterlet/klusterlet-work-deployment.yaml @@ -17,7 +17,7 @@ spec: spec: serviceAccountName: {{ .KlusterletName }}-work-sa containers: - - name: spoke-agent + - name: klusterlet-manifestwork-agent image: {{ .WorkImage }} imagePullPolicy: IfNotPresent args: diff --git a/pkg/cmd/operator/spoke.go b/pkg/cmd/operator/spoke.go index 375b92b47..d503db8b2 100644 --- a/pkg/cmd/operator/spoke.go +++ b/pkg/cmd/operator/spoke.go @@ -9,12 +9,12 @@ import ( "github.com/open-cluster-management/nucleus/pkg/version" ) -// NewSpokeOperatorCmd generatee a command to start spoke operator -func NewSpokeOperatorCmd() *cobra.Command { +// NewKlusterletOperatorCmd generates a command to start the klusterlet operator +func NewKlusterletOperatorCmd() *cobra.Command { cmd := controllercmd. NewControllerCommandConfig("klusterlet", version.Get(), operators.RunKlusterletOperator). NewCommand() - cmd.Use = "spoke" + cmd.Use = "klusterlet" cmd.Short = "Start the klusterlet operator" return cmd diff --git a/pkg/helpers/helpers_test.go b/pkg/helpers/helpers_test.go index ec2057730..cb38e4df6 100644 --- a/pkg/helpers/helpers_test.go +++ b/pkg/helpers/helpers_test.go @@ -86,13 +86,13 @@ func TestUpdateStatusCondition(t *testing.T) { t.Run(c.name, func(t *testing.T) { fakeOperatorClient := opereatorfake.NewSimpleClientset( &operatorapiv1.ClusterManager{ - ObjectMeta: metav1.ObjectMeta{Name: "testspokecluster"}, + ObjectMeta: metav1.ObjectMeta{Name: "testmanagedcluster"}, Status: operatorapiv1.ClusterManagerStatus{ Conditions: c.startingConditions, }, }, &operatorapiv1.Klusterlet{ - ObjectMeta: metav1.ObjectMeta{Name: "testspokecluster"}, + ObjectMeta: metav1.ObjectMeta{Name: "testmanagedcluster"}, Status: operatorapiv1.KlusterletStatus{ Conditions: c.startingConditions, }, @@ -102,7 +102,7 @@ func TestUpdateStatusCondition(t *testing.T) { hubstatus, updated, err := UpdateClusterManagerStatus( context.TODO(), fakeOperatorClient.OperatorV1().ClusterManagers(), - "testspokecluster", + "testmanagedcluster", UpdateClusterManagerConditionFn(c.newCondition), ) if err != nil { @@ -112,10 +112,10 @@ t.Errorf("expected %t, but %t", c.expectedUpdated, updated) } - spokestatus, updated, err := UpdateKlusterletStatus( + klusterletstatus, updated, err := UpdateKlusterletStatus( context.TODO(), fakeOperatorClient.OperatorV1().Klusterlets(), - "testspokecluster", + "testmanagedcluster", UpdateKlusterletConditionFn(c.newCondition), ) if err != nil { @@ -135,12 +135,12 @@ t.Errorf(diff.ObjectDiff(expected, hubactual)) } - spokeactual := spokestatus.Conditions[i] + klusterletactual := klusterletstatus.Conditions[i] if expected.LastTransitionTime == (metav1.Time{}) { - spokeactual.LastTransitionTime = metav1.Time{} + klusterletactual.LastTransitionTime = metav1.Time{} } - if !equality.Semantic.DeepEqual(expected, spokeactual) { - t.Errorf(diff.ObjectDiff(expected, spokeactual)) + if
!equality.Semantic.DeepEqual(expected, klusterletactual) { + t.Errorf(diff.ObjectDiff(expected, klusterletactual)) } } }) diff --git a/pkg/operators/clustermanager/bindata/bindata.go b/pkg/operators/clustermanager/bindata/bindata.go index dc5d6a02d..2fdffc870 100644 --- a/pkg/operators/clustermanager/bindata/bindata.go +++ b/pkg/operators/clustermanager/bindata/bindata.go @@ -1,6 +1,6 @@ // Code generated for package bindata by go-bindata DO NOT EDIT. (@generated) // sources: -// manifests/cluster-manager/0000_00_clusters.open-cluster-management.io_spokeclusters.crd.yaml +// manifests/cluster-manager/0000_00_clusters.open-cluster-management.io_managedclusters.crd.yaml // manifests/cluster-manager/0000_00_work.open-cluster-management.io_manifestworks.crd.yaml // manifests/cluster-manager/cluster-manager-clusterrolebinding.yaml // manifests/cluster-manager/cluster-manager-namespace.yaml @@ -69,33 +69,34 @@ func (fi bindataFileInfo) Sys() interface{} { return nil } -var _manifestsClusterManager0000_00_clustersOpenClusterManagementIo_spokeclustersCrdYaml = []byte(`apiVersion: apiextensions.k8s.io/v1beta1 +var _manifestsClusterManager0000_00_clustersOpenClusterManagementIo_managedclustersCrdYaml = []byte(`apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: - name: spokeclusters.cluster.open-cluster-management.io + name: managedclusters.cluster.open-cluster-management.io spec: group: cluster.open-cluster-management.io names: - kind: SpokeCluster - listKind: SpokeClusterList - plural: spokeclusters - singular: spokecluster + kind: ManagedCluster + listKind: ManagedClusterList + plural: managedclusters + singular: managedcluster scope: "Cluster" subresources: status: {} preserveUnknownFields: false validation: openAPIV3Schema: - description: "SpokeCluster represents the desired state and current status of - spoke cluster. SpokeCluster is a cluster scoped resource. The name is the - cluster UID. \n The cluster join process follows a double opt-in process: - \n 1. agent on spoke cluster creates CSR on hub with cluster UID and agent - name. 2. agent on spoke cluster creates spokecluster on hub. 3. cluster admin - on hub approves the CSR for the spoke's cluster UID and agent name. 4. cluster - admin set spec.acceptSpokeCluster of spokecluster to true. 5. cluster admin - on spoke creates credential of kubeconfig to spoke. \n Once the hub creates - the cluster namespace, the spoke agent pushes the credential to the hub to - use against the spoke's kube-apiserver." + description: "ManagedCluster represents the desired state and current status + of managed cluster. ManagedCluster is a cluster scoped resource. The name + is the cluster UID. \n The cluster join process follows a double opt-in process: + \n 1. agent on managed cluster creates CSR on hub with cluster UID and agent + name. 2. agent on managed cluster creates ManagedCluster on hub. 3. cluster + admin on hub approves the CSR for the ManagedCluster's UID and agent name. + 4. cluster admin sets spec.acceptClient of ManagedCluster to true. 5. cluster + admin on managed cluster creates credential of kubeconfig to hub. \n Once + the hub creates the cluster namespace, the Klusterlet agent on the Managed + Cluster pushes the credential to the hub to use against the managed cluster's + kube-apiserver." type: object properties: apiVersion: @@ -112,68 +113,69 @@ spec: type: object spec: description: Spec represents a desired configuration for the agent on the - spoke cluster. + managed cluster. 
type: object properties: hubAcceptsClient: - description: AcceptSpokeCluster reprsents that hub accepts the join - of spoke agent. Its default value is false, and can only be set true - when the user on hub has an RBAC rule to UPDATE on the virtual subresource - of spokeclusters/accept. When the vaule is set true, a namespace whose - name is same as the name of SpokeCluster is created on hub representing - the spoke cluster, also role/rolebinding is created on the namespace - to grant the permision of access from agent on spoke. When the value - is set false, the namespace representing the spoke cluster is deleted. + description: hubAcceptsClient represents that hub accepts the join of + Klusterlet agent on the managed cluster to the hub. The default value + is false, and can only be set true when the user on hub has an RBAC + rule to UPDATE on the virtual subresource of managedclusters/accept. + When the value is set true, a namespace whose name is same as the + name of ManagedCluster is created on hub representing the managed + cluster, also role/rolebinding is created on the namespace to grant + the permision of access from agent on managed cluster. When the value + is set false, the namespace representing the managed cluster is deleted. type: boolean leaseDurationSeconds: description: LeaseDurationSeconds is used to coordinate the lease update - time of spoke agents. If its value is zero, the spoke agent will update - its lease per 60s by default + time of Klusterlet agents on the managed cluster. If its value is + zero, the Klusterlet agent will update its lease every 60s by default type: integer format: int32 - spokeClientConfigs: - description: SpokeClientConfigs represents a list of the apiserver address - of the spoke cluster. If it is empty, spoke cluster has no accessible - address to be visited from hub. + managedClusterClientConfigs: + description: ManagedClusterClientConfigs represents a list of the apiserver + address of the managed cluster. If it is empty, managed cluster has + no accessible address to be visited from hub. type: array items: description: ClientConfig represents the apiserver address of the - spoke cluster. TODO include credential to connect to spoke cluster + managed cluster. TODO include credential to connect to managed cluster kube-apiserver type: object properties: caBundle: description: CABundle is the ca bundle to connect to apiserver - of the spoke cluster. System certs are used if it is not set. + of the managed cluster. System certs are used if it is not set. type: string format: byte url: - description: URL is the url of apiserver endpoint of the spoke + description: URL is the url of apiserver endpoint of the managed cluster. type: string status: - description: Status represents the current status of joined spoke cluster + description: Status represents the current status of joined managed cluster type: object properties: allocatable: description: Allocatable represents the total allocatable resources - on the spoke cluster. + on the managed cluster. type: object additionalProperties: type: string capacity: description: Capacity represents the total resource capacity from all - nodeStatuses on the spoke cluster. + nodeStatuses on the managed cluster. type: object additionalProperties: type: string conditions: description: Conditions contains the different condition statuses for - this spoke cluster. + this managed cluster. type: array items: description: StatusCondition contains condition information for a - spoke cluster. + managed cluster. 
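The renamed spec fields line up one-for-one with what the registration agent reports. A minimal sketch of a ManagedCluster built with the vendored API, assuming the generated Go types mirror the CRD field names above (HubAcceptsClient, LeaseDurationSeconds, ManagedClusterClientConfigs); the cluster name and URL are made up.

package main

import (
	"encoding/json"
	"fmt"

	clusterv1 "github.com/open-cluster-management/api/cluster/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Roughly what the registration agent registers on the hub; the hub admin
	// later flips HubAcceptsClient to true to complete the double opt-in.
	mc := clusterv1.ManagedCluster{
		ObjectMeta: metav1.ObjectMeta{Name: "cluster1"},
		Spec: clusterv1.ManagedClusterSpec{
			HubAcceptsClient:     false,
			LeaseDurationSeconds: 60,
			ManagedClusterClientConfigs: []clusterv1.ClientConfig{
				{URL: "https://api.cluster1.example.com:6443"},
			},
		},
	}
	out, _ := json.MarshalIndent(mc, "", "  ")
	fmt.Println(string(out))
}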
type: object properties: lastTransitionTime: @@ -197,12 +199,12 @@ spec: description: Type is the type of the cluster condition. type: string version: - description: Version represents the kubernetes version of the spoke + description: Version represents the kubernetes version of the managed cluster. type: object properties: kubernetes: - description: Kubernetes is the kubernetes version of spoke cluster + description: Kubernetes is the kubernetes version of managed cluster. type: string version: v1 versions: @@ -217,17 +219,17 @@ status: storedVersions: [] `) -func manifestsClusterManager0000_00_clustersOpenClusterManagementIo_spokeclustersCrdYamlBytes() ([]byte, error) { - return _manifestsClusterManager0000_00_clustersOpenClusterManagementIo_spokeclustersCrdYaml, nil +func manifestsClusterManager0000_00_clustersOpenClusterManagementIo_managedclustersCrdYamlBytes() ([]byte, error) { + return _manifestsClusterManager0000_00_clustersOpenClusterManagementIo_managedclustersCrdYaml, nil } -func manifestsClusterManager0000_00_clustersOpenClusterManagementIo_spokeclustersCrdYaml() (*asset, error) { - bytes, err := manifestsClusterManager0000_00_clustersOpenClusterManagementIo_spokeclustersCrdYamlBytes() +func manifestsClusterManager0000_00_clustersOpenClusterManagementIo_managedclustersCrdYaml() (*asset, error) { + bytes, err := manifestsClusterManager0000_00_clustersOpenClusterManagementIo_managedclustersCrdYamlBytes() if err != nil { return nil, err } - info := bindataFileInfo{name: "manifests/cluster-manager/0000_00_clusters.open-cluster-management.io_spokeclusters.crd.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + info := bindataFileInfo{name: "manifests/cluster-manager/0000_00_clusters.open-cluster-management.io_managedclusters.crd.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -251,10 +253,10 @@ spec: validation: openAPIV3Schema: description: ManifestWork represents a manifests workload that hub wants to - deploy on the spoke cluster. A manifest workload is defined as a set of kubernetes - resources. ManifestWork must be created in the cluster namespace on the hub, - so that agent on the corresponding spoke cluster can access this resource - and deploy on the spoke cluster. + deploy on the managed cluster. A manifest workload is defined as a set of + kubernetes resources. ManifestWork must be created in the cluster namespace + on the hub, so that agent on the corresponding managed cluster can access + this resource and deploy on the managed cluster. type: object properties: apiVersion: @@ -271,21 +273,21 @@ spec: type: object spec: description: Spec represents a desired configuration of work to be deployed - on the spoke cluster. + on the managed cluster. type: object properties: workload: description: Workload represents the manifest workload to be deployed - on spoke cluster + on managed cluster type: object properties: manifests: description: Manifests represents a list of kuberenetes resources - to be deployed on the spoke cluster. + to be deployed on the managed cluster. type: array items: description: Manifest represents a resource to be deployed on - spoke cluster + managed cluster type: object x-kubernetes-preserve-unknown-fields: true x-kubernetes-embedded-resource: true @@ -299,7 +301,7 @@ spec: GroupVersionResource, namespace, and name are suitable. An item in this slice is deleted when there is no mapped manifest in manifestwork.Spec or by finalizer. 
The resource relating to the item will also be removed - from spoke cluster. The deleted resource may still be present until + from managed cluster. The deleted resource may still be present until the finalizers for that resource are finished. However, the resource will not be undeleted, so it can be removed from this list and eventual consistency is preserved. @@ -329,15 +331,15 @@ spec: conditions: description: 'Conditions contains the different condition statuses for this work. Valid condition types are: 1. Applied represents workload - in ManifestWork is applied successfully on spoke cluster. 2. Progressing - represents workload in ManifestWork is being applied on spoke cluster. - 3. Available represents workload in ManifestWork exists on the spoke + in ManifestWork is applied successfully on managed cluster. 2. Progressing + represents workload in ManifestWork is being applied on managed cluster. + 3. Available represents workload in ManifestWork exists on the managed cluster. 4. Degraded represents the current state of workload does not match the desired state for a certain period.' type: array items: description: StatusCondition contains condition information for a - spoke work. + ManifestWork applied to a managed cluster. type: object properties: lastTransitionTime: @@ -358,35 +360,35 @@ spec: False, Unknown. type: string type: - description: Type is the type of the spoke work condition. + description: Type is the type of the ManifestWork condition. type: string resourceStatus: description: ResourceStatus represents the status of each resource in - manifestwork deployed on spoke cluster. The agent on spoke cluster - syncs the condition from spoke to the hub. + manifestwork deployed on managed cluster. The Klusterlet agent on + managed cluster syncs the condition from managed to the hub. type: object properties: manifests: description: 'Manifests represents the condition of manifests deployed - on spoke cluster. Valid condition types are: 1. Progressing represents - the resource is being applied on spoke cluster. 2. Applied represents - the resource is applied successfully on spoke cluster. 3. Available - represents the resource exists on the spoke cluster. 4. Degraded - represents the current state of resource does not match the desired - state for a certain period.' + on managed cluster. Valid condition types are: 1. Progressing + represents the resource is being applied on managed cluster. 2. + Applied represents the resource is applied successfully on managed + cluster. 3. Available represents the resource exists on the managed + cluster. 4. Degraded represents the current state of resource + does not match the desired state for a certain period.' type: array items: description: ManifestCondition represents the conditions of the - resources deployed on spoke cluster + resources deployed on managed cluster type: object properties: conditions: description: Conditions represents the conditions of this - resource on spoke cluster + resource on managed cluster type: array items: description: StatusCondition contains condition information - for a spoke work. + for a ManifestWork applied to a managed cluster. type: object properties: lastTransitionTime: @@ -407,7 +409,7 @@ spec: One of True, False, Unknown. type: string type: - description: Type is the type of the spoke work condition. + description: Type is the type of the ManifestWork condition. 
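The same rename runs through the ManifestWork schema above. A hedged sketch of how a hub-side caller might assemble one, assuming the generated work/v1 Go types follow the CRD layout (a ManifestsTemplate holding Manifest entries that embed runtime.RawExtension); the ConfigMap payload and names are illustrative.

package main

import (
	"encoding/json"
	"fmt"

	workv1 "github.com/open-cluster-management/api/work/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// Raw manifest the work agent would apply on the managed cluster.
	configMap := []byte(`{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"demo","namespace":"default"},"data":{"greeting":"hello"}}`)

	// ManifestWork is created in the cluster namespace on the hub, i.e. the
	// namespace named after the ManagedCluster ("cluster1" here).
	work := workv1.ManifestWork{
		ObjectMeta: metav1.ObjectMeta{Name: "demo-work", Namespace: "cluster1"},
		Spec: workv1.ManifestWorkSpec{
			Workload: workv1.ManifestsTemplate{
				Manifests: []workv1.Manifest{
					{RawExtension: runtime.RawExtension{Raw: configMap}},
				},
			},
		},
	}
	out, _ := json.MarshalIndent(work, "", "  ")
	fmt.Println(string(out))
}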
type: string resourceMeta: description: ResourceMeta represents the gvk, name and namespace @@ -547,12 +549,12 @@ rules: - apiGroups: ["rbac.authorization.k8s.io"] resources: ["clusterroles", "roles"] verbs: ["get", "list", "watch", "create", "update", "patch", "delete", "escalate", "bind"] -# Allow hub to manage spokeclusters +# Allow hub to manage managedclusters - apiGroups: ["cluster.open-cluster-management.io"] - resources: ["spokeclusters"] + resources: ["managedclusters"] verbs: ["get", "list", "watch", "update", "patch"] - apiGroups: ["cluster.open-cluster-management.io"] - resources: ["spokeclusters/status"] + resources: ["managedclusters/status"] verbs: ["update", "patch"] `) @@ -712,11 +714,11 @@ kind: ClusterRole metadata: name: system:open-cluster-management:{{ .ClusterManagerName }}-registration-webhook rules: -# Allow spokecluster admission to get/list/watch configmaps +# Allow managedcluster admission to get/list/watch configmaps - apiGroups: [""] resources: ["configmaps"] verbs: ["get", "list", "watch"] -# Allow spokecluster admission to create subjectaccessreviews +# Allow managedcluster admission to create subjectaccessreviews - apiGroups: ["authorization.k8s.io"] resources: ["subjectaccessreviews"] verbs: ["create"] @@ -913,16 +915,16 @@ func manifestsClusterManagerClusterManagerRegistrationWebhookServiceaccountYaml( var _manifestsClusterManagerClusterManagerRegistrationWebhookValidatingconfigurationYaml = []byte(`apiVersion: admissionregistration.k8s.io/v1 kind: ValidatingWebhookConfiguration metadata: - name: spokeclustervalidators.admission.cluster.open-cluster-management.io + name: managedclustervalidators.admission.cluster.open-cluster-management.io webhooks: -- name: spokeclustervalidators.admission.cluster.open-cluster-management.io +- name: managedclustervalidators.admission.cluster.open-cluster-management.io failurePolicy: Fail clientConfig: service: # reach the webhook via the registered aggregated API namespace: default name: kubernetes - path: /apis/admission.cluster.open-cluster-management.io/v1/spokeclustervalidators + path: /apis/admission.cluster.open-cluster-management.io/v1/managedclustervalidators rules: - operations: - CREATE @@ -932,7 +934,7 @@ webhooks: apiVersions: - "*" resources: - - spokeclusters + - managedclusters admissionReviewVersions: ["v1beta1"] sideEffects: None timeoutSeconds: 3 @@ -1005,22 +1007,22 @@ func AssetNames() []string { // _bindata is a table, holding each asset generator, mapped to its name. 
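Because go-bindata keys assets by file path, the rename has to show up both in the asset table below and anywhere a caller asks for the asset by name (see staticResourceFiles in clustermanager/controller.go further down). A small sketch using the usual go-bindata accessors, assuming Asset and AssetNames are generated as in this package.

package main

import (
	"fmt"

	"github.com/open-cluster-management/nucleus/pkg/operators/clustermanager/bindata"
)

func main() {
	// The controller refers to assets by path, so a renamed CRD file means a
	// renamed key here as well.
	name := "manifests/cluster-manager/0000_00_clusters.open-cluster-management.io_managedclusters.crd.yaml"
	data, err := bindata.Asset(name)
	if err != nil {
		panic(err)
	}
	fmt.Printf("loaded %s (%d bytes)\n", name, len(data))

	// Listing the asset names is a quick way to spot a stale spokeclusters entry.
	for _, n := range bindata.AssetNames() {
		fmt.Println(n)
	}
}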
var _bindata = map[string]func() (*asset, error){ - "manifests/cluster-manager/0000_00_clusters.open-cluster-management.io_spokeclusters.crd.yaml": manifestsClusterManager0000_00_clustersOpenClusterManagementIo_spokeclustersCrdYaml, - "manifests/cluster-manager/0000_00_work.open-cluster-management.io_manifestworks.crd.yaml": manifestsClusterManager0000_00_workOpenClusterManagementIo_manifestworksCrdYaml, - "manifests/cluster-manager/cluster-manager-clusterrolebinding.yaml": manifestsClusterManagerClusterManagerClusterrolebindingYaml, - "manifests/cluster-manager/cluster-manager-namespace.yaml": manifestsClusterManagerClusterManagerNamespaceYaml, - "manifests/cluster-manager/cluster-manager-registration-clusterrole.yaml": manifestsClusterManagerClusterManagerRegistrationClusterroleYaml, - "manifests/cluster-manager/cluster-manager-registration-clusterrolebinding.yaml": manifestsClusterManagerClusterManagerRegistrationClusterrolebindingYaml, - "manifests/cluster-manager/cluster-manager-registration-deployment.yaml": manifestsClusterManagerClusterManagerRegistrationDeploymentYaml, - "manifests/cluster-manager/cluster-manager-registration-serviceaccount.yaml": manifestsClusterManagerClusterManagerRegistrationServiceaccountYaml, - "manifests/cluster-manager/cluster-manager-registration-webhook-apiservice.yaml": manifestsClusterManagerClusterManagerRegistrationWebhookApiserviceYaml, - "manifests/cluster-manager/cluster-manager-registration-webhook-clusterrole.yaml": manifestsClusterManagerClusterManagerRegistrationWebhookClusterroleYaml, - "manifests/cluster-manager/cluster-manager-registration-webhook-clusterrolebinding.yaml": manifestsClusterManagerClusterManagerRegistrationWebhookClusterrolebindingYaml, - "manifests/cluster-manager/cluster-manager-registration-webhook-deployment.yaml": manifestsClusterManagerClusterManagerRegistrationWebhookDeploymentYaml, - "manifests/cluster-manager/cluster-manager-registration-webhook-secret.yaml": manifestsClusterManagerClusterManagerRegistrationWebhookSecretYaml, - "manifests/cluster-manager/cluster-manager-registration-webhook-service.yaml": manifestsClusterManagerClusterManagerRegistrationWebhookServiceYaml, - "manifests/cluster-manager/cluster-manager-registration-webhook-serviceaccount.yaml": manifestsClusterManagerClusterManagerRegistrationWebhookServiceaccountYaml, - "manifests/cluster-manager/cluster-manager-registration-webhook-validatingconfiguration.yaml": manifestsClusterManagerClusterManagerRegistrationWebhookValidatingconfigurationYaml, + "manifests/cluster-manager/0000_00_clusters.open-cluster-management.io_managedclusters.crd.yaml": manifestsClusterManager0000_00_clustersOpenClusterManagementIo_managedclustersCrdYaml, + "manifests/cluster-manager/0000_00_work.open-cluster-management.io_manifestworks.crd.yaml": manifestsClusterManager0000_00_workOpenClusterManagementIo_manifestworksCrdYaml, + "manifests/cluster-manager/cluster-manager-clusterrolebinding.yaml": manifestsClusterManagerClusterManagerClusterrolebindingYaml, + "manifests/cluster-manager/cluster-manager-namespace.yaml": manifestsClusterManagerClusterManagerNamespaceYaml, + "manifests/cluster-manager/cluster-manager-registration-clusterrole.yaml": manifestsClusterManagerClusterManagerRegistrationClusterroleYaml, + "manifests/cluster-manager/cluster-manager-registration-clusterrolebinding.yaml": manifestsClusterManagerClusterManagerRegistrationClusterrolebindingYaml, + "manifests/cluster-manager/cluster-manager-registration-deployment.yaml": 
manifestsClusterManagerClusterManagerRegistrationDeploymentYaml, + "manifests/cluster-manager/cluster-manager-registration-serviceaccount.yaml": manifestsClusterManagerClusterManagerRegistrationServiceaccountYaml, + "manifests/cluster-manager/cluster-manager-registration-webhook-apiservice.yaml": manifestsClusterManagerClusterManagerRegistrationWebhookApiserviceYaml, + "manifests/cluster-manager/cluster-manager-registration-webhook-clusterrole.yaml": manifestsClusterManagerClusterManagerRegistrationWebhookClusterroleYaml, + "manifests/cluster-manager/cluster-manager-registration-webhook-clusterrolebinding.yaml": manifestsClusterManagerClusterManagerRegistrationWebhookClusterrolebindingYaml, + "manifests/cluster-manager/cluster-manager-registration-webhook-deployment.yaml": manifestsClusterManagerClusterManagerRegistrationWebhookDeploymentYaml, + "manifests/cluster-manager/cluster-manager-registration-webhook-secret.yaml": manifestsClusterManagerClusterManagerRegistrationWebhookSecretYaml, + "manifests/cluster-manager/cluster-manager-registration-webhook-service.yaml": manifestsClusterManagerClusterManagerRegistrationWebhookServiceYaml, + "manifests/cluster-manager/cluster-manager-registration-webhook-serviceaccount.yaml": manifestsClusterManagerClusterManagerRegistrationWebhookServiceaccountYaml, + "manifests/cluster-manager/cluster-manager-registration-webhook-validatingconfiguration.yaml": manifestsClusterManagerClusterManagerRegistrationWebhookValidatingconfigurationYaml, } // AssetDir returns the file names below a certain @@ -1066,22 +1068,22 @@ type bintree struct { var _bintree = &bintree{nil, map[string]*bintree{ "manifests": {nil, map[string]*bintree{ "cluster-manager": {nil, map[string]*bintree{ - "0000_00_clusters.open-cluster-management.io_spokeclusters.crd.yaml": {manifestsClusterManager0000_00_clustersOpenClusterManagementIo_spokeclustersCrdYaml, map[string]*bintree{}}, - "0000_00_work.open-cluster-management.io_manifestworks.crd.yaml": {manifestsClusterManager0000_00_workOpenClusterManagementIo_manifestworksCrdYaml, map[string]*bintree{}}, - "cluster-manager-clusterrolebinding.yaml": {manifestsClusterManagerClusterManagerClusterrolebindingYaml, map[string]*bintree{}}, - "cluster-manager-namespace.yaml": {manifestsClusterManagerClusterManagerNamespaceYaml, map[string]*bintree{}}, - "cluster-manager-registration-clusterrole.yaml": {manifestsClusterManagerClusterManagerRegistrationClusterroleYaml, map[string]*bintree{}}, - "cluster-manager-registration-clusterrolebinding.yaml": {manifestsClusterManagerClusterManagerRegistrationClusterrolebindingYaml, map[string]*bintree{}}, - "cluster-manager-registration-deployment.yaml": {manifestsClusterManagerClusterManagerRegistrationDeploymentYaml, map[string]*bintree{}}, - "cluster-manager-registration-serviceaccount.yaml": {manifestsClusterManagerClusterManagerRegistrationServiceaccountYaml, map[string]*bintree{}}, - "cluster-manager-registration-webhook-apiservice.yaml": {manifestsClusterManagerClusterManagerRegistrationWebhookApiserviceYaml, map[string]*bintree{}}, - "cluster-manager-registration-webhook-clusterrole.yaml": {manifestsClusterManagerClusterManagerRegistrationWebhookClusterroleYaml, map[string]*bintree{}}, - "cluster-manager-registration-webhook-clusterrolebinding.yaml": {manifestsClusterManagerClusterManagerRegistrationWebhookClusterrolebindingYaml, map[string]*bintree{}}, - "cluster-manager-registration-webhook-deployment.yaml": {manifestsClusterManagerClusterManagerRegistrationWebhookDeploymentYaml, 
map[string]*bintree{}}, - "cluster-manager-registration-webhook-secret.yaml": {manifestsClusterManagerClusterManagerRegistrationWebhookSecretYaml, map[string]*bintree{}}, - "cluster-manager-registration-webhook-service.yaml": {manifestsClusterManagerClusterManagerRegistrationWebhookServiceYaml, map[string]*bintree{}}, - "cluster-manager-registration-webhook-serviceaccount.yaml": {manifestsClusterManagerClusterManagerRegistrationWebhookServiceaccountYaml, map[string]*bintree{}}, - "cluster-manager-registration-webhook-validatingconfiguration.yaml": {manifestsClusterManagerClusterManagerRegistrationWebhookValidatingconfigurationYaml, map[string]*bintree{}}, + "0000_00_clusters.open-cluster-management.io_managedclusters.crd.yaml": {manifestsClusterManager0000_00_clustersOpenClusterManagementIo_managedclustersCrdYaml, map[string]*bintree{}}, + "0000_00_work.open-cluster-management.io_manifestworks.crd.yaml": {manifestsClusterManager0000_00_workOpenClusterManagementIo_manifestworksCrdYaml, map[string]*bintree{}}, + "cluster-manager-clusterrolebinding.yaml": {manifestsClusterManagerClusterManagerClusterrolebindingYaml, map[string]*bintree{}}, + "cluster-manager-namespace.yaml": {manifestsClusterManagerClusterManagerNamespaceYaml, map[string]*bintree{}}, + "cluster-manager-registration-clusterrole.yaml": {manifestsClusterManagerClusterManagerRegistrationClusterroleYaml, map[string]*bintree{}}, + "cluster-manager-registration-clusterrolebinding.yaml": {manifestsClusterManagerClusterManagerRegistrationClusterrolebindingYaml, map[string]*bintree{}}, + "cluster-manager-registration-deployment.yaml": {manifestsClusterManagerClusterManagerRegistrationDeploymentYaml, map[string]*bintree{}}, + "cluster-manager-registration-serviceaccount.yaml": {manifestsClusterManagerClusterManagerRegistrationServiceaccountYaml, map[string]*bintree{}}, + "cluster-manager-registration-webhook-apiservice.yaml": {manifestsClusterManagerClusterManagerRegistrationWebhookApiserviceYaml, map[string]*bintree{}}, + "cluster-manager-registration-webhook-clusterrole.yaml": {manifestsClusterManagerClusterManagerRegistrationWebhookClusterroleYaml, map[string]*bintree{}}, + "cluster-manager-registration-webhook-clusterrolebinding.yaml": {manifestsClusterManagerClusterManagerRegistrationWebhookClusterrolebindingYaml, map[string]*bintree{}}, + "cluster-manager-registration-webhook-deployment.yaml": {manifestsClusterManagerClusterManagerRegistrationWebhookDeploymentYaml, map[string]*bintree{}}, + "cluster-manager-registration-webhook-secret.yaml": {manifestsClusterManagerClusterManagerRegistrationWebhookSecretYaml, map[string]*bintree{}}, + "cluster-manager-registration-webhook-service.yaml": {manifestsClusterManagerClusterManagerRegistrationWebhookServiceYaml, map[string]*bintree{}}, + "cluster-manager-registration-webhook-serviceaccount.yaml": {manifestsClusterManagerClusterManagerRegistrationWebhookServiceaccountYaml, map[string]*bintree{}}, + "cluster-manager-registration-webhook-validatingconfiguration.yaml": {manifestsClusterManagerClusterManagerRegistrationWebhookValidatingconfigurationYaml, map[string]*bintree{}}, }}, }}, }} diff --git a/pkg/operators/clustermanager/controller.go b/pkg/operators/clustermanager/controller.go index 981429951..450c24305 100644 --- a/pkg/operators/clustermanager/controller.go +++ b/pkg/operators/clustermanager/controller.go @@ -34,10 +34,10 @@ import ( var ( crdNames = []string{ "manifestworks.work.open-cluster-management.io", - "spokeclusters.cluster.open-cluster-management.io", + "managedclusters.cluster.open-cluster-management.io", } staticResourceFiles = []string{ - "manifests/cluster-manager/0000_00_clusters.open-cluster-management.io_spokeclusters.crd.yaml", + "manifests/cluster-manager/0000_00_clusters.open-cluster-management.io_managedclusters.crd.yaml", "manifests/cluster-manager/0000_00_work.open-cluster-management.io_manifestworks.crd.yaml", "manifests/cluster-manager/cluster-manager-registration-clusterrole.yaml", "manifests/cluster-manager/cluster-manager-registration-clusterrolebinding.yaml", diff --git a/pkg/operators/klusterlet/bindata/bindata.go b/pkg/operators/klusterlet/bindata/bindata.go index d4737b4a6..d143846ae 100644 --- a/pkg/operators/klusterlet/bindata/bindata.go +++ b/pkg/operators/klusterlet/bindata/bindata.go @@ -142,7 +142,7 @@ spec: spec: serviceAccountName: {{ .KlusterletName }}-registration-sa containers: - - name: spoke-agent + - name: registration-controller image: {{ .RegistrationImage }} imagePullPolicy: IfNotPresent args: @@ -402,7 +402,7 @@ spec: spec: serviceAccountName: {{ .KlusterletName }}-work-sa containers: - - name: spoke-agent + - name: work-controller image: {{ .WorkImage }} imagePullPolicy: IfNotPresent args: diff --git a/pkg/operators/klusterlet/controller.go b/pkg/operators/klusterlet/controller.go index ce0088ac4..8fdc045e1 100644 --- a/pkg/operators/klusterlet/controller.go +++ b/pkg/operators/klusterlet/controller.go @@ -30,12 +30,12 @@ import ( ) const ( - klusterletFinalizer = "operator.open-cluster-management.io/klusterlet-cleanup" - bootstrapHubKubeConfigSecret = "bootstrap-hub-kubeconfig" - hubKubeConfigSecret = "hub-kubeconfig-secret" - klusterletNamespace = "open-cluster-management-agent" - klusterletApplied = "Applied" - spokeRegistrationDegraded = "SpokeRegistrationDegraded" + klusterletFinalizer = "operator.open-cluster-management.io/klusterlet-cleanup" + bootstrapHubKubeConfigSecret = "bootstrap-hub-kubeconfig" + hubKubeConfigSecret = "hub-kubeconfig-secret" + klusterletNamespace = "open-cluster-management-agent" + klusterletApplied = "Applied" + klusterletRegistrationDegraded = "KlusterletRegistrationDegraded" ) var ( @@ -80,8 +80,8 @@ func NewKlusterletController( ToController("KlusterletController", recorder) } -// spokeConfig is used to render the template of hub manifests -type spokeConfig struct { +// klusterletConfig is used to render the templates of the klusterlet manifests +type klusterletConfig struct { KlusterletName string KlusterletNamespace string RegistrationImage string @@ -105,7 +105,7 @@ func (n *klusterletController) sync(ctx context.Context, controllerContext facto } klusterlet = klusterlet.DeepCopy() - config := spokeConfig{ + config := klusterletConfig{ KlusterletName: klusterlet.Name, KlusterletNamespace: klusterlet.Spec.Namespace, RegistrationImage: klusterlet.Spec.RegistrationImagePullSpec, @@ -136,7 +136,7 @@ func (n *klusterletController) sync(ctx context.Context, controllerContext facto } } - // Klusterlet is deleting, we remove its related resources on spoke + // Klusterlet is deleting, we remove its related resources on managed cluster if !klusterlet.DeletionTimestamp.IsZero() { if err := n.cleanUp(ctx, controllerContext, config); err != nil { return err @@ -144,7 +144,7 @@ func (n *klusterletController) sync(ctx context.Context, controllerContext facto return n.removeKlusterletFinalizer(ctx, klusterlet) } - // Start deploy spoke core components + // Start deploying the klusterlet components // Check if namespace exists _, err = n.kubeClient.CoreV1().Namespaces().Get(ctx, config.KlusterletNamespace,
metav1.GetOptions{}) switch { @@ -248,7 +248,7 @@ func (n *klusterletController) sync(ctx context.Context, controllerContext facto })) return err } - // TODO store this in the status of the spokecore itself + // TODO store this in the status of the klusterlet itself n.registrationGeneration = generation // Deploy work agent @@ -267,13 +267,13 @@ func (n *klusterletController) sync(ctx context.Context, controllerContext facto })) return err } - // TODO store this in the status of the spokecore itself + // TODO store this in the status of the klusterlet itself n.workGeneration = generation // if we get here, we have successfully applied everything and should indicate that helpers.UpdateKlusterletStatus(ctx, n.klusterletClient, klusterletName, helpers.UpdateKlusterletConditionFn(operatorapiv1.StatusCondition{ Type: klusterletApplied, Status: metav1.ConditionTrue, Reason: "KlusterletApplied", - Message: "Spoke Core Component Applied", + Message: "Klusterlet Component Applied", })) // now that we have applied all of our logic, we can check to see if the data we expect to have present as indications of @@ -286,8 +286,8 @@ func (n *klusterletController) sync(ctx context.Context, controllerContext facto clusterName := hubSecret.Data["cluster-name"] if clusterName == nil { helpers.UpdateKlusterletStatus(ctx, n.klusterletClient, klusterletName, helpers.UpdateKlusterletConditionFn(operatorapiv1.StatusCondition{ - Type: spokeRegistrationDegraded, Status: metav1.ConditionTrue, Reason: "ClusterNameMissing", - Message: fmt.Sprintf("Failed to get cluster name from `kubectl get secret -n %q %q -ojsonpath='{.data.cluster-name}`. This is set by the spoke registration deployment.", hubSecret.Namespace, hubSecret.Name), + Type: klusterletRegistrationDegraded, Status: metav1.ConditionTrue, Reason: "ClusterNameMissing", + Message: fmt.Sprintf("Failed to get cluster name from `kubectl get secret -n %q %q -ojsonpath='{.data.cluster-name}`. This is set by the klusterlet registration deployment.", hubSecret.Namespace, hubSecret.Name), })) return fmt.Errorf("Failed to get cluster name") } @@ -297,21 +297,21 @@ func (n *klusterletController) sync(ctx context.Context, controllerContext facto // If hub kubeconfig does not exist, return err. if hubSecret.Data["kubeconfig"] == nil { helpers.UpdateKlusterletStatus(ctx, n.klusterletClient, klusterletName, helpers.UpdateKlusterletConditionFn(operatorapiv1.StatusCondition{ - Type: spokeRegistrationDegraded, Status: metav1.ConditionTrue, Reason: "HubKubeconfigMissing", - Message: fmt.Sprintf("Failed to get cluster name from `kubectl get secret -n %q %q -ojsonpath='{.data.kubeconfig}`. This is set by the spoke registration deployment, but the CSR must be approved by the cluster-admin on the hub.", hubSecret.Namespace, hubSecret.Name), + Type: klusterletRegistrationDegraded, Status: metav1.ConditionTrue, Reason: "HubKubeconfigMissing", + Message: fmt.Sprintf("Failed to get cluster name from `kubectl get secret -n %q %q -ojsonpath='{.data.kubeconfig}`. This is set by the klusterlet registration deployment, but the CSR must be approved by the cluster-admin on the hub.", hubSecret.Namespace, hubSecret.Name), })) return fmt.Errorf("Failed to get kubeconfig from hub kubeconfig secret") } // TODO it is possible to verify the kubeconfig actually works. 
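The degraded checks above boil down to inspecting two keys of the hub kubeconfig secret. A condensed, standalone sketch of that logic, assuming the same data keys the controller reads; the helper name and messages are illustrative, not the controller's own.

package main

import (
	"fmt"

	operatorapiv1 "github.com/open-cluster-management/api/operator/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// registrationCondition reports KlusterletRegistrationDegraded from the hub
// kubeconfig secret written by the registration agent.
func registrationCondition(hubSecret *corev1.Secret) operatorapiv1.StatusCondition {
	switch {
	case len(hubSecret.Data["cluster-name"]) == 0:
		return operatorapiv1.StatusCondition{
			Type: "KlusterletRegistrationDegraded", Status: metav1.ConditionTrue,
			Reason: "ClusterNameMissing", Message: "cluster-name is not set in the hub kubeconfig secret",
		}
	case len(hubSecret.Data["kubeconfig"]) == 0:
		return operatorapiv1.StatusCondition{
			Type: "KlusterletRegistrationDegraded", Status: metav1.ConditionTrue,
			Reason: "HubKubeconfigMissing", Message: "kubeconfig is not set in the hub kubeconfig secret",
		}
	default:
		return operatorapiv1.StatusCondition{
			Type: "KlusterletRegistrationDegraded", Status: metav1.ConditionFalse,
			Reason: "RegistrationFunctional", Message: "Registration is managing credentials",
		}
	}
}

func main() {
	secret := &corev1.Secret{Data: map[string][]byte{"cluster-name": []byte("cluster1")}}
	cond := registrationCondition(secret)
	fmt.Printf("%s=%s (%s)\n", cond.Type, cond.Status, cond.Reason)
}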
helpers.UpdateKlusterletStatus(ctx, n.klusterletClient, klusterletName, helpers.UpdateKlusterletConditionFn(operatorapiv1.StatusCondition{ - Type: spokeRegistrationDegraded, Status: metav1.ConditionFalse, Reason: "RegistrationFunctional", + Type: klusterletRegistrationDegraded, Status: metav1.ConditionFalse, Reason: "RegistrationFunctional", Message: "Registration is managing credentials", })) return nil } -func (n *klusterletController) cleanUp(ctx context.Context, controllerContext factory.SyncContext, config spokeConfig) error { +func (n *klusterletController) cleanUp(ctx context.Context, controllerContext factory.SyncContext, config klusterletConfig) error { // Remove deployment registrationDeployment := fmt.Sprintf("%s-registration-agent", config.KlusterletName) err := n.kubeClient.AppsV1().Deployments(config.KlusterletNamespace).Delete(ctx, registrationDeployment, metav1.DeleteOptions{}) @@ -377,7 +377,7 @@ func readClusterNameFromSecret(secret *corev1.Secret) (string, error) { return string(secret.Data["cluster-name"]), nil } -func readKubuConfigFromSecret(secret *corev1.Secret, config spokeConfig) (string, error) { +func readKubuConfigFromSecret(secret *corev1.Secret, config klusterletConfig) (string, error) { if secret.Data["kubeconfig"] == nil { return "", fmt.Errorf("Unable to find kubeconfig in secret") } @@ -386,12 +386,12 @@ func readKubuConfigFromSecret(secret *corev1.Secret, config spokeConfig) (string } // TODO also read CABundle from ExternalServerURLs and set into registration deployment -func getServersFromKlusterlet(spokeCore *operatorapiv1.Klusterlet) string { - if spokeCore.Spec.ExternalServerURLs == nil { +func getServersFromKlusterlet(klusterlet *operatorapiv1.Klusterlet) string { + if klusterlet.Spec.ExternalServerURLs == nil { return "" } - serverString := make([]string, 0, len(spokeCore.Spec.ExternalServerURLs)) - for _, server := range spokeCore.Spec.ExternalServerURLs { + serverString := make([]string, 0, len(klusterlet.Spec.ExternalServerURLs)) + for _, server := range klusterlet.Spec.ExternalServerURLs { serverString = append(serverString, server.URL) } return strings.Join(serverString, ",") diff --git a/pkg/operators/klusterlet/controller_test.go b/pkg/operators/klusterlet/controller_test.go index 694c67a5c..aeb96814c 100644 --- a/pkg/operators/klusterlet/controller_test.go +++ b/pkg/operators/klusterlet/controller_test.go @@ -182,15 +182,15 @@ func ensureObject(t *testing.T, object runtime.Object, klusterlet *opratorapiv1. 
} } -// TestSyncDeploy test deployment of spoke components +// TestSyncDeploy test deployment of klusterlet components func TestSyncDeploy(t *testing.T) { - spokeCore := newKlusterlet("testspoke", "testns", "cluster1") + klusterlet := newKlusterlet("klusterlet", "testns", "cluster1") bootStrapSecret := newSecret(bootstrapHubKubeConfigSecret, "testns") hubKubeConfigSecret := newSecret(hubKubeConfigSecret, "testns") hubKubeConfigSecret.Data["kubeconfig"] = []byte("dummuykubeconnfig") namespace := newNamespace("testns") - controller := newTestController(spokeCore, bootStrapSecret, hubKubeConfigSecret, namespace) - syncContext := newFakeSyncContext(t, "testspoke") + controller := newTestController(klusterlet, bootStrapSecret, hubKubeConfigSecret, namespace) + syncContext := newFakeSyncContext(t, "klusterlet") err := controller.controller.sync(nil, syncContext) if err != nil { @@ -211,7 +211,7 @@ func TestSyncDeploy(t *testing.T) { t.Errorf("Expect 11 objects created in the sync loop, actual %d", len(createObjects)) } for _, object := range createObjects { - ensureObject(t, object, spokeCore) + ensureObject(t, object, klusterlet) } operatorAction := controller.operatorClient.Actions() @@ -226,17 +226,17 @@ func TestSyncDeploy(t *testing.T) { assertGet(t, operatorAction[2], "operator.open-cluster-management.io", "v1", "klusterlets") assertAction(t, operatorAction[3], "update") assertOnlyConditions(t, operatorAction[3].(clienttesting.UpdateActionImpl).Object, - namedCondition(klusterletApplied, metav1.ConditionTrue), namedCondition(spokeRegistrationDegraded, metav1.ConditionFalse)) + namedCondition(klusterletApplied, metav1.ConditionTrue), namedCondition(klusterletRegistrationDegraded, metav1.ConditionFalse)) } // TestSyncWithNoSecret test the scenario that bootstrap secret and hub config secret does not exist func TestSyncWithNoSecret(t *testing.T) { - klusterlet := newKlusterlet("testspoke", "testns", "") + klusterlet := newKlusterlet("klusterlet", "testns", "") bootStrapSecret := newSecret(bootstrapHubKubeConfigSecret, "testns") hubSecret := newSecret(hubKubeConfigSecret, "testns") namespace := newNamespace("testns") controller := newTestController(klusterlet, namespace) - syncContext := newFakeSyncContext(t, "testspoke") + syncContext := newFakeSyncContext(t, "klusterlet") // Return err since bootstrap secret does not exist err := controller.controller.sync(nil, syncContext) @@ -284,7 +284,7 @@ func TestSyncWithNoSecret(t *testing.T) { assertGet(t, operatorAction[2], "operator.open-cluster-management.io", "v1", "klusterlets") assertAction(t, operatorAction[3], "update") assertOnlyConditions(t, operatorAction[3].(clienttesting.UpdateActionImpl).Object, - namedCondition(klusterletApplied, metav1.ConditionTrue), namedCondition(spokeRegistrationDegraded, metav1.ConditionTrue)) + namedCondition(klusterletApplied, metav1.ConditionTrue), namedCondition(klusterletRegistrationDegraded, metav1.ConditionTrue)) // reset for round 3 controller.operatorClient.ClearActions() @@ -316,17 +316,17 @@ func TestSyncWithNoSecret(t *testing.T) { assertGet(t, operatorAction[1], "operator.open-cluster-management.io", "v1", "klusterlets") assertAction(t, operatorAction[2], "update") assertOnlyConditions(t, operatorAction[2].(clienttesting.UpdateActionImpl).Object, - namedCondition(klusterletApplied, metav1.ConditionTrue), namedCondition(spokeRegistrationDegraded, metav1.ConditionFalse)) + namedCondition(klusterletApplied, metav1.ConditionTrue), namedCondition(klusterletRegistrationDegraded, metav1.ConditionFalse)) } 
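getServersFromKlusterlet (changed above and exercised by TestGetServersFromKlusterlet below) only flattens spec.externalServerURLs into the comma-separated list handed to the registration agent. An equivalent standalone sketch, with made-up URLs; the exported helper name is an assumption.

package main

import (
	"fmt"
	"strings"

	operatorapiv1 "github.com/open-cluster-management/api/operator/v1"
)

// joinExternalServerURLs mirrors getServersFromKlusterlet: flatten the
// Klusterlet's external server URLs into one comma-separated string.
func joinExternalServerURLs(klusterlet *operatorapiv1.Klusterlet) string {
	urls := make([]string, 0, len(klusterlet.Spec.ExternalServerURLs))
	for _, s := range klusterlet.Spec.ExternalServerURLs {
		urls = append(urls, s.URL)
	}
	return strings.Join(urls, ",")
}

func main() {
	k := &operatorapiv1.Klusterlet{}
	k.Spec.ExternalServerURLs = []operatorapiv1.ServerURL{
		{URL: "https://api.cluster1.example.com:6443"},
		{URL: "https://api.cluster1.internal:6443"},
	}
	fmt.Println(joinExternalServerURLs(k))
}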
// TestSyncDelete test cleanup hub deploy func TestSyncDelete(t *testing.T) { - spokeCore := newKlusterlet("testspoke", "testns", "") + klusterlet := newKlusterlet("klusterlet", "testns", "") now := metav1.Now() - spokeCore.ObjectMeta.SetDeletionTimestamp(&now) + klusterlet.ObjectMeta.SetDeletionTimestamp(&now) namespace := newNamespace("testns") - controller := newTestController(spokeCore, namespace) - syncContext := newFakeSyncContext(t, "testspoke") + controller := newTestController(klusterlet, namespace) + syncContext := newFakeSyncContext(t, "klusterlet") err := controller.controller.sync(nil, syncContext) if err != nil { @@ -378,12 +378,12 @@ func TestGetServersFromKlusterlet(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { - spokeCore := newKlusterlet("testspoke", "testns", "") + klusterlet := newKlusterlet("klusterlet", "testns", "") for _, server := range c.servers { - spokeCore.Spec.ExternalServerURLs = append(spokeCore.Spec.ExternalServerURLs, + klusterlet.Spec.ExternalServerURLs = append(klusterlet.Spec.ExternalServerURLs, opratorapiv1.ServerURL{URL: server}) } - actual := getServersFromKlusterlet(spokeCore) + actual := getServersFromKlusterlet(klusterlet) if actual != c.expected { t.Errorf("Expected to be same, actual %q, expected %q", actual, c.expected) } diff --git a/pkg/operators/manager.go b/pkg/operators/manager.go index 14384f3a6..85aee23b5 100644 --- a/pkg/operators/manager.go +++ b/pkg/operators/manager.go @@ -18,7 +18,7 @@ import ( // RunClusterManagerOperator starts a new cluster manager operator func RunClusterManagerOperator(ctx context.Context, controllerContext *controllercmd.ControllerContext) error { - // Build kubclient client and informer for spoke cluster + // Build kubclient client and informer for managed cluster kubeClient, err := kubernetes.NewForConfig(controllerContext.KubeConfig) if err != nil { return err @@ -55,7 +55,7 @@ func RunClusterManagerOperator(ctx context.Context, controllerContext *controlle // RunKlusterletOperator starts a new klusterlet operator func RunKlusterletOperator(ctx context.Context, controllerContext *controllercmd.ControllerContext) error { - // Build kubclient client and informer for spoke cluster + // Build kubclient client and informer for managed cluster kubeClient, err := kubernetes.NewForConfig(controllerContext.KubeConfig) if err != nil { return err diff --git a/test/integration/doc.go b/test/integration/doc.go index e0ebcda2b..3b72d483e 100644 --- a/test/integration/doc.go +++ b/test/integration/doc.go @@ -1,4 +1,4 @@ // Package integration provides integration tests for open-cluster-management nucleus, the test cases include -// - TODO deploy/update/remvoe the cluster manager -// - TODO deploy/update/remvoe the spoke agents +// - TODO deploy/update/remove the cluster manager +// - TODO deploy/update/remove the klusterlet package integration diff --git a/test/integration/hub_test.go b/test/integration/hub_test.go index 5e7e3dc01..f5ce0ffd9 100644 --- a/test/integration/hub_test.go +++ b/test/integration/hub_test.go @@ -74,7 +74,7 @@ var _ = ginkgo.Describe("HubCore", func() { hubRegistrationDeployment = fmt.Sprintf("%s-registration-controller", clusterManagerName) hubWebhookDeployment = fmt.Sprintf("%s-registration-webhook", clusterManagerName) webhookSecret = "webhook-serving-cert" - validtingWebhook = "spokeclustervalidators.admission.cluster.open-cluster-management.io" + validtingWebhook = "managedclustervalidators.admission.cluster.open-cluster-management.io" }) ginkgo.It("should have 
expected resource created successfully", func() { diff --git a/test/integration/integration_suite_test.go b/test/integration/integration_suite_test.go index 273fd944c..612c26fbb 100644 --- a/test/integration/integration_suite_test.go +++ b/test/integration/integration_suite_test.go @@ -27,7 +27,7 @@ const ( eventuallyTimeout = 30 // seconds eventuallyInterval = 1 // seconds hubNamespace = "open-cluster-management-hub" - spokeNamespace = "open-cluster-management-spoke" + spokeNamespace = "open-cluster-management-agent" ) var testEnv *envtest.Environment diff --git a/vendor/github.com/open-cluster-management/api/cluster/v1/0000_00_clusters.open-cluster-management.io_spokeclusters.crd.yaml b/vendor/github.com/open-cluster-management/api/cluster/v1/0000_00_clusters.open-cluster-management.io_managedclusters.crd.yaml similarity index 60% rename from vendor/github.com/open-cluster-management/api/cluster/v1/0000_00_clusters.open-cluster-management.io_spokeclusters.crd.yaml rename to vendor/github.com/open-cluster-management/api/cluster/v1/0000_00_clusters.open-cluster-management.io_managedclusters.crd.yaml index b49894dc5..befc50b2a 100644 --- a/vendor/github.com/open-cluster-management/api/cluster/v1/0000_00_clusters.open-cluster-management.io_spokeclusters.crd.yaml +++ b/vendor/github.com/open-cluster-management/api/cluster/v1/0000_00_clusters.open-cluster-management.io_managedclusters.crd.yaml @@ -1,30 +1,31 @@ apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: - name: spokeclusters.cluster.open-cluster-management.io + name: managedclusters.cluster.open-cluster-management.io spec: group: cluster.open-cluster-management.io names: - kind: SpokeCluster - listKind: SpokeClusterList - plural: spokeclusters - singular: spokecluster + kind: ManagedCluster + listKind: ManagedClusterList + plural: managedclusters + singular: managedcluster scope: "Cluster" subresources: status: {} preserveUnknownFields: false validation: openAPIV3Schema: - description: "SpokeCluster represents the desired state and current status of - spoke cluster. SpokeCluster is a cluster scoped resource. The name is the - cluster UID. \n The cluster join process follows a double opt-in process: - \n 1. agent on spoke cluster creates CSR on hub with cluster UID and agent - name. 2. agent on spoke cluster creates spokecluster on hub. 3. cluster admin - on hub approves the CSR for the spoke's cluster UID and agent name. 4. cluster - admin set spec.acceptSpokeCluster of spokecluster to true. 5. cluster admin - on spoke creates credential of kubeconfig to spoke. \n Once the hub creates - the cluster namespace, the spoke agent pushes the credential to the hub to - use against the spoke's kube-apiserver." + description: "ManagedCluster represents the desired state and current status + of managed cluster. ManagedCluster is a cluster scoped resource. The name + is the cluster UID. \n The cluster join process follows a double opt-in process: + \n 1. agent on managed cluster creates CSR on hub with cluster UID and agent + name. 2. agent on managed cluster creates ManagedCluster on hub. 3. cluster + admin on hub approves the CSR for the ManagedCluster's UID and agent name. + 4. cluster admin sets spec.acceptClient of ManagedCluster to true. 5. cluster + admin on managed cluster creates credential of kubeconfig to hub. \n Once + the hub creates the cluster namespace, the Klusterlet agent on the Managed + Cluster pushes the credential to the hub to use against the managed cluster's + kube-apiserver." 
type: object properties: apiVersion: @@ -41,68 +42,69 @@ spec: type: object spec: description: Spec represents a desired configuration for the agent on the - spoke cluster. + managed cluster. type: object properties: hubAcceptsClient: - description: AcceptSpokeCluster reprsents that hub accepts the join - of spoke agent. Its default value is false, and can only be set true - when the user on hub has an RBAC rule to UPDATE on the virtual subresource - of spokeclusters/accept. When the vaule is set true, a namespace whose - name is same as the name of SpokeCluster is created on hub representing - the spoke cluster, also role/rolebinding is created on the namespace - to grant the permision of access from agent on spoke. When the value - is set false, the namespace representing the spoke cluster is deleted. + description: hubAcceptsClient represents that hub accepts the join of + Klusterlet agent on the managed cluster to the hub. The default value + is false, and can only be set true when the user on hub has an RBAC + rule to UPDATE on the virtual subresource of managedclusters/accept. + When the value is set true, a namespace whose name is same as the + name of ManagedCluster is created on hub representing the managed + cluster, also role/rolebinding is created on the namespace to grant + the permision of access from agent on managed cluster. When the value + is set false, the namespace representing the managed cluster is deleted. type: boolean leaseDurationSeconds: description: LeaseDurationSeconds is used to coordinate the lease update - time of spoke agents. If its value is zero, the spoke agent will update - its lease per 60s by default + time of Klusterlet agents on the managed cluster. If its value is + zero, the Klusterlet agent will update its lease every 60s by default type: integer format: int32 - spokeClientConfigs: - description: SpokeClientConfigs represents a list of the apiserver address - of the spoke cluster. If it is empty, spoke cluster has no accessible - address to be visited from hub. + managedClusterClientConfigs: + description: ManagedClusterClientConfigs represents a list of the apiserver + address of the managed cluster. If it is empty, managed cluster has + no accessible address to be visited from hub. type: array items: description: ClientConfig represents the apiserver address of the - spoke cluster. TODO include credential to connect to spoke cluster + managed cluster. TODO include credential to connect to managed cluster kube-apiserver type: object properties: caBundle: description: CABundle is the ca bundle to connect to apiserver - of the spoke cluster. System certs are used if it is not set. + of the managed cluster. System certs are used if it is not set. type: string format: byte url: - description: URL is the url of apiserver endpoint of the spoke + description: URL is the url of apiserver endpoint of the managed cluster. type: string status: - description: Status represents the current status of joined spoke cluster + description: Status represents the current status of joined managed cluster type: object properties: allocatable: description: Allocatable represents the total allocatable resources - on the spoke cluster. + on the managed cluster. type: object additionalProperties: type: string capacity: description: Capacity represents the total resource capacity from all - nodeStatuses on the spoke cluster. + nodeStatuses on the managed cluster. 
type: object additionalProperties: type: string conditions: description: Conditions contains the different condition statuses for - this spoke cluster. + this managed cluster. type: array items: description: StatusCondition contains condition information for a - spoke cluster. + managed cluster. type: object properties: lastTransitionTime: @@ -126,12 +128,12 @@ spec: description: Type is the type of the cluster condition. type: string version: - description: Version represents the kubernetes version of the spoke + description: Version represents the kubernetes version of the managed cluster. type: object properties: kubernetes: - description: Kubernetes is the kubernetes version of spoke cluster + description: Kubernetes is the kubernetes version of managed cluster. type: string version: v1 versions: diff --git a/vendor/github.com/open-cluster-management/api/cluster/v1/generated.pb.go b/vendor/github.com/open-cluster-management/api/cluster/v1/generated.pb.go index bc573a2b0..448143e03 100644 --- a/vendor/github.com/open-cluster-management/api/cluster/v1/generated.pb.go +++ b/vendor/github.com/open-cluster-management/api/cluster/v1/generated.pb.go @@ -59,15 +59,15 @@ func (m *ClientConfig) XXX_DiscardUnknown() { var xxx_messageInfo_ClientConfig proto.InternalMessageInfo -func (m *SpokeCluster) Reset() { *m = SpokeCluster{} } -func (*SpokeCluster) ProtoMessage() {} -func (*SpokeCluster) Descriptor() ([]byte, []int) { +func (m *ManagedCluster) Reset() { *m = ManagedCluster{} } +func (*ManagedCluster) ProtoMessage() {} +func (*ManagedCluster) Descriptor() ([]byte, []int) { return fileDescriptor_65aa4961edf1a5e7, []int{1} } -func (m *SpokeCluster) XXX_Unmarshal(b []byte) error { +func (m *ManagedCluster) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *SpokeCluster) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ManagedCluster) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -75,27 +75,27 @@ func (m *SpokeCluster) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) } return b[:n], nil } -func (m *SpokeCluster) XXX_Merge(src proto.Message) { - xxx_messageInfo_SpokeCluster.Merge(m, src) +func (m *ManagedCluster) XXX_Merge(src proto.Message) { + xxx_messageInfo_ManagedCluster.Merge(m, src) } -func (m *SpokeCluster) XXX_Size() int { +func (m *ManagedCluster) XXX_Size() int { return m.Size() } -func (m *SpokeCluster) XXX_DiscardUnknown() { - xxx_messageInfo_SpokeCluster.DiscardUnknown(m) +func (m *ManagedCluster) XXX_DiscardUnknown() { + xxx_messageInfo_ManagedCluster.DiscardUnknown(m) } -var xxx_messageInfo_SpokeCluster proto.InternalMessageInfo +var xxx_messageInfo_ManagedCluster proto.InternalMessageInfo -func (m *SpokeClusterList) Reset() { *m = SpokeClusterList{} } -func (*SpokeClusterList) ProtoMessage() {} -func (*SpokeClusterList) Descriptor() ([]byte, []int) { +func (m *ManagedClusterList) Reset() { *m = ManagedClusterList{} } +func (*ManagedClusterList) ProtoMessage() {} +func (*ManagedClusterList) Descriptor() ([]byte, []int) { return fileDescriptor_65aa4961edf1a5e7, []int{2} } -func (m *SpokeClusterList) XXX_Unmarshal(b []byte) error { +func (m *ManagedClusterList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *SpokeClusterList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ManagedClusterList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { 
@@ -103,27 +103,27 @@ func (m *SpokeClusterList) XXX_Marshal(b []byte, deterministic bool) ([]byte, er } return b[:n], nil } -func (m *SpokeClusterList) XXX_Merge(src proto.Message) { - xxx_messageInfo_SpokeClusterList.Merge(m, src) +func (m *ManagedClusterList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ManagedClusterList.Merge(m, src) } -func (m *SpokeClusterList) XXX_Size() int { +func (m *ManagedClusterList) XXX_Size() int { return m.Size() } -func (m *SpokeClusterList) XXX_DiscardUnknown() { - xxx_messageInfo_SpokeClusterList.DiscardUnknown(m) +func (m *ManagedClusterList) XXX_DiscardUnknown() { + xxx_messageInfo_ManagedClusterList.DiscardUnknown(m) } -var xxx_messageInfo_SpokeClusterList proto.InternalMessageInfo +var xxx_messageInfo_ManagedClusterList proto.InternalMessageInfo -func (m *SpokeClusterSpec) Reset() { *m = SpokeClusterSpec{} } -func (*SpokeClusterSpec) ProtoMessage() {} -func (*SpokeClusterSpec) Descriptor() ([]byte, []int) { +func (m *ManagedClusterSpec) Reset() { *m = ManagedClusterSpec{} } +func (*ManagedClusterSpec) ProtoMessage() {} +func (*ManagedClusterSpec) Descriptor() ([]byte, []int) { return fileDescriptor_65aa4961edf1a5e7, []int{3} } -func (m *SpokeClusterSpec) XXX_Unmarshal(b []byte) error { +func (m *ManagedClusterSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *SpokeClusterSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ManagedClusterSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -131,27 +131,27 @@ func (m *SpokeClusterSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, er } return b[:n], nil } -func (m *SpokeClusterSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_SpokeClusterSpec.Merge(m, src) +func (m *ManagedClusterSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ManagedClusterSpec.Merge(m, src) } -func (m *SpokeClusterSpec) XXX_Size() int { +func (m *ManagedClusterSpec) XXX_Size() int { return m.Size() } -func (m *SpokeClusterSpec) XXX_DiscardUnknown() { - xxx_messageInfo_SpokeClusterSpec.DiscardUnknown(m) +func (m *ManagedClusterSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ManagedClusterSpec.DiscardUnknown(m) } -var xxx_messageInfo_SpokeClusterSpec proto.InternalMessageInfo +var xxx_messageInfo_ManagedClusterSpec proto.InternalMessageInfo -func (m *SpokeClusterStatus) Reset() { *m = SpokeClusterStatus{} } -func (*SpokeClusterStatus) ProtoMessage() {} -func (*SpokeClusterStatus) Descriptor() ([]byte, []int) { +func (m *ManagedClusterStatus) Reset() { *m = ManagedClusterStatus{} } +func (*ManagedClusterStatus) ProtoMessage() {} +func (*ManagedClusterStatus) Descriptor() ([]byte, []int) { return fileDescriptor_65aa4961edf1a5e7, []int{4} } -func (m *SpokeClusterStatus) XXX_Unmarshal(b []byte) error { +func (m *ManagedClusterStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *SpokeClusterStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ManagedClusterStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -159,27 +159,27 @@ func (m *SpokeClusterStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, } return b[:n], nil } -func (m *SpokeClusterStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_SpokeClusterStatus.Merge(m, src) +func (m *ManagedClusterStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ManagedClusterStatus.Merge(m, src) } -func (m 
*SpokeClusterStatus) XXX_Size() int { +func (m *ManagedClusterStatus) XXX_Size() int { return m.Size() } -func (m *SpokeClusterStatus) XXX_DiscardUnknown() { - xxx_messageInfo_SpokeClusterStatus.DiscardUnknown(m) +func (m *ManagedClusterStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ManagedClusterStatus.DiscardUnknown(m) } -var xxx_messageInfo_SpokeClusterStatus proto.InternalMessageInfo +var xxx_messageInfo_ManagedClusterStatus proto.InternalMessageInfo -func (m *SpokeVersion) Reset() { *m = SpokeVersion{} } -func (*SpokeVersion) ProtoMessage() {} -func (*SpokeVersion) Descriptor() ([]byte, []int) { +func (m *ManagedClusterVersion) Reset() { *m = ManagedClusterVersion{} } +func (*ManagedClusterVersion) ProtoMessage() {} +func (*ManagedClusterVersion) Descriptor() ([]byte, []int) { return fileDescriptor_65aa4961edf1a5e7, []int{5} } -func (m *SpokeVersion) XXX_Unmarshal(b []byte) error { +func (m *ManagedClusterVersion) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *SpokeVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ManagedClusterVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -187,17 +187,17 @@ func (m *SpokeVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) } return b[:n], nil } -func (m *SpokeVersion) XXX_Merge(src proto.Message) { - xxx_messageInfo_SpokeVersion.Merge(m, src) +func (m *ManagedClusterVersion) XXX_Merge(src proto.Message) { + xxx_messageInfo_ManagedClusterVersion.Merge(m, src) } -func (m *SpokeVersion) XXX_Size() int { +func (m *ManagedClusterVersion) XXX_Size() int { return m.Size() } -func (m *SpokeVersion) XXX_DiscardUnknown() { - xxx_messageInfo_SpokeVersion.DiscardUnknown(m) +func (m *ManagedClusterVersion) XXX_DiscardUnknown() { + xxx_messageInfo_ManagedClusterVersion.DiscardUnknown(m) } -var xxx_messageInfo_SpokeVersion proto.InternalMessageInfo +var xxx_messageInfo_ManagedClusterVersion proto.InternalMessageInfo func (m *StatusCondition) Reset() { *m = StatusCondition{} } func (*StatusCondition) ProtoMessage() {} @@ -229,13 +229,13 @@ var xxx_messageInfo_StatusCondition proto.InternalMessageInfo func init() { proto.RegisterType((*ClientConfig)(nil), "github.com.open_cluster_management.api.cluster.v1.ClientConfig") - proto.RegisterType((*SpokeCluster)(nil), "github.com.open_cluster_management.api.cluster.v1.SpokeCluster") - proto.RegisterType((*SpokeClusterList)(nil), "github.com.open_cluster_management.api.cluster.v1.SpokeClusterList") - proto.RegisterType((*SpokeClusterSpec)(nil), "github.com.open_cluster_management.api.cluster.v1.SpokeClusterSpec") - proto.RegisterType((*SpokeClusterStatus)(nil), "github.com.open_cluster_management.api.cluster.v1.SpokeClusterStatus") - proto.RegisterMapType((ResourceList)(nil), "github.com.open_cluster_management.api.cluster.v1.SpokeClusterStatus.AllocatableEntry") - proto.RegisterMapType((ResourceList)(nil), "github.com.open_cluster_management.api.cluster.v1.SpokeClusterStatus.CapacityEntry") - proto.RegisterType((*SpokeVersion)(nil), "github.com.open_cluster_management.api.cluster.v1.SpokeVersion") + proto.RegisterType((*ManagedCluster)(nil), "github.com.open_cluster_management.api.cluster.v1.ManagedCluster") + proto.RegisterType((*ManagedClusterList)(nil), "github.com.open_cluster_management.api.cluster.v1.ManagedClusterList") + proto.RegisterType((*ManagedClusterSpec)(nil), "github.com.open_cluster_management.api.cluster.v1.ManagedClusterSpec") + 
proto.RegisterType((*ManagedClusterStatus)(nil), "github.com.open_cluster_management.api.cluster.v1.ManagedClusterStatus") + proto.RegisterMapType((ResourceList)(nil), "github.com.open_cluster_management.api.cluster.v1.ManagedClusterStatus.AllocatableEntry") + proto.RegisterMapType((ResourceList)(nil), "github.com.open_cluster_management.api.cluster.v1.ManagedClusterStatus.CapacityEntry") + proto.RegisterType((*ManagedClusterVersion)(nil), "github.com.open_cluster_management.api.cluster.v1.ManagedClusterVersion") proto.RegisterType((*StatusCondition)(nil), "github.com.open_cluster_management.api.cluster.v1.StatusCondition") } @@ -244,65 +244,66 @@ func init() { } var fileDescriptor_65aa4961edf1a5e7 = []byte{ - // 925 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0x4f, 0x8f, 0xdb, 0x44, - 0x14, 0x8f, 0x93, 0xcd, 0x6e, 0x3a, 0x49, 0x69, 0x34, 0xea, 0xc1, 0x8a, 0xc0, 0x59, 0xe5, 0x80, - 0x02, 0xd2, 0x8e, 0x9b, 0x05, 0xa1, 0x8a, 0x0b, 0x5a, 0x67, 0x8b, 0x40, 0x6c, 0xf9, 0x33, 0x59, - 0x8a, 0x84, 0x10, 0x65, 0xe2, 0xbc, 0x7a, 0xdd, 0xf8, 0x9f, 0x3c, 0xe3, 0x48, 0xb9, 0x21, 0xae, - 0x5c, 0xf8, 0x0a, 0x1c, 0x39, 0x70, 0xe7, 0x13, 0xa0, 0x3d, 0xf6, 0x84, 0x7a, 0x0a, 0x6c, 0xf8, - 0x16, 0x3d, 0x21, 0x8f, 0x27, 0xb6, 0x37, 0x09, 0x65, 0xd9, 0xb6, 0x37, 0xcf, 0xfb, 0xf3, 0xfb, - 0xfd, 0xe6, 0xf9, 0xcd, 0x7b, 0xe8, 0xc8, 0x71, 0xc5, 0x59, 0x32, 0x26, 0x76, 0xe8, 0x9b, 0x61, - 0x04, 0xc1, 0x81, 0xed, 0x25, 0x5c, 0x40, 0x7c, 0xe0, 0xb3, 0x80, 0x39, 0xe0, 0x43, 0x20, 0x4c, - 0x16, 0xb9, 0xa6, 0x32, 0x9b, 0xb3, 0x81, 0xe9, 0x40, 0x00, 0x31, 0x13, 0x30, 0x21, 0x51, 0x1c, - 0x8a, 0x10, 0x0f, 0x0a, 0x08, 0x92, 0x42, 0x3c, 0x54, 0xb1, 0x0f, 0x0b, 0x08, 0xc2, 0x22, 0x97, - 0x28, 0x33, 0x99, 0x0d, 0x3a, 0x07, 0x25, 0x56, 0x27, 0x74, 0x42, 0x53, 0x22, 0x8d, 0x93, 0x47, - 0xf2, 0x24, 0x0f, 0xf2, 0x2b, 0x63, 0xe8, 0xbc, 0x3b, 0xbd, 0xcb, 0x89, 0x1b, 0xa6, 0x42, 0x7c, - 0x66, 0x9f, 0xb9, 0x01, 0xc4, 0x73, 0x33, 0x9a, 0x3a, 0x52, 0x59, 0x0c, 0x3c, 0x4c, 0x62, 0x1b, - 0xd6, 0x75, 0x3d, 0x37, 0x8b, 0x9b, 0x3e, 0x08, 0xb6, 0xe5, 0x36, 0x9d, 0xf7, 0xfe, 0x2d, 0x2b, - 0x4e, 0x02, 0xe1, 0xfa, 0x60, 0x72, 0xfb, 0x0c, 0x7c, 0xb6, 0x9e, 0xd7, 0xfb, 0x0a, 0xb5, 0x86, - 0x9e, 0x0b, 0x81, 0x18, 0x86, 0xc1, 0x23, 0xd7, 0xc1, 0x6f, 0xa0, 0x5a, 0x12, 0x7b, 0xba, 0xb6, - 0xaf, 0xf5, 0x6f, 0x58, 0xcd, 0xf3, 0x45, 0xb7, 0xb2, 0x5c, 0x74, 0x6b, 0x5f, 0xd2, 0x13, 0x9a, - 0xda, 0x71, 0x1f, 0x35, 0x6c, 0x66, 0x25, 0xc1, 0xc4, 0x03, 0xbd, 0xba, 0xaf, 0xf5, 0x5b, 0x56, - 0x6b, 0xb9, 0xe8, 0x36, 0x86, 0x47, 0x99, 0x8d, 0xe6, 0xde, 0xde, 0xef, 0x55, 0xd4, 0x1a, 0x45, - 0xe1, 0x14, 0x86, 0x59, 0xfd, 0xf0, 0x77, 0xa8, 0x91, 0x8a, 0x9f, 0x30, 0xc1, 0x24, 0x7c, 0xf3, - 0xf0, 0x0e, 0xc9, 0x44, 0x93, 0xb2, 0x68, 0x12, 0x4d, 0x9d, 0xd4, 0xc0, 0x49, 0x1a, 0x4d, 0x66, - 0x03, 0xf2, 0xd9, 0xf8, 0x31, 0xd8, 0xe2, 0x3e, 0x08, 0x66, 0x61, 0x25, 0x08, 0x15, 0x36, 0x9a, - 0xa3, 0x62, 0x40, 0x3b, 0x3c, 0x02, 0x5b, 0x0a, 0x6b, 0x1e, 0x0e, 0xc9, 0xff, 0xfe, 0xc1, 0xa4, - 0x2c, 0x78, 0x14, 0x81, 0x6d, 0xb5, 0x14, 0xe1, 0x4e, 0x7a, 0xa2, 0x12, 0x1e, 0xfb, 0x68, 0x97, - 0x0b, 0x26, 0x12, 0xae, 0xd7, 0x24, 0xd1, 0xbd, 0x17, 0x25, 0x92, 0x60, 0xd6, 0x6b, 0x8a, 0x6a, - 0x37, 0x3b, 0x53, 0x45, 0xd2, 0xfb, 0x43, 0x43, 0xed, 0x72, 0xf8, 0x89, 0xcb, 0x05, 0xfe, 0x66, - 0xa3, 0x98, 0xe4, 0x6a, 0xc5, 0x4c, 0xb3, 0x65, 0x29, 0xdb, 0x8a, 0xae, 0xb1, 0xb2, 0x94, 0x0a, - 0x39, 0x41, 0x75, 0x57, 0x80, 0xcf, 0xf5, 0xea, 0x7e, 0xad, 0xdf, 0x3c, 0xfc, 0xe0, 0x05, 0x2f, - 0x68, 0xdd, 0x54, 0x5c, 0xf5, 0x8f, 0x53, 
0x54, 0x9a, 0x81, 0xf7, 0x7e, 0xab, 0x5e, 0xbe, 0x58, - 0x5a, 0x62, 0xfc, 0xa3, 0x86, 0x30, 0xcf, 0x8c, 0x45, 0x57, 0x72, 0x5d, 0xbb, 0xb6, 0x90, 0x32, - 0x8e, 0xd5, 0x51, 0x42, 0xf0, 0x68, 0x83, 0x82, 0x6e, 0xa1, 0xc5, 0xc7, 0xa8, 0x7d, 0x96, 0x8c, - 0x8f, 0x6c, 0x1b, 0x22, 0xc1, 0x33, 0x97, 0xec, 0xae, 0x86, 0xa5, 0x2b, 0xa4, 0xf6, 0x47, 0x6b, - 0x7e, 0xba, 0x91, 0x81, 0x3f, 0x47, 0xb7, 0x3d, 0x60, 0x1c, 0x8e, 0x93, 0x98, 0x09, 0x37, 0x0c, - 0x46, 0x60, 0x87, 0xc1, 0x24, 0x6b, 0x9f, 0xba, 0xf5, 0xba, 0x42, 0xba, 0x7d, 0xb2, 0x25, 0x86, - 0x6e, 0xcd, 0xec, 0xfd, 0xb2, 0x8b, 0xf0, 0x66, 0x0b, 0xe1, 0x19, 0x42, 0xa9, 0xdf, 0x4d, 0x43, - 0x57, 0x35, 0xb3, 0xae, 0xf3, 0xf3, 0x24, 0xdc, 0x70, 0x05, 0x55, 0x3c, 0xbb, 0xdc, 0xc4, 0x69, - 0x89, 0x09, 0xff, 0xac, 0xa5, 0x63, 0x21, 0x62, 0xb6, 0x2b, 0xe6, 0xaa, 0x67, 0x46, 0x2f, 0xe5, - 0x51, 0x90, 0xa1, 0x42, 0xbd, 0x17, 0x88, 0x78, 0x6e, 0xdd, 0x59, 0xf5, 0xec, 0xca, 0xfc, 0x6c, - 0xd1, 0x6d, 0x51, 0x35, 0x48, 0xd3, 0x3e, 0xfe, 0xe1, 0xcf, 0xe2, 0xfc, 0x29, 0xf3, 0xe5, 0x3c, - 0xca, 0x22, 0xf1, 0xaf, 0x1a, 0x6a, 0x32, 0xcf, 0x0b, 0x6d, 0x26, 0xd8, 0xd8, 0x03, 0xbd, 0x26, - 0x65, 0x3e, 0x78, 0x39, 0x32, 0x8f, 0x0a, 0xe0, 0x4c, 0xe9, 0x3b, 0x4a, 0x69, 0xb3, 0xe4, 0xf9, - 0x4f, 0xb1, 0x65, 0x7d, 0xf8, 0x31, 0xda, 0x9b, 0x41, 0xcc, 0xdd, 0x30, 0xd0, 0x77, 0xe4, 0x03, - 0xbf, 0xf6, 0x2b, 0x7c, 0x90, 0xc1, 0x58, 0xb7, 0x94, 0xa6, 0x3d, 0x65, 0xa0, 0x2b, 0x82, 0xce, - 0x14, 0xdd, 0xbc, 0x54, 0x68, 0xdc, 0x46, 0xb5, 0x29, 0xcc, 0xb3, 0x2d, 0x40, 0xd3, 0x4f, 0x7c, - 0x8c, 0xea, 0x33, 0xe6, 0x25, 0xa0, 0x86, 0xeb, 0x73, 0xa7, 0x0d, 0x59, 0xed, 0x36, 0xf2, 0x45, - 0xc2, 0x02, 0xe1, 0x8a, 0x39, 0xcd, 0x92, 0xdf, 0xaf, 0xde, 0xd5, 0x3a, 0x01, 0x6a, 0xaf, 0x97, - 0xeb, 0x55, 0xf2, 0xf5, 0x2c, 0xb5, 0x87, 0xd4, 0xad, 0xf1, 0x21, 0x42, 0xd3, 0x64, 0x0c, 0x71, - 0x00, 0x02, 0xb8, 0x5a, 0x74, 0x79, 0x83, 0x7f, 0x92, 0x7b, 0x68, 0x29, 0xaa, 0xb7, 0xa8, 0xa2, - 0x5b, 0x6b, 0x8f, 0x02, 0xef, 0xa3, 0x1d, 0x31, 0x8f, 0x40, 0x21, 0xe4, 0x8b, 0xe2, 0x74, 0x1e, - 0x01, 0x95, 0x1e, 0xfc, 0x6d, 0xbe, 0x28, 0xaa, 0x32, 0xe6, 0xc3, 0xcb, 0x13, 0xfe, 0xd9, 0xa2, - 0x7b, 0xa5, 0x5d, 0x4f, 0x72, 0xce, 0xcb, 0x9b, 0x01, 0xcf, 0x10, 0xf6, 0x18, 0x17, 0xa7, 0x31, - 0x0b, 0xb8, 0xf4, 0x9f, 0xba, 0x3e, 0xa8, 0xa5, 0xf4, 0xf6, 0xd5, 0xd6, 0x41, 0x9a, 0x51, 0x4c, - 0xc5, 0x93, 0x0d, 0x34, 0xba, 0x85, 0x01, 0xbf, 0x89, 0x76, 0x63, 0x60, 0x5c, 0x75, 0xe6, 0x8d, - 0x62, 0x73, 0x51, 0x69, 0xa5, 0xca, 0x8b, 0xdf, 0x42, 0x7b, 0x3e, 0x70, 0xce, 0x1c, 0xd0, 0xeb, - 0x32, 0x30, 0xef, 0xc0, 0xfb, 0x99, 0x99, 0xae, 0xfc, 0x56, 0xff, 0xfc, 0xc2, 0xa8, 0x3c, 0xb9, - 0x30, 0x2a, 0x4f, 0x2f, 0x8c, 0xca, 0xf7, 0x4b, 0x43, 0x3b, 0x5f, 0x1a, 0xda, 0x93, 0xa5, 0xa1, - 0x3d, 0x5d, 0x1a, 0xda, 0x5f, 0x4b, 0x43, 0xfb, 0xe9, 0x6f, 0xa3, 0xf2, 0x75, 0x75, 0x36, 0xf8, - 0x27, 0x00, 0x00, 0xff, 0xff, 0xd8, 0x45, 0x45, 0xa3, 0xfa, 0x09, 0x00, 0x00, + // 938 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xf7, 0xda, 0x71, 0xe2, 0x8e, 0xdd, 0xd6, 0x1a, 0x05, 0xc9, 0x72, 0x61, 0x1d, 0x19, 0x09, + 0x19, 0xa4, 0xcc, 0xd6, 0x01, 0xa1, 0x8a, 0x0b, 0xf2, 0x3a, 0x85, 0xa2, 0x26, 0xfc, 0x99, 0xa6, + 0x80, 0x10, 0xa2, 0x8c, 0xd7, 0x2f, 0x9b, 0xc5, 0xfb, 0x4f, 0x3b, 0xb3, 0x96, 0x7c, 0x43, 0x7c, + 0x02, 0xbe, 0x03, 0x12, 0x57, 0xae, 0x7c, 0x84, 0x1c, 0x38, 0xf4, 0x82, 0x54, 0x71, 0x30, 0xc4, + 0x7c, 0x8b, 0x9e, 0xd0, 0xce, 0x8e, 0xd7, 0x6b, 0xc7, 0x0d, 0xa1, 0x09, 0xb7, 0x9d, 0xdf, 0x7b, + 0xef, 0xf7, 0xfb, 0xed, 0xf3, 0xdb, 
0x79, 0x46, 0x3d, 0xdb, 0x11, 0x27, 0xf1, 0x80, 0x58, 0x81, + 0x67, 0x04, 0x21, 0xf8, 0xbb, 0x96, 0x1b, 0x73, 0x01, 0xd1, 0xae, 0xc7, 0x7c, 0x66, 0x83, 0x07, + 0xbe, 0x30, 0x58, 0xe8, 0x18, 0x0a, 0x36, 0xc6, 0x5d, 0xc3, 0x06, 0x1f, 0x22, 0x26, 0x60, 0x48, + 0xc2, 0x28, 0x10, 0x01, 0xee, 0x2e, 0x28, 0x48, 0x42, 0xf1, 0x44, 0xe5, 0x3e, 0x59, 0x50, 0x10, + 0x16, 0x3a, 0x44, 0xc1, 0x64, 0xdc, 0x6d, 0xee, 0xe6, 0x54, 0xed, 0xc0, 0x0e, 0x0c, 0xc9, 0x34, + 0x88, 0x8f, 0xe5, 0x49, 0x1e, 0xe4, 0x53, 0xaa, 0xd0, 0x7c, 0x67, 0x74, 0x8f, 0x13, 0x27, 0x48, + 0x8c, 0x78, 0xcc, 0x3a, 0x71, 0x7c, 0x88, 0x26, 0x46, 0x38, 0xb2, 0xa5, 0xb3, 0x08, 0x78, 0x10, + 0x47, 0x16, 0xac, 0xfa, 0xba, 0xb0, 0x8a, 0x1b, 0x1e, 0x08, 0xb6, 0xe6, 0x6d, 0x9a, 0xef, 0xbe, + 0xa8, 0x2a, 0x8a, 0x7d, 0xe1, 0x78, 0x60, 0x70, 0xeb, 0x04, 0x3c, 0xb6, 0x5a, 0xd7, 0xfe, 0x02, + 0xd5, 0xfa, 0xae, 0x03, 0xbe, 0xe8, 0x07, 0xfe, 0xb1, 0x63, 0xe3, 0xd7, 0x50, 0x29, 0x8e, 0xdc, + 0x86, 0xb6, 0xa3, 0x75, 0x6e, 0x98, 0xd5, 0xd3, 0x69, 0xab, 0x30, 0x9b, 0xb6, 0x4a, 0x8f, 0xe9, + 0x01, 0x4d, 0x70, 0xdc, 0x41, 0x15, 0x8b, 0x99, 0xb1, 0x3f, 0x74, 0xa1, 0x51, 0xdc, 0xd1, 0x3a, + 0x35, 0xb3, 0x36, 0x9b, 0xb6, 0x2a, 0xfd, 0x5e, 0x8a, 0xd1, 0x2c, 0xda, 0xfe, 0xad, 0x88, 0x6e, + 0x1d, 0xca, 0x4e, 0x0e, 0xfb, 0x69, 0x07, 0xf1, 0xb7, 0xa8, 0x92, 0xd8, 0x1f, 0x32, 0xc1, 0xa4, + 0x40, 0x75, 0xef, 0x2e, 0x49, 0x6d, 0x93, 0xbc, 0x6d, 0x12, 0x8e, 0xec, 0x04, 0xe0, 0x24, 0xc9, + 0x26, 0xe3, 0x2e, 0xf9, 0x64, 0xf0, 0x1d, 0x58, 0xe2, 0x10, 0x04, 0x33, 0xb1, 0xb2, 0x84, 0x16, + 0x18, 0xcd, 0x58, 0xb1, 0x8d, 0x36, 0x78, 0x08, 0x96, 0xb4, 0x56, 0xdd, 0xbb, 0x4f, 0xfe, 0xf3, + 0x4f, 0x4c, 0x96, 0x2d, 0x3f, 0x0a, 0xc1, 0x32, 0x6b, 0x4a, 0x72, 0x23, 0x39, 0x51, 0x29, 0x80, + 0x03, 0xb4, 0xc9, 0x05, 0x13, 0x31, 0x6f, 0x94, 0xa4, 0xd4, 0x87, 0x57, 0x97, 0x92, 0x74, 0xe6, + 0x2d, 0x25, 0xb6, 0x99, 0x9e, 0xa9, 0x92, 0x69, 0xff, 0xa1, 0x21, 0xbc, 0x5c, 0x70, 0xe0, 0x70, + 0x81, 0xbf, 0x3e, 0xd7, 0x52, 0x72, 0xb9, 0x96, 0x26, 0xd5, 0xb2, 0xa1, 0x75, 0x25, 0x58, 0x99, + 0x23, 0xb9, 0x76, 0x1e, 0xa3, 0xb2, 0x23, 0xc0, 0xe3, 0x8d, 0xe2, 0x4e, 0xa9, 0x53, 0xdd, 0xeb, + 0x5d, 0xf9, 0x25, 0xcd, 0x9b, 0x4a, 0xad, 0xfc, 0x51, 0xc2, 0x4b, 0x53, 0xfa, 0xf6, 0xef, 0xc5, + 0xd5, 0x97, 0x4b, 0x5a, 0x8d, 0x7f, 0xd6, 0xd0, 0x1d, 0x6f, 0x09, 0xce, 0x8f, 0x2a, 0x6f, 0x68, + 0xd2, 0xd5, 0xfb, 0x2f, 0xe1, 0x2a, 0xcf, 0x63, 0xbe, 0xae, 0x3c, 0xdd, 0x39, 0x7c, 0xb1, 0x16, + 0xbd, 0xc8, 0x08, 0xde, 0x47, 0xf5, 0x93, 0x78, 0xd0, 0xb3, 0x2c, 0x08, 0x05, 0x4f, 0x43, 0x72, + 0x04, 0x2b, 0x66, 0x43, 0x71, 0xd7, 0x1f, 0xac, 0xc4, 0xe9, 0xb9, 0x0a, 0xfc, 0x29, 0xda, 0x76, + 0x81, 0x71, 0xd8, 0x8f, 0x23, 0x26, 0x9c, 0xc0, 0x7f, 0x04, 0x56, 0xe0, 0x0f, 0xd3, 0x09, 0x2b, + 0x9b, 0xaf, 0x2a, 0xa6, 0xed, 0x83, 0x35, 0x39, 0x74, 0x6d, 0x65, 0xfb, 0xd7, 0x4d, 0xb4, 0xbd, + 0x6e, 0xca, 0xf0, 0x18, 0xa1, 0x24, 0xc3, 0x49, 0x92, 0xe7, 0x7d, 0x34, 0x5f, 0xa2, 0x8f, 0x29, + 0x5d, 0x7f, 0x4e, 0xb5, 0xf8, 0x3a, 0x33, 0x88, 0xd3, 0x9c, 0x12, 0xfe, 0x49, 0x4b, 0xee, 0x8f, + 0x90, 0x59, 0x8e, 0x98, 0xa8, 0xa1, 0x7a, 0x7c, 0x4d, 0x5f, 0x0e, 0xe9, 0x2b, 0xde, 0xfb, 0xbe, + 0x88, 0x26, 0xe6, 0xdd, 0xf9, 0x58, 0xcf, 0xe1, 0xe7, 0xd3, 0x56, 0x8d, 0xaa, 0x3b, 0x37, 0x19, + 0xf5, 0x1f, 0xfe, 0x5c, 0x9c, 0x3f, 0x66, 0x9e, 0xbc, 0xba, 0xd2, 0x4c, 0xfc, 0x8b, 0x86, 0xaa, + 0xcc, 0x75, 0x03, 0x8b, 0x09, 0x36, 0x70, 0xa1, 0x51, 0x92, 0x46, 0xbf, 0xbc, 0x2e, 0xa3, 0xbd, + 0x05, 0x75, 0xea, 0xf5, 0x6d, 0xe5, 0xb5, 0x9a, 0x8b, 0xfc, 0xab, 0xdd, 0xbc, 0x43, 0xcc, 0xd1, + 0xd6, 0x18, 0x22, 0xee, 0x04, 0x7e, 0x63, 0x43, 0xde, 0x02, 
0x0f, 0xae, 0x6c, 0xf6, 0xf3, 0x94, + 0xcf, 0xbc, 0xad, 0xcc, 0x6d, 0x29, 0x80, 0xce, 0x95, 0x9a, 0x23, 0x74, 0x73, 0xa9, 0xe7, 0xb8, + 0x8e, 0x4a, 0x23, 0x98, 0xa4, 0xbb, 0x83, 0x26, 0x8f, 0x78, 0x1f, 0x95, 0xc7, 0xcc, 0x8d, 0x41, + 0x5d, 0xc8, 0x17, 0xde, 0x4d, 0x64, 0xbe, 0x11, 0xc9, 0x67, 0x31, 0xf3, 0x85, 0x23, 0x26, 0x34, + 0x2d, 0x7e, 0xaf, 0x78, 0x4f, 0x6b, 0xfa, 0xa8, 0xbe, 0xda, 0xb7, 0xff, 0x53, 0xaf, 0xfd, 0x10, + 0xbd, 0xb2, 0xb6, 0x1f, 0x78, 0x0f, 0xa1, 0x51, 0x3c, 0x80, 0xc8, 0x07, 0x01, 0x5c, 0xed, 0xc9, + 0x6c, 0xec, 0x1f, 0x66, 0x11, 0x9a, 0xcb, 0x6a, 0x4f, 0x8b, 0xe8, 0xf6, 0xca, 0xa7, 0x82, 0x77, + 0xd0, 0x86, 0x98, 0x84, 0xa0, 0x18, 0xb2, 0x1d, 0x73, 0x34, 0x09, 0x81, 0xca, 0x08, 0xfe, 0x26, + 0xdb, 0x31, 0x45, 0x99, 0xf3, 0xc1, 0xf2, 0x6a, 0x78, 0x3e, 0x6d, 0x5d, 0xea, 0xaf, 0x02, 0xc9, + 0x34, 0x97, 0x57, 0x0a, 0x1e, 0x23, 0xec, 0x32, 0x2e, 0x8e, 0x22, 0xe6, 0x73, 0x19, 0x3f, 0x72, + 0x3c, 0x50, 0xfb, 0xec, 0xad, 0xcb, 0x6d, 0x91, 0xa4, 0xc2, 0x6c, 0x2a, 0x5f, 0xf8, 0xe0, 0x1c, + 0x1b, 0x5d, 0xa3, 0x80, 0xdf, 0x40, 0x9b, 0x11, 0x30, 0xae, 0x66, 0xf5, 0xc6, 0x62, 0xe5, 0x51, + 0x89, 0x52, 0x15, 0xc5, 0x6f, 0xa2, 0x2d, 0x0f, 0x38, 0x67, 0x36, 0x34, 0xca, 0x32, 0x31, 0x1b, + 0xc5, 0xc3, 0x14, 0xa6, 0xf3, 0xb8, 0xd9, 0x39, 0x3d, 0xd3, 0x0b, 0x4f, 0xcf, 0xf4, 0xc2, 0xb3, + 0x33, 0xbd, 0xf0, 0xfd, 0x4c, 0xd7, 0x4e, 0x67, 0xba, 0xf6, 0x74, 0xa6, 0x6b, 0xcf, 0x66, 0xba, + 0xf6, 0xd7, 0x4c, 0xd7, 0x7e, 0xfc, 0x5b, 0x2f, 0x7c, 0x55, 0x1c, 0x77, 0xff, 0x09, 0x00, 0x00, + 0xff, 0xff, 0xbd, 0x12, 0x9e, 0xec, 0x39, 0x0a, 0x00, 0x00, } func (m *ClientConfig) Marshal() (dAtA []byte, err error) { @@ -340,7 +341,7 @@ func (m *ClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *SpokeCluster) Marshal() (dAtA []byte, err error) { +func (m *ManagedCluster) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -350,12 +351,12 @@ func (m *SpokeCluster) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SpokeCluster) MarshalTo(dAtA []byte) (int, error) { +func (m *ManagedCluster) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *SpokeCluster) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ManagedCluster) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -393,7 +394,7 @@ func (m *SpokeCluster) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *SpokeClusterList) Marshal() (dAtA []byte, err error) { +func (m *ManagedClusterList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -403,12 +404,12 @@ func (m *SpokeClusterList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SpokeClusterList) MarshalTo(dAtA []byte) (int, error) { +func (m *ManagedClusterList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *SpokeClusterList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ManagedClusterList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -440,7 +441,7 @@ func (m *SpokeClusterList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *SpokeClusterSpec) Marshal() (dAtA []byte, err error) { +func (m *ManagedClusterSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = 
make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -450,12 +451,12 @@ func (m *SpokeClusterSpec) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SpokeClusterSpec) MarshalTo(dAtA []byte) (int, error) { +func (m *ManagedClusterSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *SpokeClusterSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ManagedClusterSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -471,10 +472,10 @@ func (m *SpokeClusterSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { } i-- dAtA[i] = 0x10 - if len(m.SpokeClientConfigs) > 0 { - for iNdEx := len(m.SpokeClientConfigs) - 1; iNdEx >= 0; iNdEx-- { + if len(m.ManagedClusterClientConfigs) > 0 { + for iNdEx := len(m.ManagedClusterClientConfigs) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.SpokeClientConfigs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ManagedClusterClientConfigs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -488,7 +489,7 @@ func (m *SpokeClusterSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *SpokeClusterStatus) Marshal() (dAtA []byte, err error) { +func (m *ManagedClusterStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -498,12 +499,12 @@ func (m *SpokeClusterStatus) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SpokeClusterStatus) MarshalTo(dAtA []byte) (int, error) { +func (m *ManagedClusterStatus) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *SpokeClusterStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ManagedClusterStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -593,7 +594,7 @@ func (m *SpokeClusterStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *SpokeVersion) Marshal() (dAtA []byte, err error) { +func (m *ManagedClusterVersion) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -603,12 +604,12 @@ func (m *SpokeVersion) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SpokeVersion) MarshalTo(dAtA []byte) (int, error) { +func (m *ManagedClusterVersion) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *SpokeVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ManagedClusterVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -700,7 +701,7 @@ func (m *ClientConfig) Size() (n int) { return n } -func (m *SpokeCluster) Size() (n int) { +func (m *ManagedCluster) Size() (n int) { if m == nil { return 0 } @@ -715,7 +716,7 @@ func (m *SpokeCluster) Size() (n int) { return n } -func (m *SpokeClusterList) Size() (n int) { +func (m *ManagedClusterList) Size() (n int) { if m == nil { return 0 } @@ -732,14 +733,14 @@ func (m *SpokeClusterList) Size() (n int) { return n } -func (m *SpokeClusterSpec) Size() (n int) { +func (m *ManagedClusterSpec) Size() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.SpokeClientConfigs) > 0 { - for _, e := range m.SpokeClientConfigs { + if len(m.ManagedClusterClientConfigs) > 0 { + for _, e := range m.ManagedClusterClientConfigs { l = e.Size() n += 1 
+ l + sovGenerated(uint64(l)) } @@ -749,7 +750,7 @@ func (m *SpokeClusterSpec) Size() (n int) { return n } -func (m *SpokeClusterStatus) Size() (n int) { +func (m *ManagedClusterStatus) Size() (n int) { if m == nil { return 0 } @@ -784,7 +785,7 @@ func (m *SpokeClusterStatus) Size() (n int) { return n } -func (m *SpokeVersion) Size() (n int) { +func (m *ManagedClusterVersion) Size() (n int) { if m == nil { return 0 } @@ -831,52 +832,52 @@ func (this *ClientConfig) String() string { }, "") return s } -func (this *SpokeCluster) String() string { +func (this *ManagedCluster) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&SpokeCluster{`, + s := strings.Join([]string{`&ManagedCluster{`, `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SpokeClusterSpec", "SpokeClusterSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SpokeClusterStatus", "SpokeClusterStatus", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ManagedClusterSpec", "ManagedClusterSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ManagedClusterStatus", "ManagedClusterStatus", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *SpokeClusterList) String() string { +func (this *ManagedClusterList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]SpokeCluster{" + repeatedStringForItems := "[]ManagedCluster{" for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "SpokeCluster", "SpokeCluster", 1), `&`, ``, 1) + "," + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ManagedCluster", "ManagedCluster", 1), `&`, ``, 1) + "," } repeatedStringForItems += "}" - s := strings.Join([]string{`&SpokeClusterList{`, + s := strings.Join([]string{`&ManagedClusterList{`, `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s } -func (this *SpokeClusterSpec) String() string { +func (this *ManagedClusterSpec) String() string { if this == nil { return "nil" } - repeatedStringForSpokeClientConfigs := "[]ClientConfig{" - for _, f := range this.SpokeClientConfigs { - repeatedStringForSpokeClientConfigs += strings.Replace(strings.Replace(f.String(), "ClientConfig", "ClientConfig", 1), `&`, ``, 1) + "," + repeatedStringForManagedClusterClientConfigs := "[]ClientConfig{" + for _, f := range this.ManagedClusterClientConfigs { + repeatedStringForManagedClusterClientConfigs += strings.Replace(strings.Replace(f.String(), "ClientConfig", "ClientConfig", 1), `&`, ``, 1) + "," } - repeatedStringForSpokeClientConfigs += "}" - s := strings.Join([]string{`&SpokeClusterSpec{`, - `SpokeClientConfigs:` + repeatedStringForSpokeClientConfigs + `,`, + repeatedStringForManagedClusterClientConfigs += "}" + s := strings.Join([]string{`&ManagedClusterSpec{`, + `ManagedClusterClientConfigs:` + repeatedStringForManagedClusterClientConfigs + `,`, `HubAcceptsClient:` + fmt.Sprintf("%v", this.HubAcceptsClient) + `,`, `LeaseDurationSeconds:` + fmt.Sprintf("%v", this.LeaseDurationSeconds) + `,`, `}`, }, "") return s } -func (this *SpokeClusterStatus) String() string { +func (this *ManagedClusterStatus) String() string { if this == 
nil { return "nil" } @@ -905,20 +906,20 @@ func (this *SpokeClusterStatus) String() string { mapStringForAllocatable += fmt.Sprintf("%v: %v,", k, this.Allocatable[ResourceName(k)]) } mapStringForAllocatable += "}" - s := strings.Join([]string{`&SpokeClusterStatus{`, + s := strings.Join([]string{`&ManagedClusterStatus{`, `Conditions:` + repeatedStringForConditions + `,`, `Capacity:` + mapStringForCapacity + `,`, `Allocatable:` + mapStringForAllocatable + `,`, - `Version:` + strings.Replace(strings.Replace(this.Version.String(), "SpokeVersion", "SpokeVersion", 1), `&`, ``, 1) + `,`, + `Version:` + strings.Replace(strings.Replace(this.Version.String(), "ManagedClusterVersion", "ManagedClusterVersion", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *SpokeVersion) String() string { +func (this *ManagedClusterVersion) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&SpokeVersion{`, + s := strings.Join([]string{`&ManagedClusterVersion{`, `Kubernetes:` + fmt.Sprintf("%v", this.Kubernetes) + `,`, `}`, }, "") @@ -1065,7 +1066,7 @@ func (m *ClientConfig) Unmarshal(dAtA []byte) error { } return nil } -func (m *SpokeCluster) Unmarshal(dAtA []byte) error { +func (m *ManagedCluster) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1088,10 +1089,10 @@ func (m *SpokeCluster) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SpokeCluster: wiretype end group for non-group") + return fmt.Errorf("proto: ManagedCluster: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SpokeCluster: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ManagedCluster: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -1217,7 +1218,7 @@ func (m *SpokeCluster) Unmarshal(dAtA []byte) error { } return nil } -func (m *SpokeClusterList) Unmarshal(dAtA []byte) error { +func (m *ManagedClusterList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1240,10 +1241,10 @@ func (m *SpokeClusterList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SpokeClusterList: wiretype end group for non-group") + return fmt.Errorf("proto: ManagedClusterList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SpokeClusterList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ManagedClusterList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -1308,7 +1309,7 @@ func (m *SpokeClusterList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, SpokeCluster{}) + m.Items = append(m.Items, ManagedCluster{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -1337,7 +1338,7 @@ func (m *SpokeClusterList) Unmarshal(dAtA []byte) error { } return nil } -func (m *SpokeClusterSpec) Unmarshal(dAtA []byte) error { +func (m *ManagedClusterSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1360,15 +1361,15 @@ func (m *SpokeClusterSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SpokeClusterSpec: wiretype end group for non-group") + return fmt.Errorf("proto: ManagedClusterSpec: wiretype end group for non-group") } if 
fieldNum <= 0 { - return fmt.Errorf("proto: SpokeClusterSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ManagedClusterSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SpokeClientConfigs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ManagedClusterClientConfigs", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1395,8 +1396,8 @@ func (m *SpokeClusterSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.SpokeClientConfigs = append(m.SpokeClientConfigs, ClientConfig{}) - if err := m.SpokeClientConfigs[len(m.SpokeClientConfigs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.ManagedClusterClientConfigs = append(m.ManagedClusterClientConfigs, ClientConfig{}) + if err := m.ManagedClusterClientConfigs[len(m.ManagedClusterClientConfigs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1463,7 +1464,7 @@ func (m *SpokeClusterSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *SpokeClusterStatus) Unmarshal(dAtA []byte) error { +func (m *ManagedClusterStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1486,10 +1487,10 @@ func (m *SpokeClusterStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SpokeClusterStatus: wiretype end group for non-group") + return fmt.Errorf("proto: ManagedClusterStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SpokeClusterStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ManagedClusterStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -1841,7 +1842,7 @@ func (m *SpokeClusterStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *SpokeVersion) Unmarshal(dAtA []byte) error { +func (m *ManagedClusterVersion) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1864,10 +1865,10 @@ func (m *SpokeVersion) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SpokeVersion: wiretype end group for non-group") + return fmt.Errorf("proto: ManagedClusterVersion: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SpokeVersion: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ManagedClusterVersion: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: diff --git a/vendor/github.com/open-cluster-management/api/cluster/v1/generated.proto b/vendor/github.com/open-cluster-management/api/cluster/v1/generated.proto index 288fb026a..2a6667c74 100644 --- a/vendor/github.com/open-cluster-management/api/cluster/v1/generated.proto +++ b/vendor/github.com/open-cluster-management/api/cluster/v1/generated.proto @@ -12,105 +12,106 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1"; -// ClientConfig represents the apiserver address of the spoke cluster. -// TODO include credential to connect to spoke cluster kube-apiserver +// ClientConfig represents the apiserver address of the managed cluster. 
+// TODO include credential to connect to managed cluster kube-apiserver message ClientConfig { - // URL is the url of apiserver endpoint of the spoke cluster. + // URL is the url of apiserver endpoint of the managed cluster. // +required optional string url = 1; - // CABundle is the ca bundle to connect to apiserver of the spoke cluster. + // CABundle is the ca bundle to connect to apiserver of the managed cluster. // System certs are used if it is not set. // +optional optional bytes caBundle = 2; } -// SpokeCluster represents the desired state and current status of spoke -// cluster. SpokeCluster is a cluster scoped resource. The name is the cluster +// ManagedCluster represents the desired state and current status of managed +// cluster. ManagedCluster is a cluster scoped resource. The name is the cluster // UID. // // The cluster join process follows a double opt-in process: // -// 1. agent on spoke cluster creates CSR on hub with cluster UID and agent name. -// 2. agent on spoke cluster creates spokecluster on hub. -// 3. cluster admin on hub approves the CSR for the spoke's cluster UID and agent name. -// 4. cluster admin set spec.acceptSpokeCluster of spokecluster to true. -// 5. cluster admin on spoke creates credential of kubeconfig to spoke. +// 1. agent on managed cluster creates CSR on hub with cluster UID and agent name. +// 2. agent on managed cluster creates ManagedCluster on hub. +// 3. cluster admin on hub approves the CSR for the ManagedCluster's UID and agent name. +// 4. cluster admin sets spec.acceptClient of ManagedCluster to true. +// 5. cluster admin on managed cluster creates credential of kubeconfig to hub. // -// Once the hub creates the cluster namespace, the spoke agent pushes the -// credential to the hub to use against the spoke's kube-apiserver. -message SpokeCluster { +// Once the hub creates the cluster namespace, the Klusterlet agent on the Managed Cluster +// pushes the credential to the hub to use against the managed cluster's kube-apiserver. +message ManagedCluster { optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec represents a desired configuration for the agent on the spoke cluster. - optional SpokeClusterSpec spec = 2; + // Spec represents a desired configuration for the agent on the managed cluster. + optional ManagedClusterSpec spec = 2; - // Status represents the current status of joined spoke cluster + // Status represents the current status of joined managed cluster // +optional - optional SpokeClusterStatus status = 3; + optional ManagedClusterStatus status = 3; } -// SpokeClusterList is a collection of spoke cluster. -message SpokeClusterList { +// ManagedClusterList is a collection of managed cluster. +message ManagedClusterList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of spoke cluster. - repeated SpokeCluster items = 2; + // Items is a list of managed cluster. + repeated ManagedCluster items = 2; } -// SpokeClusterSpec provides the information to securely connect to a remote server +// ManagedClusterSpec provides the information to securely connect to a remote server // and verify its identity. -message SpokeClusterSpec { - // SpokeClientConfigs represents a list of the apiserver address of the spoke cluster. - // If it is empty, spoke cluster has no accessible address to be visited from hub. 
+message ManagedClusterSpec { + // ManagedClusterClientConfigs represents a list of the apiserver address of the managed cluster. + // If it is empty, managed cluster has no accessible address to be visited from hub. // +optional - repeated ClientConfig spokeClientConfigs = 1; + repeated ClientConfig managedClusterClientConfigs = 1; - // AcceptSpokeCluster reprsents that hub accepts the join of spoke agent. - // Its default value is false, and can only be set true when the user on hub - // has an RBAC rule to UPDATE on the virtual subresource of spokeclusters/accept. - // When the vaule is set true, a namespace whose name is same as the name of SpokeCluster - // is created on hub representing the spoke cluster, also role/rolebinding is created on - // the namespace to grant the permision of access from agent on spoke. - // When the value is set false, the namespace representing the spoke cluster is + // hubAcceptsClient represents that hub accepts the join of Klusterlet agent on + // the managed cluster to the hub. The default value is false, and can only be set + // true when the user on hub has an RBAC rule to UPDATE on the virtual subresource + // of managedclusters/accept. + // When the value is set true, a namespace whose name is same as the name of ManagedCluster + // is created on hub representing the managed cluster, also role/rolebinding is created on + // the namespace to grant the permision of access from agent on managed cluster. + // When the value is set false, the namespace representing the managed cluster is // deleted. // +required optional bool hubAcceptsClient = 2; - // LeaseDurationSeconds is used to coordinate the lease update time of spoke agents. - // If its value is zero, the spoke agent will update its lease per 60s by default + // LeaseDurationSeconds is used to coordinate the lease update time of Klusterlet agents on the managed cluster. + // If its value is zero, the Klusterlet agent will update its lease every 60s by default // +optional optional int32 leaseDurationSeconds = 3; } -// SpokeClusterStatus represents the current status of joined spoke cluster. -message SpokeClusterStatus { - // Conditions contains the different condition statuses for this spoke cluster. +// ManagedClusterStatus represents the current status of joined managed cluster. +message ManagedClusterStatus { + // Conditions contains the different condition statuses for this managed cluster. repeated StatusCondition conditions = 1; // Capacity represents the total resource capacity from all nodeStatuses - // on the spoke cluster. + // on the managed cluster. map capacity = 2; - // Allocatable represents the total allocatable resources on the spoke cluster. + // Allocatable represents the total allocatable resources on the managed cluster. map allocatable = 3; - // Version represents the kubernetes version of the spoke cluster. - optional SpokeVersion version = 4; + // Version represents the kubernetes version of the managed cluster. + optional ManagedClusterVersion version = 4; } -// SpokeVersion represents version information about the spoke cluster. -// TODO add spoke agent versions -message SpokeVersion { - // Kubernetes is the kubernetes version of spoke cluster +// ManagedClusterVersion represents version information about the managed cluster. +// TODO add managed agent versions +message ManagedClusterVersion { + // Kubernetes is the kubernetes version of managed cluster. 
// +optional optional string kubernetes = 1; } -// StatusCondition contains condition information for a spoke cluster. +// StatusCondition contains condition information for a managed cluster. message StatusCondition { // Type is the type of the cluster condition. // +required diff --git a/vendor/github.com/open-cluster-management/api/cluster/v1/register.go b/vendor/github.com/open-cluster-management/api/cluster/v1/register.go index d146c6a8b..6ac0fdc41 100644 --- a/vendor/github.com/open-cluster-management/api/cluster/v1/register.go +++ b/vendor/github.com/open-cluster-management/api/cluster/v1/register.go @@ -30,8 +30,8 @@ func Resource(resource string) schema.GroupResource { // Adds the list of known types to api.Scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(GroupVersion, - &SpokeCluster{}, - &SpokeClusterList{}, + &ManagedCluster{}, + &ManagedClusterList{}, ) metav1.AddToGroupVersion(scheme, GroupVersion) return nil diff --git a/vendor/github.com/open-cluster-management/api/cluster/v1/types.go b/vendor/github.com/open-cluster-management/api/cluster/v1/types.go index 3ba3da2cf..6818c904b 100644 --- a/vendor/github.com/open-cluster-management/api/cluster/v1/types.go +++ b/vendor/github.com/open-cluster-management/api/cluster/v1/types.go @@ -9,116 +9,117 @@ import ( // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// SpokeCluster represents the desired state and current status of spoke -// cluster. SpokeCluster is a cluster scoped resource. The name is the cluster +// ManagedCluster represents the desired state and current status of managed +// cluster. ManagedCluster is a cluster scoped resource. The name is the cluster // UID. // // The cluster join process follows a double opt-in process: // -// 1. agent on spoke cluster creates CSR on hub with cluster UID and agent name. -// 2. agent on spoke cluster creates spokecluster on hub. -// 3. cluster admin on hub approves the CSR for the spoke's cluster UID and agent name. -// 4. cluster admin set spec.acceptSpokeCluster of spokecluster to true. -// 5. cluster admin on spoke creates credential of kubeconfig to spoke. +// 1. agent on managed cluster creates CSR on hub with cluster UID and agent name. +// 2. agent on managed cluster creates ManagedCluster on hub. +// 3. cluster admin on hub approves the CSR for the ManagedCluster's UID and agent name. +// 4. cluster admin sets spec.acceptClient of ManagedCluster to true. +// 5. cluster admin on managed cluster creates credential of kubeconfig to hub. // -// Once the hub creates the cluster namespace, the spoke agent pushes the -// credential to the hub to use against the spoke's kube-apiserver. -type SpokeCluster struct { +// Once the hub creates the cluster namespace, the Klusterlet agent on the Managed Cluster +// pushes the credential to the hub to use against the managed cluster's kube-apiserver. +type ManagedCluster struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec represents a desired configuration for the agent on the spoke cluster. - Spec SpokeClusterSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + // Spec represents a desired configuration for the agent on the managed cluster. 
+ Spec ManagedClusterSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - // Status represents the current status of joined spoke cluster + // Status represents the current status of joined managed cluster // +optional - Status SpokeClusterStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` + Status ManagedClusterStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } -// SpokeClusterSpec provides the information to securely connect to a remote server +// ManagedClusterSpec provides the information to securely connect to a remote server // and verify its identity. -type SpokeClusterSpec struct { - // SpokeClientConfigs represents a list of the apiserver address of the spoke cluster. - // If it is empty, spoke cluster has no accessible address to be visited from hub. +type ManagedClusterSpec struct { + // ManagedClusterClientConfigs represents a list of the apiserver address of the managed cluster. + // If it is empty, managed cluster has no accessible address to be visited from hub. // +optional - SpokeClientConfigs []ClientConfig `json:"spokeClientConfigs,omitempty" protobuf:"bytes,1,opt,name=spokeClientConfigs"` + ManagedClusterClientConfigs []ClientConfig `json:"managedClusterClientConfigs,omitempty" protobuf:"bytes,1,opt,name=managedClusterClientConfigs"` - // AcceptSpokeCluster reprsents that hub accepts the join of spoke agent. - // Its default value is false, and can only be set true when the user on hub - // has an RBAC rule to UPDATE on the virtual subresource of spokeclusters/accept. - // When the vaule is set true, a namespace whose name is same as the name of SpokeCluster - // is created on hub representing the spoke cluster, also role/rolebinding is created on - // the namespace to grant the permision of access from agent on spoke. - // When the value is set false, the namespace representing the spoke cluster is + // hubAcceptsClient represents that hub accepts the join of Klusterlet agent on + // the managed cluster to the hub. The default value is false, and can only be set + // true when the user on hub has an RBAC rule to UPDATE on the virtual subresource + // of managedclusters/accept. + // When the value is set true, a namespace whose name is same as the name of ManagedCluster + // is created on hub representing the managed cluster, also role/rolebinding is created on + // the namespace to grant the permision of access from agent on managed cluster. + // When the value is set false, the namespace representing the managed cluster is // deleted. // +required HubAcceptsClient bool `json:"hubAcceptsClient" protobuf:"bytes,2,opt,name=hubAcceptsClient"` - // LeaseDurationSeconds is used to coordinate the lease update time of spoke agents. - // If its value is zero, the spoke agent will update its lease per 60s by default + // LeaseDurationSeconds is used to coordinate the lease update time of Klusterlet agents on the managed cluster. + // If its value is zero, the Klusterlet agent will update its lease every 60s by default // +optional LeaseDurationSeconds int32 `json:"leaseDurationSeconds,omitempty" protobuf:"varint,3,opt,name=leaseDurationSeconds"` } -// ClientConfig represents the apiserver address of the spoke cluster. -// TODO include credential to connect to spoke cluster kube-apiserver +// ClientConfig represents the apiserver address of the managed cluster. +// TODO include credential to connect to managed cluster kube-apiserver type ClientConfig struct { - // URL is the url of apiserver endpoint of the spoke cluster. 
+ // URL is the url of apiserver endpoint of the managed cluster. // +required URL string `json:"url" protobuf:"bytes,1,opt,name=url"` - // CABundle is the ca bundle to connect to apiserver of the spoke cluster. + // CABundle is the ca bundle to connect to apiserver of the managed cluster. // System certs are used if it is not set. // +optional CABundle []byte `json:"caBundle,omitempty" protobuf:"bytes,2,opt,name=caBundle"` } -// SpokeClusterStatus represents the current status of joined spoke cluster. -type SpokeClusterStatus struct { - // Conditions contains the different condition statuses for this spoke cluster. +// ManagedClusterStatus represents the current status of joined managed cluster. +type ManagedClusterStatus struct { + // Conditions contains the different condition statuses for this managed cluster. Conditions []StatusCondition `json:"conditions" protobuf:"bytes,1,rep,name=conditions"` // Capacity represents the total resource capacity from all nodeStatuses - // on the spoke cluster. + // on the managed cluster. Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,2,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"` - // Allocatable represents the total allocatable resources on the spoke cluster. + // Allocatable represents the total allocatable resources on the managed cluster. Allocatable ResourceList `json:"allocatable,omitempty" protobuf:"bytes,3,rep,name=allocatable,casttype=ResourceList,castkey=ResourceName"` - // Version represents the kubernetes version of the spoke cluster. - Version SpokeVersion `json:"version,omitempty" protobuf:"bytes,4,opt,name=version"` + // Version represents the kubernetes version of the managed cluster. + Version ManagedClusterVersion `json:"version,omitempty" protobuf:"bytes,4,opt,name=version"` } -// SpokeVersion represents version information about the spoke cluster. -// TODO add spoke agent versions -type SpokeVersion struct { - // Kubernetes is the kubernetes version of spoke cluster +// ManagedClusterVersion represents version information about the managed cluster. +// TODO add managed agent versions +type ManagedClusterVersion struct { + // Kubernetes is the kubernetes version of managed cluster. 
// +optional Kubernetes string `json:"kubernetes,omitempty" protobuf:"bytes,1,opt,name=kubernetes"` } const ( - // SpokeClusterConditionJoined means the spoke cluster has successfully joined the hub - SpokeClusterConditionJoined string = "SpokeClusterJoined" - // SpokeClusterConditionHubAccepted means the request to join the cluster is + // ManagedClusterConditionJoined means the managed cluster has successfully joined the hub + ManagedClusterConditionJoined string = "ManagedClusterJoined" + // ManagedClusterConditionHubAccepted means the request to join the cluster is // approved by cluster-admin on hub - SpokeClusterConditionHubAccepted string = "HubAcceptedSpoke" - // SpokeClusterConditionHubDenied means the request to join the cluster is denied by + ManagedClusterConditionHubAccepted string = "HubAcceptedManagedCluster" + // ManagedClusterConditionHubDenied means the request to join the cluster is denied by // cluster-admin on hub - SpokeClusterConditionHubDenied string = "HubDeniedSpoke" - // SpokeClusterConditionAvailable means the spoke cluster is available, if a spoke - // cluster is available, the kube-apiserver is health and the registration agent is - // running with the minimum deployment on this spoke cluster - SpokeClusterConditionAvailable string = "SpokeClusterConditionAvailable" + ManagedClusterConditionHubDenied string = "HubDeniedManagedCluster" + // ManagedClusterConditionAvailable means the managed cluster is available, if a managed + // cluster is available, the kube-apiserver is healthy and the Klusterlet agent is + // running with the minimum deployment on this managed cluster + ManagedClusterConditionAvailable string = "ManagedClusterConditionAvailable" ) // ResourceName is the name identifying various resources in a ResourceList. type ResourceName string const ( - // CPU, in cores. (500m = .5 cores) + // ResourceCPU defines the number of CPUs in cores. (500m = .5 cores) ResourceCPU ResourceName = "cpu" - // Memory, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + // ResourceMemory defines the amount of memory in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) ResourceMemory ResourceName = "memory" ) @@ -126,7 +127,7 @@ const ( // matches the ResourceList defined in k8s.io/api/core/v1 type ResourceList map[ResourceName]resource.Quantity -// StatusCondition contains condition information for a spoke cluster. +// StatusCondition contains condition information for a managed cluster. type StatusCondition struct { // Type is the type of the cluster condition. // +required @@ -151,14 +152,14 @@ type StatusCondition struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// SpokeClusterList is a collection of spoke cluster. -type SpokeClusterList struct { +// ManagedClusterList is a collection of managed cluster. +type ManagedClusterList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of spoke cluster. - Items []SpokeCluster `json:"items" protobuf:"bytes,2,rep,name=items"` + // Items is a list of managed cluster. 
+ Items []ManagedCluster `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/github.com/open-cluster-management/api/cluster/v1/zz_generated.deepcopy.go b/vendor/github.com/open-cluster-management/api/cluster/v1/zz_generated.deepcopy.go index a60b68404..8e44582e5 100644 --- a/vendor/github.com/open-cluster-management/api/cluster/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/open-cluster-management/api/cluster/v1/zz_generated.deepcopy.go @@ -30,29 +30,7 @@ func (in *ClientConfig) DeepCopy() *ClientConfig { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in ResourceList) DeepCopyInto(out *ResourceList) { - { - in := &in - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceList. -func (in ResourceList) DeepCopy() ResourceList { - if in == nil { - return nil - } - out := new(ResourceList) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SpokeCluster) DeepCopyInto(out *SpokeCluster) { +func (in *ManagedCluster) DeepCopyInto(out *ManagedCluster) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) @@ -61,18 +39,18 @@ func (in *SpokeCluster) DeepCopyInto(out *SpokeCluster) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpokeCluster. -func (in *SpokeCluster) DeepCopy() *SpokeCluster { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedCluster. +func (in *ManagedCluster) DeepCopy() *ManagedCluster { if in == nil { return nil } - out := new(SpokeCluster) + out := new(ManagedCluster) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *SpokeCluster) DeepCopyObject() runtime.Object { +func (in *ManagedCluster) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -80,13 +58,13 @@ func (in *SpokeCluster) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SpokeClusterList) DeepCopyInto(out *SpokeClusterList) { +func (in *ManagedClusterList) DeepCopyInto(out *ManagedClusterList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]SpokeCluster, len(*in)) + *out = make([]ManagedCluster, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -94,18 +72,18 @@ func (in *SpokeClusterList) DeepCopyInto(out *SpokeClusterList) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpokeClusterList. -func (in *SpokeClusterList) DeepCopy() *SpokeClusterList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterList. +func (in *ManagedClusterList) DeepCopy() *ManagedClusterList { if in == nil { return nil } - out := new(SpokeClusterList) + out := new(ManagedClusterList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *SpokeClusterList) DeepCopyObject() runtime.Object { +func (in *ManagedClusterList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -113,10 +91,10 @@ func (in *SpokeClusterList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SpokeClusterSpec) DeepCopyInto(out *SpokeClusterSpec) { +func (in *ManagedClusterSpec) DeepCopyInto(out *ManagedClusterSpec) { *out = *in - if in.SpokeClientConfigs != nil { - in, out := &in.SpokeClientConfigs, &out.SpokeClientConfigs + if in.ManagedClusterClientConfigs != nil { + in, out := &in.ManagedClusterClientConfigs, &out.ManagedClusterClientConfigs *out = make([]ClientConfig, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) @@ -125,18 +103,18 @@ func (in *SpokeClusterSpec) DeepCopyInto(out *SpokeClusterSpec) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpokeClusterSpec. -func (in *SpokeClusterSpec) DeepCopy() *SpokeClusterSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterSpec. +func (in *ManagedClusterSpec) DeepCopy() *ManagedClusterSpec { if in == nil { return nil } - out := new(SpokeClusterSpec) + out := new(ManagedClusterSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SpokeClusterStatus) DeepCopyInto(out *SpokeClusterStatus) { +func (in *ManagedClusterStatus) DeepCopyInto(out *ManagedClusterStatus) { *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions @@ -163,32 +141,54 @@ func (in *SpokeClusterStatus) DeepCopyInto(out *SpokeClusterStatus) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpokeClusterStatus. -func (in *SpokeClusterStatus) DeepCopy() *SpokeClusterStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterStatus. +func (in *ManagedClusterStatus) DeepCopy() *ManagedClusterStatus { if in == nil { return nil } - out := new(SpokeClusterStatus) + out := new(ManagedClusterStatus) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SpokeVersion) DeepCopyInto(out *SpokeVersion) { +func (in *ManagedClusterVersion) DeepCopyInto(out *ManagedClusterVersion) { *out = *in return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpokeVersion. -func (in *SpokeVersion) DeepCopy() *SpokeVersion { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterVersion. +func (in *ManagedClusterVersion) DeepCopy() *ManagedClusterVersion { if in == nil { return nil } - out := new(SpokeVersion) + out := new(ManagedClusterVersion) in.DeepCopyInto(out) return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ResourceList) DeepCopyInto(out *ResourceList) { + { + in := &in + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceList. 
+func (in ResourceList) DeepCopy() ResourceList { + if in == nil { + return nil + } + out := new(ResourceList) + in.DeepCopyInto(out) + return *out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StatusCondition) DeepCopyInto(out *StatusCondition) { *out = *in diff --git a/vendor/github.com/open-cluster-management/api/cluster/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/open-cluster-management/api/cluster/v1/zz_generated.swagger_doc_generated.go index 836694fb6..c5c9c3805 100644 --- a/vendor/github.com/open-cluster-management/api/cluster/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/open-cluster-management/api/cluster/v1/zz_generated.swagger_doc_generated.go @@ -12,69 +12,69 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE var map_ClientConfig = map[string]string{ - "": "ClientConfig represents the apiserver address of the spoke cluster.", - "url": "URL is the url of apiserver endpoint of the spoke cluster.", - "caBundle": "CABundle is the ca bundle to connect to apiserver of the spoke cluster. System certs are used if it is not set.", + "": "ClientConfig represents the apiserver address of the managed cluster.", + "url": "URL is the url of apiserver endpoint of the managed cluster.", + "caBundle": "CABundle is the ca bundle to connect to apiserver of the managed cluster. System certs are used if it is not set.", } func (ClientConfig) SwaggerDoc() map[string]string { return map_ClientConfig } -var map_SpokeCluster = map[string]string{ - "": "SpokeCluster represents the desired state and current status of spoke cluster. SpokeCluster is a cluster scoped resource. The name is the cluster UID.\n\nThe cluster join process follows a double opt-in process:\n\n1. agent on spoke cluster creates CSR on hub with cluster UID and agent name. 2. agent on spoke cluster creates spokecluster on hub. 3. cluster admin on hub approves the CSR for the spoke's cluster UID and agent name. 4. cluster admin set spec.acceptSpokeCluster of spokecluster to true. 5. cluster admin on spoke creates credential of kubeconfig to spoke.\n\nOnce the hub creates the cluster namespace, the spoke agent pushes the credential to the hub to use against the spoke's kube-apiserver.", - "spec": "Spec represents a desired configuration for the agent on the spoke cluster.", - "status": "Status represents the current status of joined spoke cluster", +var map_ManagedCluster = map[string]string{ + "": "ManagedCluster represents the desired state and current status of managed cluster. ManagedCluster is a cluster scoped resource. The name is the cluster UID.\n\nThe cluster join process follows a double opt-in process:\n\n1. agent on managed cluster creates CSR on hub with cluster UID and agent name. 2. agent on managed cluster creates ManagedCluster on hub. 3. cluster admin on hub approves the CSR for the ManagedCluster's UID and agent name. 4. cluster admin sets spec.acceptClient of ManagedCluster to true. 5. 
cluster admin on managed cluster creates credential of kubeconfig to hub.\n\nOnce the hub creates the cluster namespace, the Klusterlet agent on the Managed Cluster pushes the credential to the hub to use against the managed cluster's kube-apiserver.", + "spec": "Spec represents a desired configuration for the agent on the managed cluster.", + "status": "Status represents the current status of joined managed cluster", } -func (SpokeCluster) SwaggerDoc() map[string]string { - return map_SpokeCluster +func (ManagedCluster) SwaggerDoc() map[string]string { + return map_ManagedCluster } -var map_SpokeClusterList = map[string]string{ - "": "SpokeClusterList is a collection of spoke cluster.", +var map_ManagedClusterList = map[string]string{ + "": "ManagedClusterList is a collection of managed cluster.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "items": "Items is a list of spoke cluster.", + "items": "Items is a list of managed cluster.", } -func (SpokeClusterList) SwaggerDoc() map[string]string { - return map_SpokeClusterList +func (ManagedClusterList) SwaggerDoc() map[string]string { + return map_ManagedClusterList } -var map_SpokeClusterSpec = map[string]string{ - "": "SpokeClusterSpec provides the information to securely connect to a remote server and verify its identity.", - "spokeClientConfigs": "SpokeClientConfigs represents a list of the apiserver address of the spoke cluster. If it is empty, spoke cluster has no accessible address to be visited from hub.", - "hubAcceptsClient": "AcceptSpokeCluster reprsents that hub accepts the join of spoke agent. Its default value is false, and can only be set true when the user on hub has an RBAC rule to UPDATE on the virtual subresource of spokeclusters/accept. When the vaule is set true, a namespace whose name is same as the name of SpokeCluster is created on hub representing the spoke cluster, also role/rolebinding is created on the namespace to grant the permision of access from agent on spoke. When the value is set false, the namespace representing the spoke cluster is deleted.", - "leaseDurationSeconds": "LeaseDurationSeconds is used to coordinate the lease update time of spoke agents. If its value is zero, the spoke agent will update its lease per 60s by default", +var map_ManagedClusterSpec = map[string]string{ + "": "ManagedClusterSpec provides the information to securely connect to a remote server and verify its identity.", + "managedClusterClientConfigs": "ManagedClusterClientConfigs represents a list of the apiserver address of the managed cluster. If it is empty, managed cluster has no accessible address to be visited from hub.", + "hubAcceptsClient": "hubAcceptsClient represents that hub accepts the join of Klusterlet agent on the managed cluster to the hub. The default value is false, and can only be set true when the user on hub has an RBAC rule to UPDATE on the virtual subresource of managedclusters/accept. When the value is set true, a namespace whose name is same as the name of ManagedCluster is created on hub representing the managed cluster, also role/rolebinding is created on the namespace to grant the permision of access from agent on managed cluster. When the value is set false, the namespace representing the managed cluster is deleted.", + "leaseDurationSeconds": "LeaseDurationSeconds is used to coordinate the lease update time of Klusterlet agents on the managed cluster. 
If its value is zero, the Klusterlet agent will update its lease every 60s by default", } -func (SpokeClusterSpec) SwaggerDoc() map[string]string { - return map_SpokeClusterSpec +func (ManagedClusterSpec) SwaggerDoc() map[string]string { + return map_ManagedClusterSpec } -var map_SpokeClusterStatus = map[string]string{ - "": "SpokeClusterStatus represents the current status of joined spoke cluster.", - "conditions": "Conditions contains the different condition statuses for this spoke cluster.", - "capacity": "Capacity represents the total resource capacity from all nodeStatuses on the spoke cluster.", - "allocatable": "Allocatable represents the total allocatable resources on the spoke cluster.", - "version": "Version represents the kubernetes version of the spoke cluster.", +var map_ManagedClusterStatus = map[string]string{ + "": "ManagedClusterStatus represents the current status of joined managed cluster.", + "conditions": "Conditions contains the different condition statuses for this managed cluster.", + "capacity": "Capacity represents the total resource capacity from all nodeStatuses on the managed cluster.", + "allocatable": "Allocatable represents the total allocatable resources on the managed cluster.", + "version": "Version represents the kubernetes version of the managed cluster.", } -func (SpokeClusterStatus) SwaggerDoc() map[string]string { - return map_SpokeClusterStatus +func (ManagedClusterStatus) SwaggerDoc() map[string]string { + return map_ManagedClusterStatus } -var map_SpokeVersion = map[string]string{ - "": "SpokeVersion represents version information about the spoke cluster.", - "kubernetes": "Kubernetes is the kubernetes version of spoke cluster", +var map_ManagedClusterVersion = map[string]string{ + "": "ManagedClusterVersion represents version information about the managed cluster.", + "kubernetes": "Kubernetes is the kubernetes version of managed cluster.", } -func (SpokeVersion) SwaggerDoc() map[string]string { - return map_SpokeVersion +func (ManagedClusterVersion) SwaggerDoc() map[string]string { + return map_ManagedClusterVersion } var map_StatusCondition = map[string]string{ - "": "StatusCondition contains condition information for a spoke cluster.", + "": "StatusCondition contains condition information for a managed cluster.", "type": "Type is the type of the cluster condition.", "status": "Status is the status of the condition. One of True, False, Unknown.", "lastTransitionTime": "LastTransitionTime is the last time the condition changed from one status to another.", diff --git a/vendor/github.com/open-cluster-management/api/operator/v1/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml b/vendor/github.com/open-cluster-management/api/operator/v1/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml index 7a2cc5cde..5bc3e8ad1 100644 --- a/vendor/github.com/open-cluster-management/api/operator/v1/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml +++ b/vendor/github.com/open-cluster-management/api/operator/v1/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml @@ -34,18 +34,18 @@ spec: metadata: type: object spec: - description: Spec represents the desired deployment configuration of klusterlet + description: Spec represents the desired deployment configuration of Klusterlet agent. type: object properties: clusterName: - description: ClusterName is the name of the spoke cluster to be created - on hub. 
The spoke agent generates a random name if it is not set, - or discovers the appropriate cluster name on openshift. + description: ClusterName is the name of the managed cluster to be created + on hub. The Klusterlet agent generates a random name if it is not + set, or discovers the appropriate cluster name on openshift. type: string externalServerURLs: description: ExternalServerURLs represents the a list of apiserver urls - and ca bundles that is accessible externally If it is set empty, spoke + and ca bundles that is accessible externally If it is set empty, managed cluster has no externally accessible url that hub cluster can visit. type: array items: @@ -55,17 +55,17 @@ spec: properties: caBundle: description: CABundle is the ca bundle to connect to apiserver - of the spoke cluster. System certs are used if it is not set. + of the managed cluster. System certs are used if it is not set. type: string format: byte url: - description: URL is the url of apiserver endpoint of the spoke + description: URL is the url of apiserver endpoint of the managed cluster. type: string namespace: description: Namespace is the namespace to deploy the agent. The namespace must have a prefix of "open-cluster-management-", and if it is not - set, the namespace of "open-cluster-management-spoke" is used to deploy + set, the namespace of "open-cluster-management-agent" is used to deploy agent. type: string registrationImagePullSpec: @@ -77,16 +77,17 @@ spec: of work agent. type: string status: - description: Status represents the current status of klusterlet agent. + description: Status represents the current status of Klusterlet agent. type: object properties: conditions: description: 'Conditions contain the different condition statuses for - this spokecore. Valid condition types are: Applied: components in - spoke is applied. Available: components in spoke are available and - ready to serve. Progressing: components in spoke are in a transitioning - state. Degraded: components in spoke do not match the desired configuration - and only provide degraded service.' + this Klusterlet. Valid condition types are: Applied: components have + been applied in the managed cluster. Available: components in the + managed cluster are available and ready to serve. Progressing: components + in the managed cluster are in a transitioning state. Degraded: components + in the managed cluster do not match the desired configuration and + only provide degraded service.' type: array items: description: StatusCondition contains condition information. diff --git a/vendor/github.com/open-cluster-management/api/operator/v1/0000_01_operator.open-cluster-management.io_clustermanagers.crd.yaml b/vendor/github.com/open-cluster-management/api/operator/v1/0000_01_operator.open-cluster-management.io_clustermanagers.crd.yaml index d4612e13e..b623aa556 100644 --- a/vendor/github.com/open-cluster-management/api/operator/v1/0000_01_operator.open-cluster-management.io_clustermanagers.crd.yaml +++ b/vendor/github.com/open-cluster-management/api/operator/v1/0000_01_operator.open-cluster-management.io_clustermanagers.crd.yaml @@ -16,7 +16,7 @@ spec: validation: openAPIV3Schema: description: ClusterManager configures the controllers on the hub that govern - registration and work distribution for attached klusterlets. ClusterManager + registration and work distribution for attached Klusterlets. ClusterManager will be only deployed in open-cluster-management-hub namespace. 
type: object properties: @@ -34,7 +34,7 @@ spec: type: object spec: description: Spec represents a desired deployment configuration of controllers - that govern registration and work distribution for attached klusterlets. + that govern registration and work distribution for attached Klusterlets. type: object properties: registrationImagePullSpec: diff --git a/vendor/github.com/open-cluster-management/api/operator/v1/generated.proto b/vendor/github.com/open-cluster-management/api/operator/v1/generated.proto index 815f8a3e7..166cceac8 100644 --- a/vendor/github.com/open-cluster-management/api/operator/v1/generated.proto +++ b/vendor/github.com/open-cluster-management/api/operator/v1/generated.proto @@ -11,12 +11,12 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1"; -// ClusterManager configures the controllers on the hub that govern registration and work distribution for attached klusterlets. +// ClusterManager configures the controllers on the hub that govern registration and work distribution for attached Klusterlets. // ClusterManager will be only deployed in open-cluster-management-hub namespace. message ClusterManager { optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec represents a desired deployment configuration of controllers that govern registration and work distribution for attached klusterlets. + // Spec represents a desired deployment configuration of controllers that govern registration and work distribution for attached Klusterlets. optional ClusterManagerSpec spec = 2; // Status represents the current status of controllers that govern the lifecycle of managed clusters. @@ -35,7 +35,7 @@ message ClusterManagerList { repeated ClusterManager items = 2; } -// ClusterManagerSpec represents a desired deployment configuration of controllers that govern registration and work distribution for attached klusterlets. +// ClusterManagerSpec represents a desired deployment configuration of controllers that govern registration and work distribution for attached Klusterlets. message ClusterManagerSpec { // RegistrationImagePullSpec represents the desired image of registration controller installed on hub. // +required @@ -60,29 +60,29 @@ message ClusterManagerStatus { message Klusterlet { optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec represents the desired deployment configuration of klusterlet agent. + // Spec represents the desired deployment configuration of Klusterlet agent. optional KlusterletSpec spec = 2; - // Status represents the current status of klusterlet agent. + // Status represents the current status of Klusterlet agent. optional KlusterletStatus status = 3; } -// KlusterletList is a collection of klusterlet agent. +// KlusterletList is a collection of Klusterlet agents. message KlusterletList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of klusterlet agent. + // Items is a list of Klusterlet agent. repeated Klusterlet items = 2; } -// KlusterletSpec represents the desired deployment configuration of klusterlet agent. +// KlusterletSpec represents the desired deployment configuration of Klusterlet agent. message KlusterletSpec { // Namespace is the namespace to deploy the agent. 
// The namespace must have a prefix of "open-cluster-management-", and if it is not set, - // the namespace of "open-cluster-management-spoke" is used to deploy agent. + // the namespace of "open-cluster-management-agent" is used to deploy agent. // +optional optional string namespace = 1; @@ -94,36 +94,36 @@ message KlusterletSpec { // +required optional string workImagePullSpec = 3; - // ClusterName is the name of the spoke cluster to be created on hub. - // The spoke agent generates a random name if it is not set, or discovers the appropriate cluster name on openshift. + // ClusterName is the name of the managed cluster to be created on hub. + // The Klusterlet agent generates a random name if it is not set, or discovers the appropriate cluster name on openshift. // +optional optional string clusterName = 4; // ExternalServerURLs represents the a list of apiserver urls and ca bundles that is accessible externally - // If it is set empty, spoke cluster has no externally accessible url that hub cluster can visit. + // If it is set empty, managed cluster has no externally accessible url that hub cluster can visit. // +optional repeated ServerURL externalServerURLs = 5; } -// KlusterletStatus represents the current status of klusterlet agent. +// KlusterletStatus represents the current status of Klusterlet agent. message KlusterletStatus { - // Conditions contain the different condition statuses for this spokecore. + // Conditions contain the different condition statuses for this Klusterlet. // Valid condition types are: - // Applied: components in spoke is applied. - // Available: components in spoke are available and ready to serve. - // Progressing: components in spoke are in a transitioning state. - // Degraded: components in spoke do not match the desired configuration and only provide + // Applied: components have been applied in the managed cluster. + // Available: components in the managed cluster are available and ready to serve. + // Progressing: components in the managed cluster are in a transitioning state. + // Degraded: components in the managed cluster do not match the desired configuration and only provide // degraded service. repeated StatusCondition conditions = 1; } // ServerURL represents the apiserver url and ca bundle that is accessible externally message ServerURL { - // URL is the url of apiserver endpoint of the spoke cluster. + // URL is the url of apiserver endpoint of the managed cluster. // +required optional string url = 1; - // CABundle is the ca bundle to connect to apiserver of the spoke cluster. + // CABundle is the ca bundle to connect to apiserver of the managed cluster. // System certs are used if it is not set. // +optional optional bytes caBundle = 2; diff --git a/vendor/github.com/open-cluster-management/api/operator/v1/types.go b/vendor/github.com/open-cluster-management/api/operator/v1/types.go index c1a3efe94..c2de0ef72 100644 --- a/vendor/github.com/open-cluster-management/api/operator/v1/types.go +++ b/vendor/github.com/open-cluster-management/api/operator/v1/types.go @@ -10,13 +10,13 @@ import ( // +kubebuilder:subresource:status // +kubebuilder:resource:scope=Cluster -// ClusterManager configures the controllers on the hub that govern registration and work distribution for attached klusterlets. +// ClusterManager configures the controllers on the hub that govern registration and work distribution for attached Klusterlets. // ClusterManager will be only deployed in open-cluster-management-hub namespace. 
type ClusterManager struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec represents a desired deployment configuration of controllers that govern registration and work distribution for attached klusterlets. + // Spec represents a desired deployment configuration of controllers that govern registration and work distribution for attached Klusterlets. Spec ClusterManagerSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` // Status represents the current status of controllers that govern the lifecycle of managed clusters. @@ -24,7 +24,7 @@ type ClusterManager struct { Status ClusterManagerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } -// ClusterManagerSpec represents a desired deployment configuration of controllers that govern registration and work distribution for attached klusterlets. +// ClusterManagerSpec represents a desired deployment configuration of controllers that govern registration and work distribution for attached Klusterlets. type ClusterManagerSpec struct { // RegistrationImagePullSpec represents the desired image of registration controller installed on hub. // +required @@ -70,18 +70,18 @@ type Klusterlet struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec represents the desired deployment configuration of klusterlet agent. + // Spec represents the desired deployment configuration of Klusterlet agent. Spec KlusterletSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // Status represents the current status of klusterlet agent. + // Status represents the current status of Klusterlet agent. Status KlusterletStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } -// KlusterletSpec represents the desired deployment configuration of klusterlet agent. +// KlusterletSpec represents the desired deployment configuration of Klusterlet agent. type KlusterletSpec struct { // Namespace is the namespace to deploy the agent. // The namespace must have a prefix of "open-cluster-management-", and if it is not set, - // the namespace of "open-cluster-management-spoke" is used to deploy agent. + // the namespace of "open-cluster-management-agent" is used to deploy agent. // +optional Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"` @@ -93,44 +93,44 @@ type KlusterletSpec struct { // +required WorkImagePullSpec string `json:"workImagePullSpec,omitempty" protobuf:"bytes,3,opt,name=workImagePullSpec"` - // ClusterName is the name of the spoke cluster to be created on hub. - // The spoke agent generates a random name if it is not set, or discovers the appropriate cluster name on openshift. + // ClusterName is the name of the managed cluster to be created on hub. + // The Klusterlet agent generates a random name if it is not set, or discovers the appropriate cluster name on openshift. // +optional ClusterName string `json:"clusterName,omitempty" protobuf:"bytes,4,opt,name=clusterName"` // ExternalServerURLs represents the a list of apiserver urls and ca bundles that is accessible externally - // If it is set empty, spoke cluster has no externally accessible url that hub cluster can visit. + // If it is set empty, managed cluster has no externally accessible url that hub cluster can visit. 
// +optional ExternalServerURLs []ServerURL `json:"externalServerURLs,omitempty" protobuf:"bytes,5,opt,name=externalServerURLs"` } // ServerURL represents the apiserver url and ca bundle that is accessible externally type ServerURL struct { - // URL is the url of apiserver endpoint of the spoke cluster. + // URL is the url of apiserver endpoint of the managed cluster. // +required URL string `json:"url" protobuf:"bytes,1,opt,name=url"` - // CABundle is the ca bundle to connect to apiserver of the spoke cluster. + // CABundle is the ca bundle to connect to apiserver of the managed cluster. // System certs are used if it is not set. // +optional CABundle []byte `json:"caBundle,omitempty" protobuf:"bytes,2,opt,name=caBundle"` } -// KlusterletStatus represents the current status of klusterlet agent. +// KlusterletStatus represents the current status of Klusterlet agent. type KlusterletStatus struct { - // Conditions contain the different condition statuses for this spokecore. + // Conditions contain the different condition statuses for this Klusterlet. // Valid condition types are: - // Applied: components in spoke is applied. - // Available: components in spoke are available and ready to serve. - // Progressing: components in spoke are in a transitioning state. - // Degraded: components in spoke do not match the desired configuration and only provide + // Applied: components have been applied in the managed cluster. + // Available: components in the managed cluster are available and ready to serve. + // Progressing: components in the managed cluster are in a transitioning state. + // Degraded: components in the managed cluster do not match the desired configuration and only provide // degraded service. Conditions []StatusCondition `json:"conditions" protobuf:"bytes,1,opt,name=conditions"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// KlusterletList is a collection of klusterlet agent. +// KlusterletList is a collection of Klusterlet agents. type KlusterletList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. @@ -138,7 +138,7 @@ type KlusterletList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of klusterlet agent. + // Items is a list of Klusterlet agent. Items []Klusterlet `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/github.com/open-cluster-management/api/operator/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/open-cluster-management/api/operator/v1/zz_generated.swagger_doc_generated.go index 0c9806365..c444e9262 100644 --- a/vendor/github.com/open-cluster-management/api/operator/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/open-cluster-management/api/operator/v1/zz_generated.swagger_doc_generated.go @@ -12,8 +12,8 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE var map_ClusterManager = map[string]string{ - "": "ClusterManager configures the controllers on the hub that govern registration and work distribution for attached klusterlets. ClusterManager will be only deployed in open-cluster-management-hub namespace.", - "spec": "Spec represents a desired deployment configuration of controllers that govern registration and work distribution for attached klusterlets.", + "": "ClusterManager configures the controllers on the hub that govern registration and work distribution for attached Klusterlets. 
ClusterManager will be only deployed in open-cluster-management-hub namespace.", + "spec": "Spec represents a desired deployment configuration of controllers that govern registration and work distribution for attached Klusterlets.", "status": "Status represents the current status of controllers that govern the lifecycle of managed clusters.", } @@ -32,7 +32,7 @@ func (ClusterManagerList) SwaggerDoc() map[string]string { } var map_ClusterManagerSpec = map[string]string{ - "": "ClusterManagerSpec represents a desired deployment configuration of controllers that govern registration and work distribution for attached klusterlets.", + "": "ClusterManagerSpec represents a desired deployment configuration of controllers that govern registration and work distribution for attached Klusterlets.", "registrationImagePullSpec": "RegistrationImagePullSpec represents the desired image of registration controller installed on hub.", } @@ -51,8 +51,8 @@ func (ClusterManagerStatus) SwaggerDoc() map[string]string { var map_Klusterlet = map[string]string{ "": "Klusterlet represents controllers on the managed cluster. When configured, the Klusterlet requires a secret named of bootstrap-hub-kubeconfig in the same namespace to allow API requests to the hub for the registration protocol.", - "spec": "Spec represents the desired deployment configuration of klusterlet agent.", - "status": "Status represents the current status of klusterlet agent.", + "spec": "Spec represents the desired deployment configuration of Klusterlet agent.", + "status": "Status represents the current status of Klusterlet agent.", } func (Klusterlet) SwaggerDoc() map[string]string { @@ -60,9 +60,9 @@ func (Klusterlet) SwaggerDoc() map[string]string { } var map_KlusterletList = map[string]string{ - "": "KlusterletList is a collection of klusterlet agent.", + "": "KlusterletList is a collection of Klusterlet agents.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "items": "Items is a list of klusterlet agent.", + "items": "Items is a list of Klusterlet agent.", } func (KlusterletList) SwaggerDoc() map[string]string { @@ -70,12 +70,12 @@ func (KlusterletList) SwaggerDoc() map[string]string { } var map_KlusterletSpec = map[string]string{ - "": "KlusterletSpec represents the desired deployment configuration of klusterlet agent.", - "namespace": "Namespace is the namespace to deploy the agent. The namespace must have a prefix of \"open-cluster-management-\", and if it is not set, the namespace of \"open-cluster-management-spoke\" is used to deploy agent.", + "": "KlusterletSpec represents the desired deployment configuration of Klusterlet agent.", + "namespace": "Namespace is the namespace to deploy the agent. The namespace must have a prefix of \"open-cluster-management-\", and if it is not set, the namespace of \"open-cluster-management-agent\" is used to deploy agent.", "registrationImagePullSpec": "RegistrationImagePullSpec represents the desired image configuration of registration agent.", "workImagePullSpec": "WorkImagePullSpec represents the desired image configuration of work agent.", - "clusterName": "ClusterName is the name of the spoke cluster to be created on hub. 
The spoke agent generates a random name if it is not set, or discovers the appropriate cluster name on openshift.", - "externalServerURLs": "ExternalServerURLs represents the a list of apiserver urls and ca bundles that is accessible externally If it is set empty, spoke cluster has no externally accessible url that hub cluster can visit.", + "clusterName": "ClusterName is the name of the managed cluster to be created on hub. The Klusterlet agent generates a random name if it is not set, or discovers the appropriate cluster name on openshift.", + "externalServerURLs": "ExternalServerURLs represents the a list of apiserver urls and ca bundles that is accessible externally If it is set empty, managed cluster has no externally accessible url that hub cluster can visit.", } func (KlusterletSpec) SwaggerDoc() map[string]string { @@ -83,8 +83,8 @@ func (KlusterletSpec) SwaggerDoc() map[string]string { } var map_KlusterletStatus = map[string]string{ - "": "KlusterletStatus represents the current status of klusterlet agent.", - "conditions": "Conditions contain the different condition statuses for this spokecore. Valid condition types are: Applied: components in spoke is applied. Available: components in spoke are available and ready to serve. Progressing: components in spoke are in a transitioning state. Degraded: components in spoke do not match the desired configuration and only provide degraded service.", + "": "KlusterletStatus represents the current status of Klusterlet agent.", + "conditions": "Conditions contain the different condition statuses for this Klusterlet. Valid condition types are: Applied: components have been applied in the managed cluster. Available: components in the managed cluster are available and ready to serve. Progressing: components in the managed cluster are in a transitioning state. Degraded: components in the managed cluster do not match the desired configuration and only provide degraded service.", } func (KlusterletStatus) SwaggerDoc() map[string]string { @@ -93,8 +93,8 @@ func (KlusterletStatus) SwaggerDoc() map[string]string { var map_ServerURL = map[string]string{ "": "ServerURL represents the apiserver url and ca bundle that is accessible externally", - "url": "URL is the url of apiserver endpoint of the spoke cluster.", - "caBundle": "CABundle is the ca bundle to connect to apiserver of the spoke cluster. System certs are used if it is not set.", + "url": "URL is the url of apiserver endpoint of the managed cluster.", + "caBundle": "CABundle is the ca bundle to connect to apiserver of the managed cluster. System certs are used if it is not set.", } func (ServerURL) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/open-cluster-management/api/work/v1/0000_00_work.open-cluster-management.io_manifestworks.crd.yaml b/vendor/github.com/open-cluster-management/api/work/v1/0000_00_work.open-cluster-management.io_manifestworks.crd.yaml index 0dfcc943f..133c5cc81 100644 --- a/vendor/github.com/open-cluster-management/api/work/v1/0000_00_work.open-cluster-management.io_manifestworks.crd.yaml +++ b/vendor/github.com/open-cluster-management/api/work/v1/0000_00_work.open-cluster-management.io_manifestworks.crd.yaml @@ -17,10 +17,10 @@ spec: validation: openAPIV3Schema: description: ManifestWork represents a manifests workload that hub wants to - deploy on the spoke cluster. A manifest workload is defined as a set of kubernetes - resources. 
ManifestWork must be created in the cluster namespace on the hub, - so that agent on the corresponding spoke cluster can access this resource - and deploy on the spoke cluster. + deploy on the managed cluster. A manifest workload is defined as a set of + kubernetes resources. ManifestWork must be created in the cluster namespace + on the hub, so that agent on the corresponding managed cluster can access + this resource and deploy on the managed cluster. type: object properties: apiVersion: @@ -37,21 +37,21 @@ spec: type: object spec: description: Spec represents a desired configuration of work to be deployed - on the spoke cluster. + on the managed cluster. type: object properties: workload: description: Workload represents the manifest workload to be deployed - on spoke cluster + on managed cluster type: object properties: manifests: description: Manifests represents a list of kuberenetes resources - to be deployed on the spoke cluster. + to be deployed on the managed cluster. type: array items: description: Manifest represents a resource to be deployed on - spoke cluster + managed cluster type: object x-kubernetes-preserve-unknown-fields: true x-kubernetes-embedded-resource: true @@ -65,7 +65,7 @@ spec: GroupVersionResource, namespace, and name are suitable. An item in this slice is deleted when there is no mapped manifest in manifestwork.Spec or by finalizer. The resource relating to the item will also be removed - from spoke cluster. The deleted resource may still be present until + from managed cluster. The deleted resource may still be present until the finalizers for that resource are finished. However, the resource will not be undeleted, so it can be removed from this list and eventual consistency is preserved. @@ -95,15 +95,15 @@ spec: conditions: description: 'Conditions contains the different condition statuses for this work. Valid condition types are: 1. Applied represents workload - in ManifestWork is applied successfully on spoke cluster. 2. Progressing - represents workload in ManifestWork is being applied on spoke cluster. - 3. Available represents workload in ManifestWork exists on the spoke + in ManifestWork is applied successfully on managed cluster. 2. Progressing + represents workload in ManifestWork is being applied on managed cluster. + 3. Available represents workload in ManifestWork exists on the managed cluster. 4. Degraded represents the current state of workload does not match the desired state for a certain period.' type: array items: description: StatusCondition contains condition information for a - spoke work. + ManifestWork applied to a managed cluster. type: object properties: lastTransitionTime: @@ -124,35 +124,35 @@ spec: False, Unknown. type: string type: - description: Type is the type of the spoke work condition. + description: Type is the type of the ManifestWork condition. type: string resourceStatus: description: ResourceStatus represents the status of each resource in - manifestwork deployed on spoke cluster. The agent on spoke cluster - syncs the condition from spoke to the hub. + manifestwork deployed on managed cluster. The Klusterlet agent on + managed cluster syncs the condition from managed to the hub. type: object properties: manifests: description: 'Manifests represents the condition of manifests deployed - on spoke cluster. Valid condition types are: 1. Progressing represents - the resource is being applied on spoke cluster. 2. Applied represents - the resource is applied successfully on spoke cluster. 3. 
Available - represents the resource exists on the spoke cluster. 4. Degraded - represents the current state of resource does not match the desired - state for a certain period.' + on managed cluster. Valid condition types are: 1. Progressing + represents the resource is being applied on managed cluster. 2. + Applied represents the resource is applied successfully on managed + cluster. 3. Available represents the resource exists on the managed + cluster. 4. Degraded represents the current state of resource + does not match the desired state for a certain period.' type: array items: description: ManifestCondition represents the conditions of the - resources deployed on spoke cluster + resources deployed on managed cluster type: object properties: conditions: description: Conditions represents the conditions of this - resource on spoke cluster + resource on managed cluster type: array items: description: StatusCondition contains condition information - for a spoke work. + for a ManifestWork applied to a managed cluster. type: object properties: lastTransitionTime: @@ -173,7 +173,7 @@ spec: One of True, False, Unknown. type: string type: - description: Type is the type of the spoke work condition. + description: Type is the type of the ManifestWork condition. type: string resourceMeta: description: ResourceMeta represents the gvk, name and namespace diff --git a/vendor/github.com/open-cluster-management/api/work/v1/generated.proto b/vendor/github.com/open-cluster-management/api/work/v1/generated.proto index b4aa3eda9..6563e09ec 100644 --- a/vendor/github.com/open-cluster-management/api/work/v1/generated.proto +++ b/vendor/github.com/open-cluster-management/api/work/v1/generated.proto @@ -37,7 +37,7 @@ message AppliedManifestResourceMeta { optional string namespace = 5; } -// Manifest represents a resource to be deployed on spoke cluster +// Manifest represents a resource to be deployed on managed cluster message Manifest { // +kubebuilder:validation:EmbeddedResource // +kubebuilder:pruning:PreserveUnknownFields @@ -45,13 +45,13 @@ message Manifest { } // ManifestCondition represents the conditions of the resources deployed on -// spoke cluster +// managed cluster message ManifestCondition { // ResourceMeta represents the gvk, name and namespace of a resoure // +required optional ManifestResourceMeta resourceMeta = 1; - // Conditions represents the conditions of this resource on spoke cluster + // Conditions represents the conditions of this resource on managed cluster // +required repeated StatusCondition conditions = 2; } @@ -88,27 +88,27 @@ message ManifestResourceMeta { } // ManifestResourceStatus represents the status of each resource in manifest work deployed on -// spoke cluster +// managed cluster message ManifestResourceStatus { - // Manifests represents the condition of manifests deployed on spoke cluster. + // Manifests represents the condition of manifests deployed on managed cluster. // Valid condition types are: - // 1. Progressing represents the resource is being applied on spoke cluster. - // 2. Applied represents the resource is applied successfully on spoke cluster. - // 3. Available represents the resource exists on the spoke cluster. + // 1. Progressing represents the resource is being applied on managed cluster. + // 2. Applied represents the resource is applied successfully on managed cluster. + // 3. Available represents the resource exists on the managed cluster. // 4. 
Degraded represents the current state of resource does not match the desired // state for a certain period. repeated ManifestCondition manifests = 2; } -// ManifestWork represents a manifests workload that hub wants to deploy on the spoke cluster. +// ManifestWork represents a manifests workload that hub wants to deploy on the managed cluster. // A manifest workload is defined as a set of kubernetes resources. // ManifestWork must be created in the cluster namespace on the hub, so that agent on the -// corresponding spoke cluster can access this resource and deploy on the spoke +// corresponding managed cluster can access this resource and deploy on the managed // cluster. message ManifestWork { optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec represents a desired configuration of work to be deployed on the spoke cluster. + // Spec represents a desired configuration of work to be deployed on the managed cluster. optional ManifestWorkSpec spec = 2; // Status represents the current status of work @@ -127,48 +127,48 @@ message ManifestWorkList { repeated ManifestWork items = 2; } -// ManifestWorkSpec represents a desired configuration of manifests to be deployed on the spoke cluster. +// ManifestWorkSpec represents a desired configuration of manifests to be deployed on the managed cluster. message ManifestWorkSpec { - // Workload represents the manifest workload to be deployed on spoke cluster + // Workload represents the manifest workload to be deployed on managed cluster optional ManifestsTemplate workload = 1; } -// ManifestWorkStatus represents the current status of spoke manifest workload +// ManifestWorkStatus represents the current status of managed cluster ManifestWork message ManifestWorkStatus { // Conditions contains the different condition statuses for this work. // Valid condition types are: - // 1. Applied represents workload in ManifestWork is applied successfully on spoke cluster. - // 2. Progressing represents workload in ManifestWork is being applied on spoke cluster. - // 3. Available represents workload in ManifestWork exists on the spoke cluster. + // 1. Applied represents workload in ManifestWork is applied successfully on managed cluster. + // 2. Progressing represents workload in ManifestWork is being applied on managed cluster. + // 3. Available represents workload in ManifestWork exists on the managed cluster. // 4. Degraded represents the current state of workload does not match the desired // state for a certain period. repeated StatusCondition conditions = 1; // ResourceStatus represents the status of each resource in manifestwork deployed on - // spoke cluster. The agent on spoke cluster syncs the condition from spoke to the hub. + // managed cluster. The Klusterlet agent on managed cluster syncs the condition from managed to the hub. // +optional optional ManifestResourceStatus resourceStatus = 2; // AppliedResources represents a list of resources defined within the manifestwork that are applied. // Only resources with valid GroupVersionResource, namespace, and name are suitable. // An item in this slice is deleted when there is no mapped manifest in manifestwork.Spec or by finalizer. - // The resource relating to the item will also be removed from spoke cluster. + // The resource relating to the item will also be removed from managed cluster. // The deleted resource may still be present until the finalizers for that resource are finished. 
// However, the resource will not be undeleted, so it can be removed from this list and eventual consistency is preserved. // +optional repeated AppliedManifestResourceMeta appliedResources = 3; } -// ManifestsTemplate represents the manifest workload to be deployed on spoke cluster +// ManifestsTemplate represents the manifest workload to be deployed on managed cluster message ManifestsTemplate { - // Manifests represents a list of kuberenetes resources to be deployed on the spoke cluster. + // Manifests represents a list of kuberenetes resources to be deployed on the managed cluster. // +optional repeated Manifest manifests = 1; } -// StatusCondition contains condition information for a spoke work. +// StatusCondition contains condition information for a ManifestWork applied to a managed cluster. message StatusCondition { - // Type is the type of the spoke work condition. + // Type is the type of the ManifestWork condition. // +required optional string type = 1; diff --git a/vendor/github.com/open-cluster-management/api/work/v1/types.go b/vendor/github.com/open-cluster-management/api/work/v1/types.go index 81c988743..cfeec24a0 100644 --- a/vendor/github.com/open-cluster-management/api/work/v1/types.go +++ b/vendor/github.com/open-cluster-management/api/work/v1/types.go @@ -5,9 +5,9 @@ import ( "k8s.io/apimachinery/pkg/runtime" ) -// StatusCondition contains condition information for a spoke work. +// StatusCondition contains condition information for a ManifestWork applied to a managed cluster. type StatusCondition struct { - // Type is the type of the spoke work condition. + // Type is the type of the ManifestWork condition. // +required Type string `json:"type" protobuf:"bytes,1,opt,name=type"` @@ -32,16 +32,16 @@ type StatusCondition struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:subresource:status -// ManifestWork represents a manifests workload that hub wants to deploy on the spoke cluster. +// ManifestWork represents a manifests workload that hub wants to deploy on the managed cluster. // A manifest workload is defined as a set of kubernetes resources. // ManifestWork must be created in the cluster namespace on the hub, so that agent on the -// corresponding spoke cluster can access this resource and deploy on the spoke +// corresponding managed cluster can access this resource and deploy on the managed // cluster. type ManifestWork struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec represents a desired configuration of work to be deployed on the spoke cluster. + // Spec represents a desired configuration of work to be deployed on the managed cluster. Spec ManifestWorkSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` // Status represents the current status of work @@ -49,22 +49,22 @@ type ManifestWork struct { Status ManifestWorkStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } -// ManifestWorkSpec represents a desired configuration of manifests to be deployed on the spoke cluster. +// ManifestWorkSpec represents a desired configuration of manifests to be deployed on the managed cluster. 
type ManifestWorkSpec struct { - // Workload represents the manifest workload to be deployed on spoke cluster + // Workload represents the manifest workload to be deployed on managed cluster Workload ManifestsTemplate `json:"workload,omitempty" protobuf:"bytes,1,opt,name=workload"` } -// Manifest represents a resource to be deployed on spoke cluster +// Manifest represents a resource to be deployed on managed cluster type Manifest struct { // +kubebuilder:validation:EmbeddedResource // +kubebuilder:pruning:PreserveUnknownFields runtime.RawExtension `json:",inline" protobuf:"bytes,1,opt,name=rawExtension"` } -// ManifestsTemplate represents the manifest workload to be deployed on spoke cluster +// ManifestsTemplate represents the manifest workload to be deployed on managed cluster type ManifestsTemplate struct { - // Manifests represents a list of kuberenetes resources to be deployed on the spoke cluster. + // Manifests represents a list of kuberenetes resources to be deployed on the managed cluster. // +optional Manifests []Manifest `json:"manifests,omitempty" protobuf:"bytes,1,rep,name=manifests"` } @@ -125,26 +125,26 @@ type AppliedManifestResourceMeta struct { Namespace string `json:"namespace" protobuf:"bytes,5,opt,name=namespace"` } -// ManifestWorkStatus represents the current status of spoke manifest workload +// ManifestWorkStatus represents the current status of managed cluster ManifestWork type ManifestWorkStatus struct { // Conditions contains the different condition statuses for this work. // Valid condition types are: - // 1. Applied represents workload in ManifestWork is applied successfully on spoke cluster. - // 2. Progressing represents workload in ManifestWork is being applied on spoke cluster. - // 3. Available represents workload in ManifestWork exists on the spoke cluster. + // 1. Applied represents workload in ManifestWork is applied successfully on managed cluster. + // 2. Progressing represents workload in ManifestWork is being applied on managed cluster. + // 3. Available represents workload in ManifestWork exists on the managed cluster. // 4. Degraded represents the current state of workload does not match the desired // state for a certain period. Conditions []StatusCondition `json:"conditions" protobuf:"bytes,1,rep,name=conditions"` // ResourceStatus represents the status of each resource in manifestwork deployed on - // spoke cluster. The agent on spoke cluster syncs the condition from spoke to the hub. + // managed cluster. The Klusterlet agent on managed cluster syncs the condition from managed to the hub. // +optional ResourceStatus ManifestResourceStatus `json:"resourceStatus,omitempty" protobuf:"bytes,2,rep,name=resourceStatus"` // AppliedResources represents a list of resources defined within the manifestwork that are applied. // Only resources with valid GroupVersionResource, namespace, and name are suitable. // An item in this slice is deleted when there is no mapped manifest in manifestwork.Spec or by finalizer. - // The resource relating to the item will also be removed from spoke cluster. + // The resource relating to the item will also be removed from managed cluster. // The deleted resource may still be present until the finalizers for that resource are finished. // However, the resource will not be undeleted, so it can be removed from this list and eventual consistency is preserved. 
// +optional @@ -152,13 +152,13 @@ type ManifestWorkStatus struct { } // ManifestResourceStatus represents the status of each resource in manifest work deployed on -// spoke cluster +// managed cluster type ManifestResourceStatus struct { - // Manifests represents the condition of manifests deployed on spoke cluster. + // Manifests represents the condition of manifests deployed on managed cluster. // Valid condition types are: - // 1. Progressing represents the resource is being applied on spoke cluster. - // 2. Applied represents the resource is applied successfully on spoke cluster. - // 3. Available represents the resource exists on the spoke cluster. + // 1. Progressing represents the resource is being applied on managed cluster. + // 2. Applied represents the resource is applied successfully on managed cluster. + // 3. Available represents the resource exists on the managed cluster. // 4. Degraded represents the current state of resource does not match the desired // state for a certain period. Manifests []ManifestCondition `json:"manifests,omitempty" protobuf:"bytes,2,opt,name=manifests"` @@ -169,13 +169,13 @@ type WorkStatusConditionType string const ( // WorkProgressing represents that the work is in the progress to be - // applied on the spoke cluster. + // applied on the managed cluster. WorkProgressing WorkStatusConditionType = "Progressing" // WorkApplied represents that the workload defined in work is - // succesfully applied on the spoke cluster. + // succesfully applied on the managed cluster. WorkApplied WorkStatusConditionType = "Applied" // WorkAvailable represents that all resources of the work exists on - // the spoke cluster. + // the managed cluster. WorkAvailable WorkStatusConditionType = "Available" // WorkDegraded represents that the current state of work does not match // the desired state for a certain period. @@ -183,29 +183,29 @@ const ( ) // ManifestCondition represents the conditions of the resources deployed on -// spoke cluster +// managed cluster type ManifestCondition struct { // ResourceMeta represents the gvk, name and namespace of a resoure // +required ResourceMeta ManifestResourceMeta `json:"resourceMeta" protobuf:"bytes,1,opt,name=resourceMeta"` - // Conditions represents the conditions of this resource on spoke cluster + // Conditions represents the conditions of this resource on managed cluster // +required Conditions []StatusCondition `json:"conditions" protobuf:"bytes,2,rep,name=conditions"` } // ManifestConditionType represents the condition type of a single -// resource manifest deployed on the spoke cluster. +// resource manifest deployed on the managed cluster. type ManifestConditionType string const ( - // ManifestProgressing represents the resource is being applied on the spoke cluster + // ManifestProgressing represents the resource is being applied on the managed cluster ManifestProgressing ManifestConditionType = "Progressing" // ManifestApplied represents that the resource object is applied - // on the spoke cluster. + // on the managed cluster. ManifestApplied ManifestConditionType = "Applied" // ManifestAvailable represents that the resource object exists - // on the spoke cluster. + // on the managed cluster. ManifestAvailable ManifestConditionType = "Available" // ManifestDegraded represents that the current state of resource object does not // match the desired state for a certain period. 
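For reference, a minimal sketch of how the renamed work/v1 types above are meant to be used from hub-side Go code. This is illustrative only: the names ("cluster1", "example-work", the embedded ConfigMap) are placeholders and are not defined by this patch; the sketch just constructs the object and prints it, assuming the vendored work/v1 and apimachinery packages shown in this diff.

package main

import (
	"encoding/json"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"

	workv1 "github.com/open-cluster-management/api/work/v1"
)

func main() {
	// A ConfigMap manifest embedded as raw JSON; the agent on the managed
	// cluster applies it once the ManifestWork exists on the hub.
	configMap := []byte(`{
		"apiVersion": "v1",
		"kind": "ConfigMap",
		"metadata": {"name": "example-config", "namespace": "default"},
		"data": {"greeting": "hello"}
	}`)

	work := &workv1.ManifestWork{
		ObjectMeta: metav1.ObjectMeta{
			// ManifestWork is created in the cluster namespace on the hub,
			// which carries the same name as the ManagedCluster (placeholder here).
			Name:      "example-work",
			Namespace: "cluster1",
		},
		Spec: workv1.ManifestWorkSpec{
			Workload: workv1.ManifestsTemplate{
				Manifests: []workv1.Manifest{
					{RawExtension: runtime.RawExtension{Raw: configMap}},
				},
			},
		},
	}

	out, _ := json.MarshalIndent(work, "", "  ")
	fmt.Println(string(out))
}

Creating the object on the hub would go through whichever work clientset the consumer already uses; only the type construction is shown here.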
diff --git a/vendor/github.com/open-cluster-management/api/work/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/open-cluster-management/api/work/v1/zz_generated.swagger_doc_generated.go index 5201bada4..122543b93 100644 --- a/vendor/github.com/open-cluster-management/api/work/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/open-cluster-management/api/work/v1/zz_generated.swagger_doc_generated.go @@ -25,7 +25,7 @@ func (AppliedManifestResourceMeta) SwaggerDoc() map[string]string { } var map_Manifest = map[string]string{ - "": "Manifest represents a resource to be deployed on spoke cluster", + "": "Manifest represents a resource to be deployed on managed cluster", } func (Manifest) SwaggerDoc() map[string]string { @@ -33,9 +33,9 @@ func (Manifest) SwaggerDoc() map[string]string { } var map_ManifestCondition = map[string]string{ - "": "ManifestCondition represents the conditions of the resources deployed on spoke cluster", + "": "ManifestCondition represents the conditions of the resources deployed on managed cluster", "resourceMeta": "ResourceMeta represents the gvk, name and namespace of a resoure", - "conditions": "Conditions represents the conditions of this resource on spoke cluster", + "conditions": "Conditions represents the conditions of this resource on managed cluster", } func (ManifestCondition) SwaggerDoc() map[string]string { @@ -58,8 +58,8 @@ func (ManifestResourceMeta) SwaggerDoc() map[string]string { } var map_ManifestResourceStatus = map[string]string{ - "": "ManifestResourceStatus represents the status of each resource in manifest work deployed on spoke cluster", - "manifests": "Manifests represents the condition of manifests deployed on spoke cluster. Valid condition types are: 1. Progressing represents the resource is being applied on spoke cluster. 2. Applied represents the resource is applied successfully on spoke cluster. 3. Available represents the resource exists on the spoke cluster. 4. Degraded represents the current state of resource does not match the desired state for a certain period.", + "": "ManifestResourceStatus represents the status of each resource in manifest work deployed on managed cluster", + "manifests": "Manifests represents the condition of manifests deployed on managed cluster. Valid condition types are: 1. Progressing represents the resource is being applied on managed cluster. 2. Applied represents the resource is applied successfully on managed cluster. 3. Available represents the resource exists on the managed cluster. 4. Degraded represents the current state of resource does not match the desired state for a certain period.", } func (ManifestResourceStatus) SwaggerDoc() map[string]string { @@ -67,8 +67,8 @@ func (ManifestResourceStatus) SwaggerDoc() map[string]string { } var map_ManifestWork = map[string]string{ - "": "ManifestWork represents a manifests workload that hub wants to deploy on the spoke cluster. A manifest workload is defined as a set of kubernetes resources. ManifestWork must be created in the cluster namespace on the hub, so that agent on the corresponding spoke cluster can access this resource and deploy on the spoke cluster.", - "spec": "Spec represents a desired configuration of work to be deployed on the spoke cluster.", + "": "ManifestWork represents a manifests workload that hub wants to deploy on the managed cluster. A manifest workload is defined as a set of kubernetes resources. 
ManifestWork must be created in the cluster namespace on the hub, so that agent on the corresponding managed cluster can access this resource and deploy on the managed cluster.", + "spec": "Spec represents a desired configuration of work to be deployed on the managed cluster.", "status": "Status represents the current status of work", } @@ -87,8 +87,8 @@ func (ManifestWorkList) SwaggerDoc() map[string]string { } var map_ManifestWorkSpec = map[string]string{ - "": "ManifestWorkSpec represents a desired configuration of manifests to be deployed on the spoke cluster.", - "workload": "Workload represents the manifest workload to be deployed on spoke cluster", + "": "ManifestWorkSpec represents a desired configuration of manifests to be deployed on the managed cluster.", + "workload": "Workload represents the manifest workload to be deployed on managed cluster", } func (ManifestWorkSpec) SwaggerDoc() map[string]string { @@ -96,10 +96,10 @@ func (ManifestWorkSpec) SwaggerDoc() map[string]string { } var map_ManifestWorkStatus = map[string]string{ - "": "ManifestWorkStatus represents the current status of spoke manifest workload", - "conditions": "Conditions contains the different condition statuses for this work. Valid condition types are: 1. Applied represents workload in ManifestWork is applied successfully on spoke cluster. 2. Progressing represents workload in ManifestWork is being applied on spoke cluster. 3. Available represents workload in ManifestWork exists on the spoke cluster. 4. Degraded represents the current state of workload does not match the desired state for a certain period.", - "resourceStatus": "ResourceStatus represents the status of each resource in manifestwork deployed on spoke cluster. The agent on spoke cluster syncs the condition from spoke to the hub.", - "appliedResources": "AppliedResources represents a list of resources defined within the manifestwork that are applied. Only resources with valid GroupVersionResource, namespace, and name are suitable. An item in this slice is deleted when there is no mapped manifest in manifestwork.Spec or by finalizer. The resource relating to the item will also be removed from spoke cluster. The deleted resource may still be present until the finalizers for that resource are finished. However, the resource will not be undeleted, so it can be removed from this list and eventual consistency is preserved.", + "": "ManifestWorkStatus represents the current status of managed cluster ManifestWork", + "conditions": "Conditions contains the different condition statuses for this work. Valid condition types are: 1. Applied represents workload in ManifestWork is applied successfully on managed cluster. 2. Progressing represents workload in ManifestWork is being applied on managed cluster. 3. Available represents workload in ManifestWork exists on the managed cluster. 4. Degraded represents the current state of workload does not match the desired state for a certain period.", + "resourceStatus": "ResourceStatus represents the status of each resource in manifestwork deployed on managed cluster. The Klusterlet agent on managed cluster syncs the condition from managed to the hub.", + "appliedResources": "AppliedResources represents a list of resources defined within the manifestwork that are applied. Only resources with valid GroupVersionResource, namespace, and name are suitable. An item in this slice is deleted when there is no mapped manifest in manifestwork.Spec or by finalizer. 
The resource relating to the item will also be removed from managed cluster. The deleted resource may still be present until the finalizers for that resource are finished. However, the resource will not be undeleted, so it can be removed from this list and eventual consistency is preserved.", } func (ManifestWorkStatus) SwaggerDoc() map[string]string { @@ -107,8 +107,8 @@ func (ManifestWorkStatus) SwaggerDoc() map[string]string { } var map_ManifestsTemplate = map[string]string{ - "": "ManifestsTemplate represents the manifest workload to be deployed on spoke cluster", - "manifests": "Manifests represents a list of kuberenetes resources to be deployed on the spoke cluster.", + "": "ManifestsTemplate represents the manifest workload to be deployed on managed cluster", + "manifests": "Manifests represents a list of kuberenetes resources to be deployed on the managed cluster.", } func (ManifestsTemplate) SwaggerDoc() map[string]string { @@ -116,8 +116,8 @@ func (ManifestsTemplate) SwaggerDoc() map[string]string { } var map_StatusCondition = map[string]string{ - "": "StatusCondition contains condition information for a spoke work.", - "type": "Type is the type of the spoke work condition.", + "": "StatusCondition contains condition information for a ManifestWork applied to a managed cluster.", + "type": "Type is the type of the ManifestWork condition.", "status": "Status is the status of the condition. One of True, False, Unknown.", "lastTransitionTime": "LastTransitionTime is the last time the condition changed from one status to another.", "reason": "Reason is a (brief) reason for the condition's last status change.", diff --git a/vendor/modules.txt b/vendor/modules.txt index 049cb6c21..febdf614c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -133,7 +133,7 @@ github.com/onsi/gomega/matchers/support/goraph/edge github.com/onsi/gomega/matchers/support/goraph/node github.com/onsi/gomega/matchers/support/goraph/util github.com/onsi/gomega/types -# github.com/open-cluster-management/api v0.0.0-20200601153054-56b58ce890e1 +# github.com/open-cluster-management/api v0.0.0-20200602195039-a516cac2e038 github.com/open-cluster-management/api/client/operator/clientset/versioned github.com/open-cluster-management/api/client/operator/clientset/versioned/fake github.com/open-cluster-management/api/client/operator/clientset/versioned/scheme
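Similarly, a minimal sketch of the Klusterlet custom resource that the operator in this repo now reconciles under the renamed operator/v1 API. The image pull specs, cluster name, and external server URL below are illustrative placeholders, not values mandated by this patch.

package main

import (
	"encoding/json"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	operatorv1 "github.com/open-cluster-management/api/operator/v1"
)

func main() {
	klusterlet := &operatorv1.Klusterlet{
		ObjectMeta: metav1.ObjectMeta{Name: "klusterlet"},
		Spec: operatorv1.KlusterletSpec{
			// Defaults to "open-cluster-management-agent" when left empty.
			Namespace: "open-cluster-management-agent",
			// Placeholder image pull specs.
			RegistrationImagePullSpec: "quay.io/open-cluster-management/registration:latest",
			WorkImagePullSpec:         "quay.io/open-cluster-management/work:latest",
			// If left empty, the Klusterlet agent generates a random cluster
			// name, or discovers the appropriate name on OpenShift.
			ClusterName: "cluster1",
			ExternalServerURLs: []operatorv1.ServerURL{
				{URL: "https://cluster1.example.com:6443"},
			},
		},
	}

	out, _ := json.MarshalIndent(klusterlet, "", "  ")
	fmt.Println(string(out))
}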