Mirror of https://github.com/clastix/kamaji.git (synced 2026-03-03 10:11:33 +00:00)
Compare commits
9 Commits
helm-v0.4.0 ... helm-v0.6.0
| Author | SHA1 | Date |
|---|---|---|
| | aceeced53a | |
| | 53c9102ef3 | |
| | 15e1cf7d80 | |
| | f853f25195 | |
| | 5acdc4cc41 | |
| | 360e8200cb | |
| | b0c6972873 | |
| | 682006f8aa | |
| | d59f494a69 | |
@@ -22,6 +22,7 @@ COPY main.go main.go
 COPY api/ api/
 COPY controllers/ controllers/
 COPY internal/ internal/
+COPY indexers/ indexers/

 # Build
 RUN CGO_ENABLED=0 GOOS=linux GOARCH=$TARGETARCH go build \
@@ -75,10 +75,11 @@ type DataStoreSetupStatus struct {

 // StorageStatus defines the observed state of StorageStatus.
 type StorageStatus struct {
-	Driver      string                     `json:"driver,omitempty"`
-	Config      DataStoreConfigStatus      `json:"config,omitempty"`
-	Setup       DataStoreSetupStatus       `json:"setup,omitempty"`
-	Certificate DataStoreCertificateStatus `json:"certificate,omitempty"`
+	Driver        string                     `json:"driver,omitempty"`
+	DataStoreName string                     `json:"dataStoreName,omitempty"`
+	Config        DataStoreConfigStatus      `json:"config,omitempty"`
+	Setup         DataStoreSetupStatus       `json:"setup,omitempty"`
+	Certificate   DataStoreCertificateStatus `json:"certificate,omitempty"`
 }

 // KubeconfigStatus contains information about the generated kubeconfig.
@@ -85,6 +85,11 @@ type ControlPlaneComponentsResources struct {
 type DeploymentSpec struct {
 	// +kubebuilder:default=2
 	Replicas int32 `json:"replicas,omitempty"`
+	// TopologySpreadConstraints describes how the Tenant Control Plane pods ought to spread across topology
+	// domains. Scheduler will schedule pods in a way which abides by the constraints.
+	// In case of nil underlying LabelSelector, the Kamaji one for the given Tenant Control Plane will be used.
+	// All topologySpreadConstraints are ANDed.
+	TopologySpreadConstraints []corev1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"`
 	// Resources defines the amount of memory and CPU to allocate to each component of the Control Plane
 	// (kube-apiserver, controller-manager, and scheduler).
 	Resources *ControlPlaneComponentsResources `json:"resources,omitempty"`
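Not part of the diff, but a minimal sketch of how the new field could be populated. The `DeploymentSpec` fields are the ones introduced above, the `corev1.TopologySpreadConstraint` shape matches the CRD schema further down, and the replica count and zone key are purely illustrative:

```go
package example

import (
	corev1 "k8s.io/api/core/v1"

	kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
)

// Spread the Tenant Control Plane pods evenly across availability zones.
// LabelSelector is deliberately left nil: per the field comment above,
// Kamaji then falls back to the selector of the given Tenant Control Plane.
var deployment = kamajiv1alpha1.DeploymentSpec{
	Replicas: 3,
	TopologySpreadConstraints: []corev1.TopologySpreadConstraint{{
		MaxSkew:           1,
		TopologyKey:       "topology.kubernetes.io/zone",
		WhenUnsatisfiable: corev1.DoNotSchedule,
	}},
}
```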
@@ -144,6 +149,10 @@ type AddonsSpec struct {

 // TenantControlPlaneSpec defines the desired state of TenantControlPlane.
 type TenantControlPlaneSpec struct {
+	// DataStore allows to specify a DataStore that should be used to store the Kubernetes data for the given Tenant Control Plane.
+	// This parameter is optional and acts as an override over the default one which is used by the Kamaji Operator.
+	// Migration from a different DataStore to another one is not yet supported and the reconciliation will be blocked.
+	DataStore string `json:"dataStore,omitempty"`
 	ControlPlane ControlPlane `json:"controlPlane"`
 	// Kubernetes specification for tenant control plane
 	Kubernetes KubernetesSpec `json:"kubernetes"`
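Again a sketch rather than part of the diff (the DataStore name is hypothetical): opting a single Tenant Control Plane into a dedicated DataStore only needs the new optional field. Leaving it empty keeps the operator-wide default, and changing it after the first reconciliation is rejected until migration support lands:

```go
tcp := kamajiv1alpha1.TenantControlPlane{
	Spec: kamajiv1alpha1.TenantControlPlaneSpec{
		// Optional override of the default DataStore configured on the
		// Kamaji Operator; must match an existing DataStore object.
		DataStore: "dedicated-etcd",
		// ControlPlane and Kubernetes are filled in as for any other
		// Tenant Control Plane and are omitted from this sketch.
	},
}
```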
@@ -520,6 +520,13 @@ func (in *DataStoreStatus) DeepCopy() *DataStoreStatus {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) {
 	*out = *in
+	if in.TopologySpreadConstraints != nil {
+		in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints
+		*out = make([]v1.TopologySpreadConstraint, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
 	if in.Resources != nil {
 		in, out := &in.Resources, &out.Resources
 		*out = new(ControlPlaneComponentsResources)
@@ -15,7 +15,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.4.0
+version: 0.6.0

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
@@ -268,6 +268,194 @@ spec:
            type: object
          type: object
        type: object
      topologySpreadConstraints:
        description: TopologySpreadConstraints describes how the Tenant Control Plane pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. In case of nil underlying LabelSelector, the Kamaji one for the given Tenant Control Plane will be used. All topologySpreadConstraints are ANDed.
        items:
          description: TopologySpreadConstraint specifies how to spread matching pods among the given topology.
          properties:
            labelSelector:
              description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.
              properties:
                matchExpressions:
                  description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
                  items:
                    description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
                    properties:
                      key:
                        description: key is the label key that the selector applies to.
                        type: string
                      operator:
                        description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
                        type: string
                      values:
                        description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
                        items:
                          type: string
                        type: array
                    required:
                    - key
                    - operator
                    type: object
                  type: array
                matchLabels:
                  additionalProperties:
                    type: string
                  description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
                  type: object
              type: object
              x-kubernetes-map-type: atomic
            matchLabelKeys:
              description: MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector.
              items:
                type: string
              type: array
              x-kubernetes-list-type: atomic
            maxSkew:
              description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. | zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.'
              format: int32
              type: integer
            minDomains:
              description: "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. \n This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default)."
              format: int32
              type: integer
            nodeAffinityPolicy:
              description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
              type: string
            nodeTaintsPolicy:
              description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
              type: string
            topologyKey:
              description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field.
              type: string
            whenUnsatisfiable:
              description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.'
              type: string
          required:
          - maxSkew
          - topologyKey
          - whenUnsatisfiable
          type: object
        type: array
    type: object
  ingress:
    description: Defining the options for an Optional Ingress which will expose API Server of the Tenant Control Plane
@@ -328,6 +516,14 @@ spec:
        required:
        - service
        type: object
      dataStore:
        description: DataStore allows to specify a DataStore that should be used to store the Kubernetes data for the given Tenant Control Plane. This parameter is optional and acts as an override over the default one which is used by the Kamaji Operator. Migration from a different DataStore to another one is not yet supported and the reconciliation will be blocked.
        type: string
      kubernetes:
        description: Kubernetes specification for tenant control plane
        properties:
@@ -1308,6 +1504,8 @@ spec:
          secretName:
            type: string
        type: object
      dataStoreName:
        type: string
      driver:
        type: string
      setup:
@@ -230,6 +230,74 @@ spec:
            type: object
          type: object
        type: object
      topologySpreadConstraints:
        description: TopologySpreadConstraints describes how the Tenant Control Plane pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. In case of nil underlying LabelSelector, the Kamaji one for the given Tenant Control Plane will be used. All topologySpreadConstraints are ANDed.
        items:
          description: TopologySpreadConstraint specifies how to spread matching pods among the given topology.
          properties:
            labelSelector:
              description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.
              properties:
                matchExpressions:
                  description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
                  items:
                    description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
                    properties:
                      key:
                        description: key is the label key that the selector applies to.
                        type: string
                      operator:
                        description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
                        type: string
                      values:
                        description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
                        items:
                          type: string
                        type: array
                    required:
                    - key
                    - operator
                    type: object
                  type: array
                matchLabels:
                  additionalProperties:
                    type: string
                  description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
                  type: object
              type: object
              x-kubernetes-map-type: atomic
            matchLabelKeys:
              description: MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector.
              items:
                type: string
              type: array
              x-kubernetes-list-type: atomic
            maxSkew:
              description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. | zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.'
              format: int32
              type: integer
            minDomains:
              description: "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. \n This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default)."
              format: int32
              type: integer
            nodeAffinityPolicy:
              description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
              type: string
            nodeTaintsPolicy:
              description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag."
              type: string
            topologyKey:
              description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field.
              type: string
            whenUnsatisfiable:
              description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.'
              type: string
          required:
          - maxSkew
          - topologyKey
          - whenUnsatisfiable
          type: object
        type: array
    type: object
  ingress:
    description: Defining the options for an Optional Ingress which will expose API Server of the Tenant Control Plane
@@ -280,6 +348,9 @@ spec:
        required:
        - service
        type: object
      dataStore:
        description: DataStore allows to specify a DataStore that should be used to store the Kubernetes data for the given Tenant Control Plane. This parameter is optional and acts as an override over the default one which is used by the Kamaji Operator. Migration from a different DataStore to another one is not yet supported and the reconciliation will be blocked.
        type: string
      kubernetes:
        description: Kubernetes specification for tenant control plane
        properties:
@@ -1068,6 +1139,8 @@ spec:
          secretName:
            type: string
        type: object
      dataStoreName:
        type: string
      driver:
        type: string
      setup:
@@ -5,18 +5,30 @@ package controllers

 import (
 	"context"
 	"fmt"

 	"github.com/pkg/errors"
 	k8serrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/fields"
 	k8stypes "k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/client-go/util/workqueue"
 	controllerruntime "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/builder"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 	"sigs.k8s.io/controller-runtime/pkg/event"
 	"sigs.k8s.io/controller-runtime/pkg/handler"
 	"sigs.k8s.io/controller-runtime/pkg/log"
 	"sigs.k8s.io/controller-runtime/pkg/predicate"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 	"sigs.k8s.io/controller-runtime/pkg/source"

 	kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
 	"github.com/clastix/kamaji/indexers"
 )

 const (
 	dataStoreFinalizer = "finalizer.kamaji.clastix.io/datastore"
 )

 type DataStore struct {
@@ -25,39 +37,87 @@ type DataStore struct {
 	// if a Data Source is updated we have to be sure that the reconciliation of the certificates content
 	// for each Tenant Control Plane is put in place properly.
 	TenantControlPlaneTrigger TenantControlPlaneChannel
-	// ResourceName is the DataStore object that should be watched for changes.
-	ResourceName string
 }

 //+kubebuilder:rbac:groups=kamaji.clastix.io,resources=datastores,verbs=get;list;watch;create;update;patch;delete
 //+kubebuilder:rbac:groups=kamaji.clastix.io,resources=datastores/status,verbs=get;update;patch

 func (r *DataStore) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
-	ds := kamajiv1alpha1.DataStore{}
-	if err := r.client.Get(ctx, request.NamespacedName, &ds); err != nil {
+	log := log.FromContext(ctx)
+
+	ds := &kamajiv1alpha1.DataStore{}
+	if err := r.client.Get(ctx, request.NamespacedName, ds); err != nil {
 		if k8serrors.IsNotFound(err) {
 			return reconcile.Result{}, nil
 		}
+
+		log.Error(err, "unable to retrieve the request")
+
 		return reconcile.Result{}, err
 	}
+	// Managing the finalizer, required to don't drop a DataSource if this is still used by a Tenant Control Plane.
+	switch {
+	case ds.DeletionTimestamp != nil && controllerutil.ContainsFinalizer(ds, dataStoreFinalizer):
+		log.Info("marked for deletion, checking conditions")
+
+		if len(ds.Status.UsedBy) == 0 {
+			log.Info("resource is no more used by any Tenant Control Plane")
+
+			controllerutil.RemoveFinalizer(ds, dataStoreFinalizer)
+
+			return reconcile.Result{}, r.client.Update(ctx, ds)
+		}
+
+		log.Info("DataStore is still used by some Tenant Control Planes, cannot be removed")
+	case ds.DeletionTimestamp == nil && !controllerutil.ContainsFinalizer(ds, dataStoreFinalizer):
+		log.Info("the resource is missing the required finalizer, adding it")
+
+		controllerutil.AddFinalizer(ds, dataStoreFinalizer)
+
+		return reconcile.Result{}, r.client.Update(ctx, ds)
+	}
 	// A Data Source can trigger several Tenant Control Planes and requires a minimum validation:
 	// we have to ensure the data provided by the Data Source is valid and referencing an existing Secret object.
 	if _, err := ds.Spec.TLSConfig.CertificateAuthority.Certificate.GetContent(ctx, r.client); err != nil {
-		return reconcile.Result{}, errors.Wrap(err, "invalid Certificate Authority data")
+		log.Error(err, "invalid Certificate Authority data")
+
+		return reconcile.Result{}, err
 	}
+
+	if ds.Spec.Driver == kamajiv1alpha1.EtcdDriver {
+		if ds.Spec.TLSConfig.CertificateAuthority.PrivateKey == nil {
+			err := fmt.Errorf("a valid private key is required for the etcd driver")
+
+			log.Error(err, "missing Certificate Authority private key data")
+
+			return reconcile.Result{}, err
+		}
+		if _, err := ds.Spec.TLSConfig.CertificateAuthority.PrivateKey.GetContent(ctx, r.client); err != nil {
+			log.Error(err, "invalid Certificate Authority private key data")
+
+			return reconcile.Result{}, err
+		}
+	}

 	if _, err := ds.Spec.TLSConfig.ClientCertificate.Certificate.GetContent(ctx, r.client); err != nil {
-		return reconcile.Result{}, errors.Wrap(err, "invalid Client Certificate data")
+		log.Error(err, "invalid Client Certificate data")
+
+		return reconcile.Result{}, err
 	}

 	if _, err := ds.Spec.TLSConfig.ClientCertificate.PrivateKey.GetContent(ctx, r.client); err != nil {
-		return reconcile.Result{}, errors.Wrap(err, "invalid Client Certificate data")
+		log.Error(err, "invalid Client Certificate private key data")
+
+		return reconcile.Result{}, err
 	}

 	tcpList := kamajiv1alpha1.TenantControlPlaneList{}

-	if err := r.client.List(ctx, &tcpList); err != nil {
+	if err := r.client.List(ctx, &tcpList, client.MatchingFieldsSelector{
+		Selector: fields.OneTermEqualSelector(indexers.TenantControlPlaneUsedDataStoreKey, ds.GetName()),
+	}); err != nil {
+		log.Error(err, "cannot retrieve list of the Tenant Control Plane using the following instance")
+
 		return reconcile.Result{}, err
 	}
 	// Updating the status with the list of Tenant Control Plane using the following Data Source
@@ -68,7 +128,9 @@ func (r *DataStore) Reconcile(ctx context.Context, request reconcile.Request) (r

 	ds.Status.UsedBy = tcpSets.List()

-	if err := r.client.Status().Update(ctx, &ds); err != nil {
+	if err := r.client.Status().Update(ctx, ds); err != nil {
+		log.Error(err, "cannot update the status for the given instance")
+
 		return reconcile.Result{}, err
 	}
 	// Triggering the reconciliation of the Tenant Control Plane upon a Secret change
@@ -88,9 +150,31 @@ func (r *DataStore) InjectClient(client client.Client) error {
 }

 func (r *DataStore) SetupWithManager(mgr controllerruntime.Manager) error {
+	enqueueFn := func(tcp *kamajiv1alpha1.TenantControlPlane, limitingInterface workqueue.RateLimitingInterface) {
+		if dataStoreName := tcp.Status.Storage.DataStoreName; len(dataStoreName) > 0 {
+			limitingInterface.AddRateLimited(reconcile.Request{
+				NamespacedName: k8stypes.NamespacedName{
+					Name: dataStoreName,
+				},
+			})
+		}
+	}
+	//nolint:forcetypeassert
 	return controllerruntime.NewControllerManagedBy(mgr).
-		For(&kamajiv1alpha1.DataStore{}, builder.WithPredicates(predicate.NewPredicateFuncs(func(object client.Object) bool {
-			return object.GetName() == r.ResourceName
-		}))).
+		For(&kamajiv1alpha1.DataStore{}, builder.WithPredicates(
+			predicate.ResourceVersionChangedPredicate{},
+		)).
+		Watches(&source.Kind{Type: &kamajiv1alpha1.TenantControlPlane{}}, handler.Funcs{
+			CreateFunc: func(createEvent event.CreateEvent, limitingInterface workqueue.RateLimitingInterface) {
+				enqueueFn(createEvent.Object.(*kamajiv1alpha1.TenantControlPlane), limitingInterface)
+			},
+			UpdateFunc: func(updateEvent event.UpdateEvent, limitingInterface workqueue.RateLimitingInterface) {
+				enqueueFn(updateEvent.ObjectOld.(*kamajiv1alpha1.TenantControlPlane), limitingInterface)
+				enqueueFn(updateEvent.ObjectNew.(*kamajiv1alpha1.TenantControlPlane), limitingInterface)
+			},
+			DeleteFunc: func(deleteEvent event.DeleteEvent, limitingInterface workqueue.RateLimitingInterface) {
+				enqueueFn(deleteEvent.Object.(*kamajiv1alpha1.TenantControlPlane), limitingInterface)
+			},
+		}).
 		Complete(r)
 }
@@ -175,7 +175,7 @@ func getKubernetesStorageResources(c client.Client, dbConnection datastore.Conne
 		&ds.Config{
 			Client:     c,
 			ConnString: dbConnection.GetConnectionString(),
-			Driver:     dbConnection.Driver(),
+			DataStore:  datastore,
 		},
 		&ds.Setup{
 			Client: c,
@@ -29,7 +29,7 @@ import (
 )

 const (
-	finalizer = "finalizer.kamaji.clastix.io"
+	tenantControlPlaneFinalizer = "finalizer.kamaji.clastix.io"
 )

 // TenantControlPlaneReconciler reconciles a TenantControlPlane object.
@@ -42,9 +42,9 @@ type TenantControlPlaneReconciler struct {

 // TenantControlPlaneReconcilerConfig gives the necessary configuration for TenantControlPlaneReconciler.
 type TenantControlPlaneReconcilerConfig struct {
-	DataStoreName      string
-	KineContainerImage string
-	TmpBaseDirectory   string
+	DefaultDataStoreName string
+	KineContainerImage   string
+	TmpBaseDirectory     string
 }

 //+kubebuilder:rbac:groups=kamaji.clastix.io,resources=tenantcontrolplanes,verbs=get;list;watch;create;update;patch;delete
@@ -62,6 +62,8 @@ func (r *TenantControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.R
 	tenantControlPlane := &kamajiv1alpha1.TenantControlPlane{}
 	isTenantControlPlane, err := r.getTenantControlPlane(ctx, req.NamespacedName, tenantControlPlane)
 	if err != nil {
+		log.Error(err, "cannot retrieve the required instance")
+
 		return ctrl.Result{}, err
 	}
 	if !isTenantControlPlane {
@@ -69,19 +71,23 @@ func (r *TenantControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.R
 	}

 	markedToBeDeleted := tenantControlPlane.GetDeletionTimestamp() != nil
-	hasFinalizer := hasFinalizer(*tenantControlPlane)
+	hasFinalizer := controllerutil.ContainsFinalizer(tenantControlPlane, tenantControlPlaneFinalizer)

 	if markedToBeDeleted && !hasFinalizer {
 		return ctrl.Result{}, nil
 	}
+	// Retrieving the DataStore to use for the current reconciliation
+	ds, err := r.dataStore(ctx, tenantControlPlane)
+	if err != nil {
+		log.Error(err, "cannot retrieve the DataStore for the given instance")
+
-	ds := kamajiv1alpha1.DataStore{}
-	if err = r.Client.Get(ctx, k8stypes.NamespacedName{Name: r.Config.DataStoreName}, &ds); err != nil {
-		return ctrl.Result{}, errors.Wrap(err, "cannot retrieve kamajiv1alpha.DataStore object")
+		return ctrl.Result{}, err
 	}

-	dsConnection, err := r.getStorageConnection(ctx, ds)
+	dsConnection, err := r.getStorageConnection(ctx, *ds)
 	if err != nil {
+		log.Error(err, "cannot generate the DataStore connection for the given instance")
+
 		return ctrl.Result{}, err
 	}
 	defer dsConnection.Close()
@@ -100,6 +106,8 @@ func (r *TenantControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.R

 	for _, resource := range registeredDeletableResources {
 		if err = resources.HandleDeletion(ctx, resource, tenantControlPlane); err != nil {
+			log.Error(err, "resource deletion failed", "resource", resource.GetName())
+
 			return ctrl.Result{}, err
 		}
 	}
@@ -108,6 +116,8 @@ func (r *TenantControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.R
 		log.Info("removing finalizer")

 		if err = r.RemoveFinalizer(ctx, tenantControlPlane); err != nil {
+			log.Error(err, "cannot remove the finalizer for the given resource")
+
 			return ctrl.Result{}, err
 		}
 	}
@@ -126,7 +136,7 @@ func (r *TenantControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.R
 		log:                 log,
 		tcpReconcilerConfig: r.Config,
 		tenantControlPlane:  *tenantControlPlane,
-		DataStore:           ds,
+		DataStore:           *ds,
 		Connection:          dsConnection,
 	}
 	registeredResources := GetResources(groupResourceBuilderConfiguration)
@@ -140,6 +150,8 @@ func (r *TenantControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.R
 			return ctrl.Result{Requeue: true}, nil
 		}

+		log.Error(err, "handling of resource failed", "resource", resource.GetName())
+
 		return ctrl.Result{}, err
 	}
@@ -148,6 +160,8 @@ func (r *TenantControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.R
 	}

 	if err := r.updateStatus(ctx, req.NamespacedName, resource); err != nil {
+		log.Error(err, "update of the resource failed", "resource", resource.GetName())
+
 		return ctrl.Result{}, err
 	}
@@ -215,24 +229,34 @@ func (r *TenantControlPlaneReconciler) updateStatus(ctx context.Context, namespa
 	return nil
 }

-func hasFinalizer(tenantControlPlane kamajiv1alpha1.TenantControlPlane) bool {
-	for _, f := range tenantControlPlane.GetFinalizers() {
-		if f == finalizer {
-			return true
-		}
-	}
-
-	return false
-}
-
 func (r *TenantControlPlaneReconciler) AddFinalizer(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
-	controllerutil.AddFinalizer(tenantControlPlane, finalizer)
+	controllerutil.AddFinalizer(tenantControlPlane, tenantControlPlaneFinalizer)

 	return r.Update(ctx, tenantControlPlane)
 }

 func (r *TenantControlPlaneReconciler) RemoveFinalizer(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
-	controllerutil.RemoveFinalizer(tenantControlPlane, finalizer)
+	controllerutil.RemoveFinalizer(tenantControlPlane, tenantControlPlaneFinalizer)

 	return r.Update(ctx, tenantControlPlane)
 }

+// dataStore retrieves the override DataStore for the given Tenant Control Plane if specified,
+// otherwise fallback to the default one specified in the Kamaji setup.
+func (r *TenantControlPlaneReconciler) dataStore(ctx context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) (*kamajiv1alpha1.DataStore, error) {
+	dataStoreName := tenantControlPlane.Spec.DataStore
+	if len(dataStoreName) == 0 {
+		dataStoreName = r.Config.DefaultDataStoreName
+	}
+
+	if statusDataStore := tenantControlPlane.Status.Storage.DataStoreName; len(statusDataStore) > 0 && dataStoreName != statusDataStore {
+		return nil, fmt.Errorf("migration from a DataStore (current: %s) to another one (desired: %s) is not yet supported", statusDataStore, dataStoreName)
+	}
+
+	ds := &kamajiv1alpha1.DataStore{}
+	if err := r.Client.Get(ctx, k8stypes.NamespacedName{Name: dataStoreName}, ds); err != nil {
+		return nil, errors.Wrap(err, "cannot retrieve *kamajiv1alpha.DataStore object")
+	}
+
+	return ds, nil
+}
indexers/indexer.go (new file, 12 lines)
@@ -0,0 +1,12 @@
+// Copyright 2022 Clastix Labs
+// SPDX-License-Identifier: Apache-2.0
+
+package indexers
+
+import "sigs.k8s.io/controller-runtime/pkg/client"
+
+type Indexer interface {
+	Object() client.Object
+	Field() string
+	ExtractValue() client.IndexerFunc
+}
indexers/tcp_useddatastore.go (new file, 40 lines)
@@ -0,0 +1,40 @@
+// Copyright 2022 Clastix Labs
+// SPDX-License-Identifier: Apache-2.0
+
+package indexers
+
+import (
+	"context"
+
+	controllerruntime "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
+)
+
+const (
+	TenantControlPlaneUsedDataStoreKey = "status.storage.dataStoreName"
+)
+
+type TenantControlPlaneStatusDataStore struct{}
+
+func (t *TenantControlPlaneStatusDataStore) Object() client.Object {
+	return &kamajiv1alpha1.TenantControlPlane{}
+}
+
+func (t *TenantControlPlaneStatusDataStore) Field() string {
+	return TenantControlPlaneUsedDataStoreKey
+}
+
+func (t *TenantControlPlaneStatusDataStore) ExtractValue() client.IndexerFunc {
+	return func(object client.Object) []string {
+		//nolint:forcetypeassert
+		tcp := object.(*kamajiv1alpha1.TenantControlPlane)
+
+		return []string{tcp.Status.Storage.DataStoreName}
+	}
+}
+
+func (t *TenantControlPlaneStatusDataStore) SetupWithManager(ctx context.Context, mgr controllerruntime.Manager) error {
+	return mgr.GetFieldIndexer().IndexField(ctx, t.Object(), t.Field(), t.ExtractValue())
+}
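The field index registered by `SetupWithManager` is what the DataStore controller relies on: instead of listing every Tenant Control Plane and filtering in memory, a cache-backed client can select on the indexed status field directly. A sketch of the consuming side, mirroring the `client.MatchingFieldsSelector` call in the DataStore controller above (`c`, `ctx`, and `dsName` are assumed to be in scope):

```go
tcpList := kamajiv1alpha1.TenantControlPlaneList{}
if err := c.List(ctx, &tcpList, client.MatchingFieldsSelector{
	// Matches only the Tenant Control Planes whose status reports
	// dsName as the DataStore currently in use.
	Selector: fields.OneTermEqualSelector(indexers.TenantControlPlaneUsedDataStoreKey, dsName),
}); err != nil {
	// handle the listing error
}
```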
@@ -772,6 +772,18 @@ func (d *Deployment) SetAnnotations(resource *appsv1.Deployment, annotations map
 	resource.SetAnnotations(annotations)
 }

+func (d *Deployment) SetTopologySpreadConstraints(spec *appsv1.DeploymentSpec, topologies []corev1.TopologySpreadConstraint) {
+	defaultSelector := spec.Selector
+
+	for index, topology := range topologies {
+		if topology.LabelSelector == nil {
+			topologies[index].LabelSelector = defaultSelector
+		}
+	}
+
+	spec.Template.Spec.TopologySpreadConstraints = topologies
+}
+
 // ResetKubeAPIServerFlags ensures that upon a change of the kube-apiserver extra flags the desired ones are properly
 // applied, also considering that the container could be lately patched by the konnectivity addon resources.
 func (d *Deployment) ResetKubeAPIServerFlags(resource *appsv1.Deployment, tcp *kamajiv1alpha1.TenantControlPlane) {
@@ -20,12 +20,12 @@ type Config struct {
 	resource *corev1.Secret

 	Client     client.Client
 	ConnString string
-	Driver     string
+	DataStore  kamajiv1alpha1.DataStore
 }

 func (r *Config) ShouldStatusBeUpdated(_ context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) bool {
 	return tenantControlPlane.Status.Storage.Config.Checksum != r.resource.GetAnnotations()["checksum"] ||
-		tenantControlPlane.Status.Storage.Driver != r.Driver
+		tenantControlPlane.Status.Storage.DataStoreName != r.DataStore.GetName()
 }

 func (r *Config) ShouldCleanup(*kamajiv1alpha1.TenantControlPlane) bool {
@@ -64,7 +64,8 @@ func (r *Config) GetName() string {
 }

 func (r *Config) UpdateTenantControlPlaneStatus(_ context.Context, tenantControlPlane *kamajiv1alpha1.TenantControlPlane) error {
-	tenantControlPlane.Status.Storage.Driver = r.Driver
+	tenantControlPlane.Status.Storage.Driver = string(r.DataStore.Spec.Driver)
+	tenantControlPlane.Status.Storage.DataStoreName = r.DataStore.GetName()
 	tenantControlPlane.Status.Storage.Config.SecretName = r.resource.GetName()
 	tenantControlPlane.Status.Storage.Config.Checksum = r.resource.GetAnnotations()["checksum"]
@@ -71,6 +71,7 @@ func (r *KubernetesDeploymentResource) mutate(ctx context.Context, tenantControl
 	d.SetTemplateLabels(&r.resource.Spec.Template, r.deploymentTemplateLabels(ctx, tenantControlPlane))
 	d.SetStrategy(&r.resource.Spec)
 	d.SetSelector(&r.resource.Spec, tenantControlPlane)
+	d.SetTopologySpreadConstraints(&r.resource.Spec, tenantControlPlane.Spec.ControlPlane.Deployment.TopologySpreadConstraints)
 	d.SetReplicas(&r.resource.Spec, tenantControlPlane)
 	d.ResetKubeAPIServerFlags(r.resource, tenantControlPlane)
 	d.SetContainers(&r.resource.Spec.Template.Spec, tenantControlPlane, address)
@@ -27,6 +27,7 @@ type Resource interface {
 }

 type DeleteableResource interface {
 	GetName() string
+	Define(context.Context, *kamajiv1alpha1.TenantControlPlane) error
 	Delete(context.Context, *kamajiv1alpha1.TenantControlPlane) error
 }
main.go (18 changed lines)
@@ -18,6 +18,7 @@ import (

 	kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
 	"github.com/clastix/kamaji/controllers"
+	"github.com/clastix/kamaji/indexers"
 	"github.com/clastix/kamaji/internal"
 	"github.com/clastix/kamaji/internal/config"
 )
@@ -35,6 +36,8 @@ func init() {
 }

 func main() {
+	ctx := ctrl.SetupSignalHandler()
+
 	conf, err := config.InitConfig()
 	if err != nil {
 		log.Fatalf("Error reading configuration.")
@@ -61,7 +64,7 @@ func main() {

 	tcpChannel := make(controllers.TenantControlPlaneChannel)

-	if err = (&controllers.DataStore{TenantControlPlaneTrigger: tcpChannel, ResourceName: conf.GetString("datastore")}).SetupWithManager(mgr); err != nil {
+	if err = (&controllers.DataStore{TenantControlPlaneTrigger: tcpChannel}).SetupWithManager(mgr); err != nil {
 		setupLog.Error(err, "unable to create controller", "controller", "DataStore")
 		os.Exit(1)
 	}
@@ -70,9 +73,9 @@ func main() {
 		Client: mgr.GetClient(),
 		Scheme: mgr.GetScheme(),
 		Config: controllers.TenantControlPlaneReconcilerConfig{
-			DataStoreName:      conf.GetString("datastore"),
-			KineContainerImage: conf.GetString("kine-image"),
-			TmpBaseDirectory:   conf.GetString("tmp-directory"),
+			DefaultDataStoreName: conf.GetString("datastore"),
+			KineContainerImage:   conf.GetString("kine-image"),
+			TmpBaseDirectory:     conf.GetString("tmp-directory"),
 		},
 		TriggerChan: tcpChannel,
 	}
@@ -82,6 +85,11 @@ func main() {
 		os.Exit(1)
 	}

+	if err = (&indexers.TenantControlPlaneStatusDataStore{}).SetupWithManager(ctx, mgr); err != nil {
+		setupLog.Error(err, "unable to create indexer", "indexer", "TenantControlPlaneStatusDataStore")
+		os.Exit(1)
+	}
+
 	//+kubebuilder:scaffold:builder

 	if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
@@ -94,7 +102,7 @@ func main() {
 	}

 	setupLog.Info("starting manager")
-	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
+	if err := mgr.Start(ctx); err != nil {
 		setupLog.Error(err, "problem running manager")
 		os.Exit(1)
 	}