Mirror of https://github.com/open-cluster-management-io/ocm.git (synced 2026-02-14 18:09:57 +00:00)
addon consume rollout helpers (#225)
Signed-off-by: haoqing0110 <qhao@redhat.com>
@@ -49,16 +49,17 @@ spec:
description: DeployOption contains the options of deploying a klusterlet
properties:
mode:
description: 'Mode can be Default, Hosted or Singleton. It is
Default mode if not specified In Default mode, all klusterlet
description: 'Mode can be Default, Hosted, Singleton or SingletonHosted.
It is Default mode if not specified In Default mode, all klusterlet
related resources are deployed on the managed cluster. In Hosted
mode, only crd and configurations are installed on the spoke/managed
cluster. Controllers run in another cluster (defined as management-cluster)
and connect to the managed cluster with the kubeconfig in secret
of "external-managed-kubeconfig"(a kubeconfig of managed-cluster
|
||||
with cluster-admin permission). In Singleton mode, registration/work
|
||||
agent is started as a single deployment. Note: Do not modify
|
||||
the Mode field once it''s applied.'
|
||||
agent is started as a single deployment. In SingletonHosted
|
||||
mode, agent is started as a single deployment in hosted mode.
|
||||
Note: Do not modify the Mode field once it''s applied.'
|
||||
type: string
|
||||
type: object
|
||||
externalServerURLs:
|
||||
|
||||
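For illustration, a minimal Go sketch of selecting the new SingletonHosted mode through the operator API. The type and constant names used here (KlusterletSpec, KlusterletDeployOption, InstallModeSingletonHosted) are assumed from the operator v1 Go package this CRD is generated from; they do not appear in this diff:

package main

import (
	"fmt"

	operatorv1 "open-cluster-management.io/api/operator/v1"
)

func main() {
	// Assumed API shape: the CRD enum above (Default, Hosted, Singleton,
	// SingletonHosted) is backed by InstallMode constants in operator/v1.
	klusterlet := operatorv1.Klusterlet{
		Spec: operatorv1.KlusterletSpec{
			DeployOption: operatorv1.KlusterletDeployOption{
				Mode: operatorv1.InstallModeSingletonHosted,
			},
		},
	}
	fmt.Println(klusterlet.Spec.DeployOption.Mode)
}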
go.mod (4 changed lines)
@@ -27,8 +27,8 @@ require (
k8s.io/klog/v2 v2.90.1
k8s.io/kube-aggregator v0.27.2
k8s.io/utils v0.0.0-20230313181309-38a27ef9d749
open-cluster-management.io/addon-framework v0.7.1-0.20230911140813-9676b4f8c180
open-cluster-management.io/api v0.11.1-0.20230905055724-cf1ead467a83
open-cluster-management.io/addon-framework v0.7.1-0.20230920005921-65bcbb446df8
open-cluster-management.io/api v0.11.1-0.20230919033310-0146ddfab71c
sigs.k8s.io/controller-runtime v0.15.0
sigs.k8s.io/kube-storage-version-migrator v0.0.5
)
go.sum (10 changed lines)
@@ -352,8 +352,8 @@ github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEo
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
@@ -1156,10 +1156,10 @@ k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20230313181309-38a27ef9d749 h1:xMMXJlJbsU8w3V5N2FLDQ8YgU8s1EoULdbQBcAeNJkY=
k8s.io/utils v0.0.0-20230313181309-38a27ef9d749/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
open-cluster-management.io/addon-framework v0.7.1-0.20230911140813-9676b4f8c180 h1:fEqGXqPJhBUSkeXXlJaAalX36F9NWRcv1pdbj2tYCfk=
open-cluster-management.io/addon-framework v0.7.1-0.20230911140813-9676b4f8c180/go.mod h1:8ESgg9EzyUZ2n5/Qgl8E2jnMmnd02YxXn92K5+Egedc=
open-cluster-management.io/api v0.11.1-0.20230905055724-cf1ead467a83 h1:3zbT3sT/tEAQbpjIk6uRiTQGknQ3kQlfd11ElVuXyyQ=
open-cluster-management.io/api v0.11.1-0.20230905055724-cf1ead467a83/go.mod h1:nsQ/G5JpfjQUg7dHpblyywWC6BRqklNaF6fIswVCHyY=
open-cluster-management.io/addon-framework v0.7.1-0.20230920005921-65bcbb446df8 h1:cVyjsSeboWwgg2bkMU2s78hkUTK3LzVyQMWwEf+/gRw=
open-cluster-management.io/addon-framework v0.7.1-0.20230920005921-65bcbb446df8/go.mod h1:xdIh8sARZ7zoH/KvHp9ATYoousIdotI+Js0VZt0+qtc=
open-cluster-management.io/api v0.11.1-0.20230919033310-0146ddfab71c h1:p73vRGhWgBucvoYmMHKlVjABz5SWBT0rmfzKqXnF1I0=
open-cluster-management.io/api v0.11.1-0.20230919033310-0146ddfab71c/go.mod h1:/CZhelEH+30/pX7vXGSZOzLMX0zvjthYOkT/5ZTzVTQ=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
@@ -139,108 +139,139 @@ spec:
type: string
rolloutStrategy:
default:
type: UpdateAll
type: All
description: The rollout strategy to apply addon configurations
change. The rollout strategy only watches the addon configurations
defined in ClusterManagementAddOn.
properties:
rollingUpdate:
description: Rolling update with placement config params.
Present only if the type is RollingUpdate.
all:
description: All define required fields for RolloutStrategy
type All
properties:
maxConcurrency:
anyOf:
- type: integer
- type: string
default: 25%
description: 'The maximum concurrently updating
number of clusters. Value can be an absolute number
(ex: 5) or a percentage of desired addons (ex:
10%). Absolute number is calculated from percentage
by rounding up. Defaults to 25%. Example: when
this is set to 30%, once the addon configs change,
the addon on 30% of the selected clusters will
adopt the new configs. When the addons with new
configs are healthy, the addon on the remaining
clusters will be further updated.'
x-kubernetes-int-or-string: true
timeout:
default: None
description: Timeout define how long workload applier
controller will wait till workload reach successful
state in the cluster. Timeout default value is
None meaning the workload applier will not proceed
apply workload to other clusters if did not reach
the successful state. Timeout must be defined
in [0-9h]|[0-9m]|[0-9s] format examples; 2h ,
90m , 360s
pattern: ^(([0-9])+[h|m|s])|None$
type: string
type: object
rollingUpdateWithCanary:
description: Rolling update with placement config params.
Present only if the type is RollingUpdateWithCanary.
progressive:
description: Progressive define required fields for
RolloutStrategy type Progressive
properties:
mandatoryDecisionGroups:
description: List of the decision groups names or
indexes to apply the workload first and fail if
workload did not reach successful state. GroupName
or GroupIndex must match with the decisionGroups
defined in the placement's decisionStrategy
items:
description: MandatoryDecisionGroup set the decision
group name or group index. GroupName is considered
first to select the decisionGroups then GroupIndex.
properties:
groupIndex:
description: GroupIndex of the decision group
should match the placementDecisions label
value with label key cluster.open-cluster-management.io/decision-group-index
format: int32
type: integer
groupName:
description: GroupName of the decision group
should match the placementDecisions label
value with label key cluster.open-cluster-management.io/decision-group-name
type: string
type: object
type: array
maxConcurrency:
anyOf:
- type: integer
- type: string
default: 25%
description: 'The maximum concurrently updating
number of clusters. Value can be an absolute number
(ex: 5) or a percentage of desired addons (ex:
10%). Absolute number is calculated from percentage
by rounding up. Defaults to 25%. Example: when
this is set to 30%, once the addon configs change,
the addon on 30% of the selected clusters will
adopt the new configs. When the addons with new
configs are healthy, the addon on the remaining
clusters will be further updated.'
description: MaxConcurrency is the max number of
clusters to deploy workload concurrently. The
default value for MaxConcurrency is determined
from the clustersPerDecisionGroup defined in the
placement->DecisionStrategy.
pattern: ^((100|[0-9]{1,2})%|[0-9]+)$
x-kubernetes-int-or-string: true
placement:
description: Canary placement reference.
properties:
name:
description: Name is the name of the placement
minLength: 1
type: string
namespace:
description: Namespace is the namespace of the
placement
minLength: 1
type: string
required:
- name
- namespace
type: object
required:
- placement
timeout:
default: None
description: Timeout define how long workload applier
controller will wait till workload reach successful
state in the cluster. Timeout default value is
None meaning the workload applier will not proceed
apply workload to other clusters if did not reach
the successful state. Timeout must be defined
in [0-9h]|[0-9m]|[0-9s] format examples; 2h ,
90m , 360s
pattern: ^(([0-9])+[h|m|s])|None$
type: string
type: object
progressivePerGroup:
description: ProgressivePerGroup define required fields
for RolloutStrategy type ProgressivePerGroup
properties:
mandatoryDecisionGroups:
description: List of the decision groups names or
indexes to apply the workload first and fail if
workload did not reach successful state. GroupName
or GroupIndex must match with the decisionGroups
defined in the placement's decisionStrategy
items:
description: MandatoryDecisionGroup set the decision
group name or group index. GroupName is considered
first to select the decisionGroups then GroupIndex.
properties:
groupIndex:
description: GroupIndex of the decision group
should match the placementDecisions label
value with label key cluster.open-cluster-management.io/decision-group-index
format: int32
type: integer
groupName:
description: GroupName of the decision group
should match the placementDecisions label
value with label key cluster.open-cluster-management.io/decision-group-name
type: string
type: object
type: array
timeout:
default: None
description: Timeout define how long workload applier
controller will wait till workload reach successful
state in the cluster. Timeout default value is
None meaning the workload applier will not proceed
apply workload to other clusters if did not reach
the successful state. Timeout must be defined
in [0-9h]|[0-9m]|[0-9s] format examples; 2h ,
90m , 360s
pattern: ^(([0-9])+[h|m|s])|None$
type: string
type: object
type:
default: UpdateAll
description: "Type is the type of the rollout strategy,
it supports UpdateAll, RollingUpdate and RollingUpdateWithCanary:
- UpdateAll: when configs change, apply the new configs
to all the selected clusters at once. This is the
default strategy. - RollingUpdate: when configs change,
apply the new configs to all the selected clusters
with the concurrence rate defined in MaxConcurrency.
- RollingUpdateWithCanary: when configs change, wait
and check if add-ons on the canary placement selected
clusters have applied the new configs and are healthy,
then apply the new configs to all the selected clusters
with the concurrence rate defined in MaxConcurrency.
\n The field lastKnownGoodConfig in the status record
the last successfully applied spec hash of canary
placement. If the config spec hash changes after the
canary is passed and before the rollout is done, the
current rollout will continue, then roll out to the
latest change. \n For example, the addon configs have
spec hash A. The canary is passed and the lastKnownGoodConfig
would be A, and all the selected clusters are rolling
out to A. Then the config spec hash changes to B.
At this time, the clusters will continue rolling out
to A. When the rollout is done and canary passed B,
the lastKnownGoodConfig would be B and all the clusters
will start rolling out to B. \n The canary placement
does not have to be a subset of the install placement,
and it is more like a reference for finding and checking
canary clusters before upgrading all. To trigger the
rollout on the canary clusters, you can define another
rollout strategy with the type RollingUpdate, or even
manually upgrade the addons on those clusters."
default: All
description: Rollout strategy Types are All, Progressive
and ProgressivePerGroup 1) All means apply the workload
to all clusters in the decision groups at once. 2)
Progressive means apply the workload to the selected
clusters progressively per cluster. The workload will
not be applied to the next cluster unless one of the
current applied clusters reach the successful state
or timeout. 3) ProgressivePerGroup means apply the
workload to decisionGroup clusters progressively per
group. The workload will not be applied to the next
decisionGroup unless all clusters in the current group
reach the successful state or timeout.
enum:
- UpdateAll
- RollingUpdate
- RollingUpdateWithCanary
- All
- Progressive
- ProgressivePerGroup
type: string
type: object
required:
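For illustration, a minimal Go sketch constructing the two new strategy types described above. It uses only the clusterv1alpha1 types and fields that this commit's tests exercise; the timeout and the default MaxConcurrency are left to the API defaults ("None" and the placement's clustersPerDecisionGroup, per the descriptions above):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
	clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1"
)

func main() {
	// Progressive: update at most 50% of the selected clusters at a time,
	// rolling the mandatory "group1" decision group out first.
	progressive := clusterv1alpha1.RolloutStrategy{
		Type: clusterv1alpha1.Progressive,
		Progressive: &clusterv1alpha1.RolloutProgressive{
			MandatoryDecisionGroups: clusterv1alpha1.MandatoryDecisionGroups{
				MandatoryDecisionGroups: []clusterv1alpha1.MandatoryDecisionGroup{
					{GroupName: "group1"},
				},
			},
			MaxConcurrency: intstr.FromString("50%"),
		},
	}

	// ProgressivePerGroup: roll out one decision group at a time, in
	// group index order.
	perGroup := clusterv1alpha1.RolloutStrategy{
		Type:                clusterv1alpha1.ProgressivePerGroup,
		ProgressivePerGroup: &clusterv1alpha1.RolloutProgressivePerGroup{},
	}

	fmt.Printf("%+v\n%+v\n", progressive, perGroup)
}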
@@ -19,7 +19,7 @@ func (d *managedClusterAddonConfigurationReconciler) reconcile(
ctx context.Context, cma *addonv1alpha1.ClusterManagementAddOn, graph *configurationGraph) (*addonv1alpha1.ClusterManagementAddOn, reconcileState, error) {
var errs []error

for _, addon := range graph.addonToUpdate() {
for _, addon := range graph.getAddonsToUpdate() {
mca := d.mergeAddonConfig(addon.mca, addon.desiredConfigs)
patcher := patcher.NewPatcher[
*addonv1alpha1.ManagedClusterAddOn, addonv1alpha1.ManagedClusterAddOnSpec, addonv1alpha1.ManagedClusterAddOnStatus](
@@ -23,7 +23,10 @@ import (
addoninformers "open-cluster-management.io/api/client/addon/informers/externalversions"
fakecluster "open-cluster-management.io/api/client/cluster/clientset/versioned/fake"
clusterv1informers "open-cluster-management.io/api/client/cluster/informers/externalversions"
clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1"
clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1"

"open-cluster-management.io/ocm/pkg/common/helpers"
)

func TestAddonConfigReconcile(t *testing.T) {
@@ -93,7 +96,10 @@ func TestAddonConfigReconcile(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "test-placement",
Namespace: "default",
Labels: map[string]string{clusterv1beta1.PlacementLabel: "test-placement"},
Labels: map[string]string{
clusterv1beta1.PlacementLabel: "test-placement",
clusterv1beta1.DecisionGroupIndexLabel: "0",
},
},
Status: clusterv1beta1.PlacementDecisionStatus{
Decisions: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster2"}},
@@ -110,7 +116,8 @@ func TestAddonConfigReconcile(t *testing.T) {
SpecHash: "hash",
},
}).WithPlacementStrategy(addonv1alpha1.PlacementStrategy{
PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"},
PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"},
RolloutStrategy: clusterv1alpha1.RolloutStrategy{Type: clusterv1alpha1.All},
}).WithInstallProgression(addonv1alpha1.InstallProgression{
PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"},
ConfigReferences: []addonv1alpha1.InstallConfigReference{
@@ -152,7 +159,7 @@ func TestAddonConfigReconcile(t *testing.T) {
newManagedClusterAddon("test", "cluster1", []addonv1alpha1.AddOnConfig{{
ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test2"},
}}, nil),
}}, nil, nil),
addontesting.NewAddon("test", "cluster2"),
},
placements: []runtime.Object{
@@ -163,7 +170,10 @@ func TestAddonConfigReconcile(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "test-placement",
Namespace: "default",
Labels: map[string]string{clusterv1beta1.PlacementLabel: "test-placement"},
Labels: map[string]string{
clusterv1beta1.PlacementLabel: "test-placement",
clusterv1beta1.DecisionGroupIndexLabel: "0",
},
},
Status: clusterv1beta1.PlacementDecisionStatus{
Decisions: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster1"}, {ClusterName: "cluster2"}},
@@ -180,7 +190,8 @@ func TestAddonConfigReconcile(t *testing.T) {
SpecHash: "hash",
},
}).WithPlacementStrategy(addonv1alpha1.PlacementStrategy{
PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"},
PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"},
RolloutStrategy: clusterv1alpha1.RolloutStrategy{Type: clusterv1alpha1.All},
}).WithInstallProgression(addonv1alpha1.InstallProgression{
PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"},
ConfigReferences: []addonv1alpha1.InstallConfigReference{
@@ -227,7 +238,7 @@ func TestAddonConfigReconcile(t *testing.T) {
SpecHash: "hash1",
},
LastObservedGeneration: 1,
}}),
}}, nil),
},
placements: []runtime.Object{
&clusterv1beta1.Placement{ObjectMeta: metav1.ObjectMeta{Name: "test-placement", Namespace: "default"}},
@@ -237,7 +248,10 @@ func TestAddonConfigReconcile(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "test-placement",
Namespace: "default",
Labels: map[string]string{clusterv1beta1.PlacementLabel: "test-placement"},
Labels: map[string]string{
clusterv1beta1.PlacementLabel: "test-placement",
clusterv1beta1.DecisionGroupIndexLabel: "0",
},
},
Status: clusterv1beta1.PlacementDecisionStatus{
Decisions: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster1"}},
@@ -248,7 +262,8 @@ func TestAddonConfigReconcile(t *testing.T) {
ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
DefaultConfig: &addonv1alpha1.ConfigReferent{Name: "test"},
}).WithPlacementStrategy(addonv1alpha1.PlacementStrategy{
PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"},
PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"},
RolloutStrategy: clusterv1alpha1.RolloutStrategy{Type: clusterv1alpha1.All},
}).WithInstallProgression(addonv1alpha1.InstallProgression{
PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"},
ConfigReferences: []addonv1alpha1.InstallConfigReference{
@@ -285,7 +300,7 @@ func TestAddonConfigReconcile(t *testing.T) {
SpecHash: "hash1",
},
LastObservedGeneration: 1,
}}),
}}, nil),
},
placements: []runtime.Object{
&clusterv1beta1.Placement{ObjectMeta: metav1.ObjectMeta{Name: "test-placement", Namespace: "default"}},
@@ -295,7 +310,10 @@ func TestAddonConfigReconcile(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "test-placement",
Namespace: "default",
Labels: map[string]string{clusterv1beta1.PlacementLabel: "test-placement"},
Labels: map[string]string{
clusterv1beta1.PlacementLabel: "test-placement",
clusterv1beta1.DecisionGroupIndexLabel: "0",
},
},
Status: clusterv1beta1.PlacementDecisionStatus{
Decisions: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster1"}},
@@ -306,7 +324,8 @@ func TestAddonConfigReconcile(t *testing.T) {
ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
DefaultConfig: &addonv1alpha1.ConfigReferent{Name: "test"},
}).WithPlacementStrategy(addonv1alpha1.PlacementStrategy{
PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"},
PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"},
RolloutStrategy: clusterv1alpha1.RolloutStrategy{Type: clusterv1alpha1.All},
Configs: []addonv1alpha1.AddOnConfig{v1alpha1.AddOnConfig{
ConfigGroupResource: v1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
ConfigReferent: v1alpha1.ConfigReferent{Name: "test1"}}},
@@ -346,7 +365,7 @@ func TestAddonConfigReconcile(t *testing.T) {
SpecHash: "hash1",
},
LastObservedGeneration: 1,
}}),
}}, nil),
},
placements: []runtime.Object{
&clusterv1beta1.Placement{ObjectMeta: metav1.ObjectMeta{Name: "test-placement", Namespace: "default"}},
@@ -356,7 +375,10 @@ func TestAddonConfigReconcile(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "test-placement",
Namespace: "default",
Labels: map[string]string{clusterv1beta1.PlacementLabel: "test-placement"},
Labels: map[string]string{
clusterv1beta1.PlacementLabel: "test-placement",
clusterv1beta1.DecisionGroupIndexLabel: "0",
},
},
Status: clusterv1beta1.PlacementDecisionStatus{
Decisions: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster1"}},
@@ -371,6 +393,7 @@ func TestAddonConfigReconcile(t *testing.T) {
Configs: []addonv1alpha1.AddOnConfig{v1alpha1.AddOnConfig{
ConfigGroupResource: v1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
ConfigReferent: v1alpha1.ConfigReferent{Name: "test1"}}},
RolloutStrategy: clusterv1alpha1.RolloutStrategy{Type: clusterv1alpha1.All},
}).WithInstallProgression(addonv1alpha1.InstallProgression{
PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"},
ConfigReferences: []addonv1alpha1.InstallConfigReference{
@@ -386,7 +409,7 @@ func TestAddonConfigReconcile(t *testing.T) {
validateAddonActions: addontesting.AssertNoActions,
},
{
name: "placement rolling update with MaxConcurrency 1",
name: "placement rollout progressive with MaxConcurrency 1",
managedClusteraddon: []runtime.Object{
addontesting.NewAddon("test", "cluster1"),
addontesting.NewAddon("test", "cluster2"),
@@ -400,7 +423,10 @@ func TestAddonConfigReconcile(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "test-placement",
Namespace: "default",
Labels: map[string]string{clusterv1beta1.PlacementLabel: "test-placement"},
Labels: map[string]string{
clusterv1beta1.PlacementLabel: "test-placement",
clusterv1beta1.DecisionGroupIndexLabel: "0",
},
},
Status: clusterv1beta1.PlacementDecisionStatus{
Decisions: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster2"}, {ClusterName: "cluster3"}},
@@ -409,9 +435,9 @@ func TestAddonConfigReconcile(t *testing.T) {
},
clusterManagementAddon: addontesting.NewClusterManagementAddon("test", "", "").WithPlacementStrategy(addonv1alpha1.PlacementStrategy{
PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"},
RolloutStrategy: addonv1alpha1.RolloutStrategy{
Type: addonv1alpha1.AddonRolloutStrategyRollingUpdate,
RollingUpdate: &addonv1alpha1.RollingUpdate{MaxConcurrency: intstr.FromInt(1)}},
RolloutStrategy: clusterv1alpha1.RolloutStrategy{
Type: clusterv1alpha1.Progressive,
Progressive: &clusterv1alpha1.RolloutProgressive{MaxConcurrency: intstr.FromInt(1)}},
}).WithInstallProgression(addonv1alpha1.InstallProgression{
PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"},
ConfigReferences: []addonv1alpha1.InstallConfigReference{
@@ -439,7 +465,7 @@ func TestAddonConfigReconcile(t *testing.T) {
},
},
{
name: "placement rolling update with MaxConcurrency 0",
name: "placement rollout progressive with MaxConcurrency 50%",
managedClusteraddon: []runtime.Object{
addontesting.NewAddon("test", "cluster1"),
addontesting.NewAddon("test", "cluster2"),
@@ -453,49 +479,11 @@ func TestAddonConfigReconcile(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "test-placement",
Namespace: "default",
Labels: map[string]string{clusterv1beta1.PlacementLabel: "test-placement"},
},
Status: clusterv1beta1.PlacementDecisionStatus{
Decisions: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster2"}, {ClusterName: "cluster3"}},
},
},
},
clusterManagementAddon: addontesting.NewClusterManagementAddon("test", "", "").WithPlacementStrategy(addonv1alpha1.PlacementStrategy{
PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"},
RolloutStrategy: addonv1alpha1.RolloutStrategy{
Type: addonv1alpha1.AddonRolloutStrategyRollingUpdate,
RollingUpdate: &addonv1alpha1.RollingUpdate{MaxConcurrency: intstr.FromString("0%")}},
}).WithInstallProgression(addonv1alpha1.InstallProgression{
PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"},
ConfigReferences: []addonv1alpha1.InstallConfigReference{
{
ConfigGroupResource: v1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
DesiredConfig: &v1alpha1.ConfigSpecHash{
ConfigReferent: v1alpha1.ConfigReferent{Name: "test1"},
SpecHash: "hash1",
Labels: map[string]string{
clusterv1beta1.PlacementLabel: "test-placement",
clusterv1beta1.DecisionGroupIndexLabel: "0",
},
},
},
}).Build(),
validateAddonActions: addontesting.AssertNoActions,
},
{
name: "placement rolling update with default MaxConcurrency",
managedClusteraddon: []runtime.Object{
addontesting.NewAddon("test", "cluster1"),
addontesting.NewAddon("test", "cluster2"),
addontesting.NewAddon("test", "cluster3"),
},
placements: []runtime.Object{
&clusterv1beta1.Placement{ObjectMeta: metav1.ObjectMeta{Name: "test-placement", Namespace: "default"}},
},
placementDecisions: []runtime.Object{
&clusterv1beta1.PlacementDecision{
ObjectMeta: metav1.ObjectMeta{
Name: "test-placement",
Namespace: "default",
Labels: map[string]string{clusterv1beta1.PlacementLabel: "test-placement"},
},
Status: clusterv1beta1.PlacementDecisionStatus{
Decisions: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster2"}, {ClusterName: "cluster3"}},
},
@@ -503,9 +491,9 @@ func TestAddonConfigReconcile(t *testing.T) {
},
clusterManagementAddon: addontesting.NewClusterManagementAddon("test", "", "").WithPlacementStrategy(addonv1alpha1.PlacementStrategy{
PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"},
RolloutStrategy: addonv1alpha1.RolloutStrategy{
Type: addonv1alpha1.AddonRolloutStrategyRollingUpdate,
RollingUpdate: &addonv1alpha1.RollingUpdate{MaxConcurrency: defaultMaxConcurrency}},
RolloutStrategy: clusterv1alpha1.RolloutStrategy{
Type: clusterv1alpha1.Progressive,
Progressive: &clusterv1alpha1.RolloutProgressive{MaxConcurrency: intstr.FromString("50%")}},
}).WithInstallProgression(addonv1alpha1.InstallProgression{
PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"},
ConfigReferences: []addonv1alpha1.InstallConfigReference{
@@ -532,6 +520,321 @@ func TestAddonConfigReconcile(t *testing.T) {
}})
},
},
{
name: "placement rollout progressive with default MaxConcurrency 100%",
managedClusteraddon: []runtime.Object{
addontesting.NewAddon("test", "cluster1"),
addontesting.NewAddon("test", "cluster2"),
addontesting.NewAddon("test", "cluster3"),
},
placements: []runtime.Object{
&clusterv1beta1.Placement{ObjectMeta: metav1.ObjectMeta{Name: "test-placement", Namespace: "default"}},
},
placementDecisions: []runtime.Object{
&clusterv1beta1.PlacementDecision{
ObjectMeta: metav1.ObjectMeta{
Name: "test-placement",
Namespace: "default",
Labels: map[string]string{
clusterv1beta1.PlacementLabel: "test-placement",
clusterv1beta1.DecisionGroupIndexLabel: "0",
},
},
Status: clusterv1beta1.PlacementDecisionStatus{
Decisions: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster2"}, {ClusterName: "cluster3"}},
},
},
},
clusterManagementAddon: addontesting.NewClusterManagementAddon("test", "", "").WithPlacementStrategy(addonv1alpha1.PlacementStrategy{
PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"},
RolloutStrategy: clusterv1alpha1.RolloutStrategy{
Type: clusterv1alpha1.Progressive,
Progressive: &clusterv1alpha1.RolloutProgressive{}},
}).WithInstallProgression(addonv1alpha1.InstallProgression{
PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"},
ConfigReferences: []addonv1alpha1.InstallConfigReference{
{
ConfigGroupResource: v1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
DesiredConfig: &v1alpha1.ConfigSpecHash{
ConfigReferent: v1alpha1.ConfigReferent{Name: "test1"},
SpecHash: "hash1",
},
},
},
}).Build(),
validateAddonActions: func(t *testing.T, actions []clienttesting.Action) {
addontesting.AssertActions(t, actions, "patch", "patch")
sort.Sort(byPatchName(actions))
expectPatchConfigurationAction(t, actions[0], []addonv1alpha1.ConfigReference{{
ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
DesiredConfig: &addonv1alpha1.ConfigSpecHash{
ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
SpecHash: "hash1",
},
LastObservedGeneration: 0,
}})
expectPatchConfigurationAction(t, actions[1], []addonv1alpha1.ConfigReference{{
ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
DesiredConfig: &addonv1alpha1.ConfigSpecHash{
ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
SpecHash: "hash1",
},
LastObservedGeneration: 0,
}})
},
},
{
name: "placement rollout progressive with mandatory decision groups",
managedClusteraddon: []runtime.Object{
addontesting.NewAddon("test", "cluster1"),
addontesting.NewAddon("test", "cluster2"),
addontesting.NewAddon("test", "cluster3"),
},
placements: []runtime.Object{
&clusterv1beta1.Placement{ObjectMeta: metav1.ObjectMeta{Name: "test-placement", Namespace: "default"}},
},
placementDecisions: []runtime.Object{
&clusterv1beta1.PlacementDecision{
ObjectMeta: metav1.ObjectMeta{
Name: "test-placement-0",
Namespace: "default",
Labels: map[string]string{
clusterv1beta1.PlacementLabel: "test-placement",
clusterv1beta1.DecisionGroupNameLabel: "group1",
clusterv1beta1.DecisionGroupIndexLabel: "0",
},
},
Status: clusterv1beta1.PlacementDecisionStatus{
Decisions: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster1"}, {ClusterName: "cluster2"}},
},
},
&clusterv1beta1.PlacementDecision{
ObjectMeta: metav1.ObjectMeta{
Name: "test-placement-1",
Namespace: "default",
Labels: map[string]string{
clusterv1beta1.PlacementLabel: "test-placement",
clusterv1beta1.DecisionGroupIndexLabel: "1",
},
},
Status: clusterv1beta1.PlacementDecisionStatus{
Decisions: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster3"}},
},
},
},
clusterManagementAddon: addontesting.NewClusterManagementAddon("test", "", "").WithPlacementStrategy(
addonv1alpha1.PlacementStrategy{
PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"},
RolloutStrategy: clusterv1alpha1.RolloutStrategy{
Type: clusterv1alpha1.Progressive,
Progressive: &clusterv1alpha1.RolloutProgressive{
MandatoryDecisionGroups: clusterv1alpha1.MandatoryDecisionGroups{
MandatoryDecisionGroups: []clusterv1alpha1.MandatoryDecisionGroup{
{GroupName: "group1"},
},
},
},
}}).WithInstallProgression(addonv1alpha1.InstallProgression{
PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"},
ConfigReferences: []addonv1alpha1.InstallConfigReference{
{
ConfigGroupResource: v1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
DesiredConfig: &v1alpha1.ConfigSpecHash{
ConfigReferent: v1alpha1.ConfigReferent{Name: "test1"},
SpecHash: "hash1",
},
},
},
}).Build(),
validateAddonActions: func(t *testing.T, actions []clienttesting.Action) {
addontesting.AssertActions(t, actions, "patch", "patch")
sort.Sort(byPatchName(actions))
expectPatchConfigurationAction(t, actions[0], []addonv1alpha1.ConfigReference{{
ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
DesiredConfig: &addonv1alpha1.ConfigSpecHash{
ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
SpecHash: "hash1",
},
LastObservedGeneration: 0,
}})
expectPatchConfigurationAction(t, actions[1], []addonv1alpha1.ConfigReference{{
ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
DesiredConfig: &addonv1alpha1.ConfigSpecHash{
ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
SpecHash: "hash1",
},
LastObservedGeneration: 0,
}})
},
},
{
name: "placement rollout progressive per group",
managedClusteraddon: []runtime.Object{
addontesting.NewAddon("test", "cluster1"),
addontesting.NewAddon("test", "cluster2"),
addontesting.NewAddon("test", "cluster3"),
},
placements: []runtime.Object{
&clusterv1beta1.Placement{ObjectMeta: metav1.ObjectMeta{Name: "test-placement", Namespace: "default"}},
},
placementDecisions: []runtime.Object{
&clusterv1beta1.PlacementDecision{
ObjectMeta: metav1.ObjectMeta{
Name: "test-placement-0",
Namespace: "default",
Labels: map[string]string{
clusterv1beta1.PlacementLabel: "test-placement",
clusterv1beta1.DecisionGroupIndexLabel: "0",
},
},
Status: clusterv1beta1.PlacementDecisionStatus{
Decisions: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster1"}, {ClusterName: "cluster2"}},
},
},
&clusterv1beta1.PlacementDecision{
ObjectMeta: metav1.ObjectMeta{
Name: "test-placement-1",
Namespace: "default",
Labels: map[string]string{
clusterv1beta1.PlacementLabel: "test-placement",
clusterv1beta1.DecisionGroupIndexLabel: "1",
},
},
Status: clusterv1beta1.PlacementDecisionStatus{
Decisions: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster3"}},
},
},
},
clusterManagementAddon: addontesting.NewClusterManagementAddon("test", "", "").WithPlacementStrategy(
addonv1alpha1.PlacementStrategy{
PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"},
RolloutStrategy: clusterv1alpha1.RolloutStrategy{
Type: clusterv1alpha1.ProgressivePerGroup,
}}).WithInstallProgression(addonv1alpha1.InstallProgression{
PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"},
ConfigReferences: []addonv1alpha1.InstallConfigReference{
{
ConfigGroupResource: v1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
DesiredConfig: &v1alpha1.ConfigSpecHash{
ConfigReferent: v1alpha1.ConfigReferent{Name: "test1"},
SpecHash: "hash1",
},
},
},
}).Build(),
validateAddonActions: func(t *testing.T, actions []clienttesting.Action) {
addontesting.AssertActions(t, actions, "patch", "patch")
sort.Sort(byPatchName(actions))
expectPatchConfigurationAction(t, actions[0], []addonv1alpha1.ConfigReference{{
ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
DesiredConfig: &addonv1alpha1.ConfigSpecHash{
ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
SpecHash: "hash1",
},
LastObservedGeneration: 0,
}})
expectPatchConfigurationAction(t, actions[1], []addonv1alpha1.ConfigReference{{
ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
DesiredConfig: &addonv1alpha1.ConfigSpecHash{
ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
SpecHash: "hash1",
},
LastObservedGeneration: 0,
}})
},
},
{
name: "placement rollout progressive per group with mandatory decision groups",
managedClusteraddon: []runtime.Object{
addontesting.NewAddon("test", "cluster1"),
addontesting.NewAddon("test", "cluster2"),
addontesting.NewAddon("test", "cluster3"),
},
placements: []runtime.Object{
&clusterv1beta1.Placement{ObjectMeta: metav1.ObjectMeta{Name: "test-placement", Namespace: "default"}},
},
placementDecisions: []runtime.Object{
&clusterv1beta1.PlacementDecision{
ObjectMeta: metav1.ObjectMeta{
Name: "test-placement-0",
Namespace: "default",
Labels: map[string]string{
clusterv1beta1.PlacementLabel: "test-placement",
clusterv1beta1.DecisionGroupNameLabel: "group1",
clusterv1beta1.DecisionGroupIndexLabel: "0",
},
},
Status: clusterv1beta1.PlacementDecisionStatus{
Decisions: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster1"}, {ClusterName: "cluster2"}},
},
},
&clusterv1beta1.PlacementDecision{
ObjectMeta: metav1.ObjectMeta{
Name: "test-placement-1",
Namespace: "default",
Labels: map[string]string{
clusterv1beta1.PlacementLabel: "test-placement",
clusterv1beta1.DecisionGroupIndexLabel: "1",
},
},
Status: clusterv1beta1.PlacementDecisionStatus{
Decisions: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster3"}},
},
},
},
clusterManagementAddon: addontesting.NewClusterManagementAddon("test", "", "").WithPlacementStrategy(
addonv1alpha1.PlacementStrategy{
PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"},
RolloutStrategy: clusterv1alpha1.RolloutStrategy{
Type: clusterv1alpha1.ProgressivePerGroup,
ProgressivePerGroup: &clusterv1alpha1.RolloutProgressivePerGroup{
MandatoryDecisionGroups: clusterv1alpha1.MandatoryDecisionGroups{
MandatoryDecisionGroups: []clusterv1alpha1.MandatoryDecisionGroup{
{GroupName: "group1"},
},
},
},
}}).WithInstallProgression(addonv1alpha1.InstallProgression{
PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"},
ConfigReferences: []addonv1alpha1.InstallConfigReference{
{
ConfigGroupResource: v1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
DesiredConfig: &v1alpha1.ConfigSpecHash{
ConfigReferent: v1alpha1.ConfigReferent{Name: "test1"},
SpecHash: "hash1",
},
},
},
}).Build(),
validateAddonActions: func(t *testing.T, actions []clienttesting.Action) {
addontesting.AssertActions(t, actions, "patch", "patch")
sort.Sort(byPatchName(actions))
expectPatchConfigurationAction(t, actions[0], []addonv1alpha1.ConfigReference{{
ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
DesiredConfig: &addonv1alpha1.ConfigSpecHash{
ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
SpecHash: "hash1",
},
LastObservedGeneration: 0,
}})
expectPatchConfigurationAction(t, actions[1], []addonv1alpha1.ConfigReference{{
ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
DesiredConfig: &addonv1alpha1.ConfigSpecHash{
ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
SpecHash: "hash1",
},
LastObservedGeneration: 0,
}})
},
},
}

for _, c := range cases {
@@ -572,7 +875,7 @@ func TestAddonConfigReconcile(t *testing.T) {

controller := &addonConfigurationController{
addonClient: fakeAddonClient,
placementDecisionLister: clusterInformers.Cluster().V1beta1().PlacementDecisions().Lister(),
placementDecisionGetter: helpers.PlacementDecisionGetter{Client: clusterInformers.Cluster().V1beta1().PlacementDecisions().Lister()},
placementLister: clusterInformers.Cluster().V1beta1().Placements().Lister(),
clusterManagementAddonLister: addonInformers.Addon().V1alpha1().ClusterManagementAddOns().Lister(),
managedClusterAddonIndexer: addonInformers.Addon().V1alpha1().ManagedClusterAddOns().Informer().GetIndexer(),
@@ -586,6 +889,10 @@ func TestAddonConfigReconcile(t *testing.T) {
if err != nil {
t.Errorf("expected no error when build graph: %v", err)
}
err = graph.generateRolloutResult()
if err != nil {
t.Errorf("expected no error when refresh rollout result: %v", err)
}

_, _, err = reconcile.reconcile(context.TODO(), c.clusterManagementAddon, graph)
if err != nil && !c.expectErr {
@@ -614,10 +921,13 @@ func (a byPatchName) Less(i, j int) bool {
func newManagedClusterAddon(
name, namespace string,
configs []addonv1alpha1.AddOnConfig,
configStatus []addonv1alpha1.ConfigReference) *addonv1alpha1.ManagedClusterAddOn {
configStatus []addonv1alpha1.ConfigReference,
conditions []metav1.Condition,
) *addonv1alpha1.ManagedClusterAddOn {
mca := addontesting.NewAddon(name, namespace)
mca.Spec.Configs = configs
mca.Status.ConfigReferences = configStatus
mca.Status.Conditions = conditions
return mca
}

@@ -6,7 +6,6 @@ import (
"github.com/openshift/library-go/pkg/controller/factory"
"github.com/openshift/library-go/pkg/operator/events"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
@@ -18,8 +17,8 @@ import (
addonlisterv1alpha1 "open-cluster-management.io/api/client/addon/listers/addon/v1alpha1"
clusterinformersv1beta1 "open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1beta1"
clusterlisterv1beta1 "open-cluster-management.io/api/client/cluster/listers/cluster/v1beta1"
clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1"

"open-cluster-management.io/ocm/pkg/common/helpers"
"open-cluster-management.io/ocm/pkg/common/patcher"
"open-cluster-management.io/ocm/pkg/common/queue"
)
@@ -34,7 +33,7 @@ type addonConfigurationController struct {
managedClusterAddonIndexer cache.Indexer
addonFilterFunc factory.EventFilterFunc
placementLister clusterlisterv1beta1.PlacementLister
placementDecisionLister clusterlisterv1beta1.PlacementDecisionLister
placementDecisionGetter helpers.PlacementDecisionGetter

reconcilers []addonConfigurationReconcile
}
@@ -65,7 +64,7 @@ func NewAddonConfigurationController(
clusterManagementAddonLister: clusterManagementAddonInformers.Lister(),
managedClusterAddonIndexer: addonInformers.Informer().GetIndexer(),
placementLister: placementInformer.Lister(),
placementDecisionLister: placementDecisionInformer.Lister(),
placementDecisionGetter: helpers.PlacementDecisionGetter{Client: placementDecisionInformer.Lister()},
addonFilterFunc: addonFilterFunc,
}

@@ -121,6 +120,13 @@ func (c *addonConfigurationController) sync(ctx context.Context, syncCtx factory
return err
}

// generate the rollout result before calling reconcile()
// so that all the reconcilers are using the same rollout result
err = graph.generateRolloutResult()
if err != nil {
return err
}

var state reconcileState
var errs []error
for _, reconciler := range c.reconcilers {
@@ -156,51 +162,19 @@ func (c *addonConfigurationController) buildConfigurationGraph(logger klog.Logge
// check each install strategy in status
var errs []error
for _, installProgression := range cma.Status.InstallProgressions {
clusters, err := c.getClustersByPlacement(installProgression.PlacementRef.Name, installProgression.PlacementRef.Namespace)
if errors.IsNotFound(err) {
logger.V(2).Info("Placement not found for addon", "placementNamespace", installProgression.PlacementRef.Namespace,
"placementName", installProgression.PlacementRef.Name, "addonName", cma.Name)
continue
}
if err != nil {
errs = append(errs, err)
continue
}

for _, installStrategy := range cma.Spec.InstallStrategy.Placements {
if installStrategy.PlacementRef == installProgression.PlacementRef {
graph.addPlacementNode(installStrategy, installProgression, clusters)
if installStrategy.PlacementRef != installProgression.PlacementRef {
continue
}

// add placement node
err = graph.addPlacementNode(installStrategy, installProgression, c.placementLister, c.placementDecisionGetter)
if err != nil {
errs = append(errs, err)
continue
}
}
}

return graph, utilerrors.NewAggregate(errs)
}

func (c *addonConfigurationController) getClustersByPlacement(name, namespace string) ([]string, error) {
var clusters []string
if c.placementLister == nil || c.placementDecisionLister == nil {
return clusters, nil
}
_, err := c.placementLister.Placements(namespace).Get(name)
if err != nil {
return clusters, err
}

decisionSelector := labels.SelectorFromSet(labels.Set{
clusterv1beta1.PlacementLabel: name,
})
decisions, err := c.placementDecisionLister.PlacementDecisions(namespace).List(decisionSelector)
if err != nil {
return clusters, err
}

for _, d := range decisions {
for _, sd := range d.Status.Decisions {
clusters = append(clusters, sd.ClusterName)
}
}

return clusters, nil
}

@@ -2,20 +2,18 @@ package addonconfiguration

import (
"fmt"
"math"
"sort"
"strconv"

"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/util/intstr"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"

addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
)
clusterlisterv1beta1 "open-cluster-management.io/api/client/cluster/listers/cluster/v1beta1"
clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1"
clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1"

var (
defaultMaxConcurrency = intstr.FromString("25%")
maxMaxConcurrency = intstr.FromString("100%")
"open-cluster-management.io/ocm/pkg/common/helpers"
)

// configurationTree is a 2 level snapshot tree on the configuration of addons
@@ -30,9 +28,11 @@ type configurationGraph struct {

// installStrategyNode is a node in configurationGraph defined by a install strategy
type installStrategyNode struct {
placementRef addonv1alpha1.PlacementRef
maxConcurrency intstr.IntOrString
desiredConfigs addonConfigMap
placementRef addonv1alpha1.PlacementRef
pdTracker *clusterv1beta1.PlacementDecisionClustersTracker
rolloutStrategy clusterv1alpha1.RolloutStrategy
rolloutResult clusterv1alpha1.RolloutResult
desiredConfigs addonConfigMap
// children keeps a map of addons node as the children of this node
children map[string]*addonNode
clusters sets.Set[string]
@@ -43,46 +43,63 @@ type installStrategyNode struct {
type addonNode struct {
desiredConfigs addonConfigMap
mca *addonv1alpha1.ManagedClusterAddOn
// record mca upgrade status
mcaUpgradeStatus upgradeStatus
status *clusterv1alpha1.ClusterRolloutStatus
}

type upgradeStatus int

const (
// mca desired configs not synced from desiredConfigs yet
toupgrade upgradeStatus = iota
// mca desired configs upgraded and last applied configs not upgraded
upgrading
// both desired configs and last applied configs are upgraded
upgraded
)

type addonConfigMap map[addonv1alpha1.ConfigGroupResource]addonv1alpha1.ConfigReference

// set addon upgrade status
func (n *addonNode) setUpgradeStatus() {
// set addon rollout status
func (n *addonNode) setRolloutStatus() {
// desired configs doesn't match actual configs, set to ToApply
if len(n.mca.Status.ConfigReferences) != len(n.desiredConfigs) {
n.mcaUpgradeStatus = toupgrade
n.status = &clusterv1alpha1.ClusterRolloutStatus{Status: clusterv1alpha1.ToApply}
return
}

var progressingCond metav1.Condition
for _, cond := range n.mca.Status.Conditions {
if cond.Type == addonv1alpha1.ManagedClusterAddOnConditionProgressing {
progressingCond = cond
break
}
}

for _, actual := range n.mca.Status.ConfigReferences {
if desired, ok := n.desiredConfigs[actual.ConfigGroupResource]; ok {
// desired config spec hash doesn't match actual, set to ToApply
if !equality.Semantic.DeepEqual(desired.DesiredConfig, actual.DesiredConfig) {
n.mcaUpgradeStatus = toupgrade
n.status = &clusterv1alpha1.ClusterRolloutStatus{Status: clusterv1alpha1.ToApply}
return
// desired config spec hash matches actual, but last applied config spec hash doesn't match actual
} else if !equality.Semantic.DeepEqual(actual.LastAppliedConfig, actual.DesiredConfig) {
n.mcaUpgradeStatus = upgrading
switch progressingCond.Reason {
case addonv1alpha1.ProgressingReasonInstallFailed, addonv1alpha1.ProgressingReasonUpgradeFailed:
n.status = &clusterv1alpha1.ClusterRolloutStatus{Status: clusterv1alpha1.Failed, LastTransitionTime: &progressingCond.LastTransitionTime}
case addonv1alpha1.ProgressingReasonInstalling, addonv1alpha1.ProgressingReasonUpgrading:
n.status = &clusterv1alpha1.ClusterRolloutStatus{Status: clusterv1alpha1.Progressing, LastTransitionTime: &progressingCond.LastTransitionTime}
default:
n.status = &clusterv1alpha1.ClusterRolloutStatus{Status: clusterv1alpha1.Progressing}
}
return
}
} else {
n.mcaUpgradeStatus = toupgrade
n.status = &clusterv1alpha1.ClusterRolloutStatus{Status: clusterv1alpha1.ToApply}
return
}
}

n.mcaUpgradeStatus = upgraded
// succeed
if progressingCond.Reason == addonv1alpha1.ProgressingReasonInstallSucceed || progressingCond.Reason == addonv1alpha1.ProgressingReasonUpgradeSucceed {
n.status = &clusterv1alpha1.ClusterRolloutStatus{
Status: clusterv1alpha1.Succeeded,
LastTransitionTime: &progressingCond.LastTransitionTime,
}
} else {
n.status = &clusterv1alpha1.ClusterRolloutStatus{
Status: clusterv1alpha1.Succeeded,
}
}

}

func (d addonConfigMap) copy() addonConfigMap {
@@ -97,7 +114,6 @@ func newGraph(supportedConfigs []addonv1alpha1.ConfigMeta, defaultConfigReferenc
graph := &configurationGraph{
nodes: []*installStrategyNode{},
defaults: &installStrategyNode{
maxConcurrency: maxMaxConcurrency,
desiredConfigs: map[addonv1alpha1.ConfigGroupResource]addonv1alpha1.ConfigReference{},
children: map[string]*addonNode{},
},
@@ -145,26 +161,53 @@ func (g *configurationGraph) addAddonNode(mca *addonv1alpha1.ManagedClusterAddOn
func (g *configurationGraph) addPlacementNode(
installStrategy addonv1alpha1.PlacementStrategy,
installProgression addonv1alpha1.InstallProgression,
clusters []string,
) {
placementLister clusterlisterv1beta1.PlacementLister,
placementDecisionGetter helpers.PlacementDecisionGetter,
) error {
placementRef := installProgression.PlacementRef
installConfigReference := installProgression.ConfigReferences

node := &installStrategyNode{
placementRef: placementRef,
maxConcurrency: maxMaxConcurrency,
desiredConfigs: g.defaults.desiredConfigs,
children: map[string]*addonNode{},
clusters: sets.New[string](clusters...),
// get placement
if placementLister == nil {
return fmt.Errorf("invalid placement lister %v", placementLister)
}
placement, err := placementLister.Placements(placementRef.Namespace).Get(placementRef.Name)
if err != nil {
return err
}

// set max concurrency
if installStrategy.RolloutStrategy.Type == addonv1alpha1.AddonRolloutStrategyRollingUpdate {
if installStrategy.RolloutStrategy.RollingUpdate != nil {
node.maxConcurrency = installStrategy.RolloutStrategy.RollingUpdate.MaxConcurrency
} else {
node.maxConcurrency = defaultMaxConcurrency
// new decision tracker
pdTracker := clusterv1beta1.NewPlacementDecisionClustersTracker(placement, placementDecisionGetter, nil)

// refresh and get existing decision clusters
err = pdTracker.Refresh()
if err != nil {
return err
}
clusters := pdTracker.ExistingClusterGroupsBesides().GetClusters()

node := &installStrategyNode{
placementRef: placementRef,
pdTracker: pdTracker,
rolloutStrategy: installStrategy.RolloutStrategy,
desiredConfigs: g.defaults.desiredConfigs,
children: map[string]*addonNode{},
clusters: clusters,
}

// Set MaxConcurrency
// If progressive strategy is not initialized or MaxConcurrency is not specified, set MaxConcurrency to the default value
if node.rolloutStrategy.Type == clusterv1alpha1.Progressive {
progressiveStrategy := node.rolloutStrategy.Progressive

if progressiveStrategy == nil {
progressiveStrategy = &clusterv1alpha1.RolloutProgressive{}
}
if progressiveStrategy.MaxConcurrency.StrVal == "" && progressiveStrategy.MaxConcurrency.IntVal == 0 {
progressiveStrategy.MaxConcurrency = placement.Spec.DecisionStrategy.GroupStrategy.ClustersPerDecisionGroup
}

node.rolloutStrategy.Progressive = progressiveStrategy
}

// overrides configuration by install strategy
@@ -183,19 +226,32 @@ func (g *configurationGraph) addPlacementNode(
|
||||
}
|
||||
|
||||
// remove addon in defaults and other placements.
|
||||
for _, cluster := range clusters {
|
||||
for _, cluster := range node.clusters.UnsortedList() {
|
||||
if _, ok := g.defaults.children[cluster]; ok {
|
||||
node.addNode(g.defaults.children[cluster].mca)
|
||||
delete(g.defaults.children, cluster)
|
||||
}
|
||||
for _, placement := range g.nodes {
|
||||
if _, ok := placement.children[cluster]; ok {
|
||||
node.addNode(placement.children[cluster].mca)
|
||||
delete(placement.children, cluster)
|
||||
for _, placementNode := range g.nodes {
|
||||
if _, ok := placementNode.children[cluster]; ok {
|
||||
node.addNode(placementNode.children[cluster].mca)
|
||||
delete(placementNode.children, cluster)
|
||||
}
|
||||
}
|
||||
}
|
||||
g.nodes = append(g.nodes, node)
|
||||
return nil
|
||||
}
|
||||
|
||||
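For orientation, a minimal sketch of the strategy shape this function now consumes. This is illustrative only: it assumes the clusterv1alpha1 rollout types referenced in the diff, and intstr.FromInt is the standard apimachinery helper.

    strategy := addonv1alpha1.PlacementStrategy{
        PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"},
        RolloutStrategy: clusterv1alpha1.RolloutStrategy{
            Type: clusterv1alpha1.Progressive,
            Progressive: &clusterv1alpha1.RolloutProgressive{
                // MaxConcurrency is an intstr.IntOrString, so it accepts an absolute
                // count or a percentage string. When left empty, addPlacementNode
                // falls back to the placement's ClustersPerDecisionGroup, as shown above.
                MaxConcurrency: intstr.FromInt(2),
            },
        },
    }
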
func (g *configurationGraph) generateRolloutResult() error {
    for _, node := range g.nodes {
        if err := node.generateRolloutResult(); err != nil {
            return err
        }
    }
    if err := g.defaults.generateRolloutResult(); err != nil {
        return err
    }
    return nil
}

func (g *configurationGraph) getPlacementNodes() map[addonv1alpha1.PlacementRef]*installStrategyNode {
@@ -207,13 +263,13 @@ func (g *configurationGraph) getPlacementNodes() map[addonv1alpha1.PlacementRef]
    return placementNodeMap
}

func (g *configurationGraph) addonToUpdate() []*addonNode {
func (g *configurationGraph) getAddonsToUpdate() []*addonNode {
    var addons []*addonNode
    for _, node := range g.nodes {
        addons = append(addons, node.addonToUpdate()...)
        addons = append(addons, node.getAddonsToUpdate()...)
    }

    addons = append(addons, g.defaults.addonToUpdate()...)
    addons = append(addons, g.defaults.getAddonsToUpdate()...)

    return addons
}
@@ -249,83 +305,86 @@ func (n *installStrategyNode) addNode(addon *addonv1alpha1.ManagedClusterAddOn)
        }
    }

    // set addon node upgrade status
    n.children[addon.Namespace].setUpgradeStatus()
    // set addon node rollout status
    n.children[addon.Namespace].setRolloutStatus()
}

func (n *installStrategyNode) addonUpgraded() int {
    count := 0
    for _, addon := range n.children {
        if desiredConfigsEqual(addon.desiredConfigs, n.desiredConfigs) && addon.mcaUpgradeStatus == upgraded {
            count += 1
func (n *installStrategyNode) generateRolloutResult() error {
    if n.placementRef.Name == "" {
        // default addons
        rolloutResult := clusterv1alpha1.RolloutResult{}
        rolloutResult.ClustersToRollout = map[string]clusterv1alpha1.ClusterRolloutStatus{}
        for k, addon := range n.children {
            if addon.status.Status != clusterv1alpha1.Succeeded {
                rolloutResult.ClustersToRollout[k] = *addon.status
            }
        }
        n.rolloutResult = rolloutResult
    } else {
        // placement addons
        rolloutHandler, err := clusterv1alpha1.NewRolloutHandler(n.pdTracker)
        if err != nil {
            return err
        }
        _, rolloutResult, err := rolloutHandler.GetRolloutCluster(n.rolloutStrategy, n.getUpgradeStatus)
        if err != nil {
            return err
        }
        n.rolloutResult = rolloutResult
    }
    return count

    return nil
}

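The placement branch above hands the actual scheduling decision to the rollout helpers in open-cluster-management.io/api. A minimal sketch of that contract, using only the calls visible in this diff (the strategy literal is illustrative; the status callback is any func(string) ClusterRolloutStatus, and getUpgradeStatus just below is the real one):

    handler, err := clusterv1alpha1.NewRolloutHandler(pdTracker)
    if err != nil {
        return err
    }
    // statusFn is a stand-in for a real per-cluster status callback.
    statusFn := func(clusterName string) clusterv1alpha1.ClusterRolloutStatus {
        return clusterv1alpha1.ClusterRolloutStatus{Status: clusterv1alpha1.ToApply}
    }
    // GetRolloutCluster returns the effective strategy and the clusters to roll out.
    _, rolloutResult, err := handler.GetRolloutCluster(clusterv1alpha1.RolloutStrategy{Type: clusterv1alpha1.All}, statusFn)
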
func (n *installStrategyNode) addonUpgrading() int {
    count := 0
    for _, addon := range n.children {
        if desiredConfigsEqual(addon.desiredConfigs, n.desiredConfigs) && addon.mcaUpgradeStatus == upgrading {
            count += 1
        }
func (n *installStrategyNode) getUpgradeStatus(clusterName string) clusterv1alpha1.ClusterRolloutStatus {
    if node, exist := n.children[clusterName]; exist {
        return *node.status
    } else {
        // if children not exist, return succeed status to skip
        return clusterv1alpha1.ClusterRolloutStatus{Status: clusterv1alpha1.Skip}
    }
    return count
}

// addonToUpdate finds the addons to be updated by placement
func (n *installStrategyNode) addonToUpdate() []*addonNode {
func (n *installStrategyNode) getAddonsToUpdate() []*addonNode {
    var addons []*addonNode
    var clusters []string

    // sort the children by key
    keys := make([]string, 0, len(n.children))
    for k := range n.children {
        keys = append(keys, k)
    }
    sort.Strings(keys)

    total := len(n.clusters)
    if total == 0 {
        total = len(n.children)
    // get addon to update from rollout result
    for c := range n.rolloutResult.ClustersToRollout {
        clusters = append(clusters, c)
    }

    length, _ := parseMaxConcurrency(n.maxConcurrency, total)
    if length == 0 {
        return addons
    // sort addons by name
    sort.Strings(clusters)
    for _, k := range clusters {
        addons = append(addons, n.children[k])
    }

    for i, k := range keys {
        if (i%length == 0) && len(addons) > 0 {
            return addons
        }

        addon := n.children[k]
        if addon.mcaUpgradeStatus != upgraded {
            addons = append(addons, addon)
        }
    }

    return addons
}

func parseMaxConcurrency(maxConcurrency intstr.IntOrString, total int) (int, error) {
    var length int

    switch maxConcurrency.Type {
    case intstr.String:
        str := maxConcurrency.StrVal
        f, err := strconv.ParseFloat(str[:len(str)-1], 64)
        if err != nil {
            return length, err
func (n *installStrategyNode) countAddonUpgradeSucceed() int {
    count := 0
    for _, addon := range n.children {
        if desiredConfigsEqual(addon.desiredConfigs, n.desiredConfigs) && addon.status.Status == clusterv1alpha1.Succeeded {
            count += 1
        }
        length = int(math.Ceil(f / 100 * float64(total)))
    case intstr.Int:
        length = maxConcurrency.IntValue()
    default:
        return length, fmt.Errorf("incorrect MaxConcurrency type %v", maxConcurrency.Type)
    }
    return count
}

    return length, nil
func (n *installStrategyNode) countAddonUpgrading() int {
    count := 0
    for _, addon := range n.children {
        if desiredConfigsEqual(addon.desiredConfigs, n.desiredConfigs) && addon.status.Status == clusterv1alpha1.Progressing {
            count += 1
        }
    }
    return count
}

func (n *installStrategyNode) countAddonTimeOut() int {
    return len(n.rolloutResult.ClustersTimeOut)
}

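A quick worked example of the percentage branch in parseMaxConcurrency above, for orientation: a MaxConcurrency of "50%" over total = 5 clusters parses the string (minus its trailing "%") to f = 50 and yields int(math.Ceil(50.0/100*float64(5))) = 3, so at most three clusters would be updated per pass.
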
func desiredConfigsEqual(a, b addonConfigMap) bool {
@@ -3,14 +3,25 @@ package addonconfiguration
import (
    "reflect"
    "testing"
    "time"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    "open-cluster-management.io/addon-framework/pkg/addonmanager/addontesting"
    addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
    fakecluster "open-cluster-management.io/api/client/cluster/clientset/versioned/fake"
    clusterv1informers "open-cluster-management.io/api/client/cluster/informers/externalversions"
    clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1"
    clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1"

    "open-cluster-management.io/ocm/pkg/common/helpers"
)

var fakeTime = metav1.NewTime(time.Date(2022, time.January, 01, 0, 0, 0, 0, time.UTC))

type placementDesicion struct {
    addonv1alpha1.PlacementRef
    clusters []string
    clusters []clusterv1beta1.ClusterDecision
}

func TestConfigurationGraph(t *testing.T) {
@@ -54,7 +65,8 @@ func TestConfigurationGraph(t *testing.T) {
            },
        },
    },
    mca: addontesting.NewAddon("test", "cluster1"),
    mca: addontesting.NewAddon("test", "cluster1"),
    status: &clusterv1alpha1.ClusterRolloutStatus{Status: clusterv1alpha1.ToApply},
},
{
    desiredConfigs: map[addonv1alpha1.ConfigGroupResource]addonv1alpha1.ConfigReference{
@@ -67,7 +79,8 @@ func TestConfigurationGraph(t *testing.T) {
            },
        },
    },
    mca: addontesting.NewAddon("test", "cluster2"),
    mca: addontesting.NewAddon("test", "cluster2"),
    status: &clusterv1alpha1.ClusterRolloutStatus{Status: clusterv1alpha1.ToApply},
},
},
},
@@ -90,13 +103,15 @@ func TestConfigurationGraph(t *testing.T) {
},
placementDesicions: []placementDesicion{
    {PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"},
        clusters: []string{"cluster1"}},
        clusters: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster1"}}},
    {PlacementRef: addonv1alpha1.PlacementRef{Name: "placement2", Namespace: "test"},
        clusters: []string{"cluster2"}},
        clusters: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster2"}}},
},
placementStrategies: []addonv1alpha1.PlacementStrategy{
    {PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"}},
    {PlacementRef: addonv1alpha1.PlacementRef{Name: "placement2", Namespace: "test"}},
    {PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"},
        RolloutStrategy: clusterv1alpha1.RolloutStrategy{Type: clusterv1alpha1.All}},
    {PlacementRef: addonv1alpha1.PlacementRef{Name: "placement2", Namespace: "test"},
        RolloutStrategy: clusterv1alpha1.RolloutStrategy{Type: clusterv1alpha1.All}},
},
installProgressions: []addonv1alpha1.InstallProgression{
    {
@@ -133,7 +148,8 @@ func TestConfigurationGraph(t *testing.T) {
            },
        },
    },
    mca: addontesting.NewAddon("test", "cluster1"),
    mca: addontesting.NewAddon("test", "cluster1"),
    status: &clusterv1alpha1.ClusterRolloutStatus{Status: clusterv1alpha1.ToApply},
},
{
    desiredConfigs: map[addonv1alpha1.ConfigGroupResource]addonv1alpha1.ConfigReference{
@@ -154,7 +170,8 @@ func TestConfigurationGraph(t *testing.T) {
            },
        },
    },
    mca: addontesting.NewAddon("test", "cluster2"),
    mca: addontesting.NewAddon("test", "cluster2"),
    status: &clusterv1alpha1.ClusterRolloutStatus{Status: clusterv1alpha1.ToApply},
},
{
    desiredConfigs: map[addonv1alpha1.ConfigGroupResource]addonv1alpha1.ConfigReference{
@@ -175,7 +192,152 @@ func TestConfigurationGraph(t *testing.T) {
            },
        },
    },
    mca: addontesting.NewAddon("test", "cluster3"),
    mca: addontesting.NewAddon("test", "cluster3"),
    status: &clusterv1alpha1.ClusterRolloutStatus{Status: clusterv1alpha1.ToApply},
},
},
},
{
    name: "mca progressing/failed/succeed",
    defaultConfigs: []addonv1alpha1.ConfigMeta{},
    defaultConfigReference: []addonv1alpha1.DefaultConfigReference{},
    addons: []*addonv1alpha1.ManagedClusterAddOn{
        newManagedClusterAddon("test", "cluster1", []addonv1alpha1.AddOnConfig{}, []addonv1alpha1.ConfigReference{
            {
                ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Bar"},
                ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
                DesiredConfig: &addonv1alpha1.ConfigSpecHash{
                    ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
                    SpecHash: "<core-bar-test1-hash>",
                },
                LastObservedGeneration: 1,
            },
        }, []metav1.Condition{
            {
                Type: addonv1alpha1.ManagedClusterAddOnConditionProgressing,
                Reason: addonv1alpha1.ProgressingReasonUpgradeFailed,
                LastTransitionTime: fakeTime,
            },
        }),
        newManagedClusterAddon("test", "cluster2", []addonv1alpha1.AddOnConfig{}, []addonv1alpha1.ConfigReference{
            {
                ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Bar"},
                ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
                DesiredConfig: &addonv1alpha1.ConfigSpecHash{
                    ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
                    SpecHash: "<core-bar-test1-hash>",
                },
                LastObservedGeneration: 1,
            },
        }, []metav1.Condition{
            {
                Type: addonv1alpha1.ManagedClusterAddOnConditionProgressing,
                Reason: addonv1alpha1.ProgressingReasonUpgrading,
                LastTransitionTime: fakeTime,
            },
        }),
        newManagedClusterAddon("test", "cluster3", []addonv1alpha1.AddOnConfig{}, []addonv1alpha1.ConfigReference{
            {
                ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Bar"},
                ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
                DesiredConfig: &addonv1alpha1.ConfigSpecHash{
                    ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
                    SpecHash: "<core-bar-test1-hash>",
                },
                LastAppliedConfig: &addonv1alpha1.ConfigSpecHash{
                    ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
                    SpecHash: "<core-bar-test1-hash>",
                },
                LastObservedGeneration: 1,
            },
        }, []metav1.Condition{
            {
                Type: addonv1alpha1.ManagedClusterAddOnConditionProgressing,
                Reason: addonv1alpha1.ProgressingReasonUpgradeSucceed,
                LastTransitionTime: fakeTime,
            },
        }),
        newManagedClusterAddon("test", "cluster4", []addonv1alpha1.AddOnConfig{}, []addonv1alpha1.ConfigReference{
            {
                ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
                ConfigReferent: addonv1alpha1.ConfigReferent{Name: "testx"},
                DesiredConfig: &addonv1alpha1.ConfigSpecHash{
                    ConfigReferent: addonv1alpha1.ConfigReferent{Name: "testx"},
                    SpecHash: "<core-bar-testx-hash>",
                },
                LastAppliedConfig: &addonv1alpha1.ConfigSpecHash{
                    ConfigReferent: addonv1alpha1.ConfigReferent{Name: "testx"},
                    SpecHash: "<core-bar-testx-hash>",
                },
                LastObservedGeneration: 1,
            },
        }, []metav1.Condition{
            {
                Type: addonv1alpha1.ManagedClusterAddOnConditionProgressing,
                Reason: addonv1alpha1.ProgressingReasonUpgradeSucceed,
                LastTransitionTime: fakeTime,
            },
        }),
    },
    placementDesicions: []placementDesicion{
        {PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"},
            clusters: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster1"}, {ClusterName: "cluster2"},
                {ClusterName: "cluster3"}, {ClusterName: "cluster4"}}},
    },
    placementStrategies: []addonv1alpha1.PlacementStrategy{
        {PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"},
            RolloutStrategy: clusterv1alpha1.RolloutStrategy{Type: clusterv1alpha1.All}},
    },
    installProgressions: []addonv1alpha1.InstallProgression{
        {
            PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"},
            ConfigReferences: []addonv1alpha1.InstallConfigReference{
                newInstallConfigReference("core", "Bar", "test1", "<core-bar-test1-hash>"),
            },
        },
    },
    expected: []*addonNode{
        {
            desiredConfigs: map[addonv1alpha1.ConfigGroupResource]addonv1alpha1.ConfigReference{
                {Group: "core", Resource: "Bar"}: {
                    ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Bar"},
                    ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
                    DesiredConfig: &addonv1alpha1.ConfigSpecHash{
                        ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
                        SpecHash: "<core-bar-test1-hash>",
                    },
                },
            },
            mca: addontesting.NewAddon("test", "cluster1"),
            status: &clusterv1alpha1.ClusterRolloutStatus{Status: clusterv1alpha1.Failed, LastTransitionTime: &fakeTime},
        },
        {
            desiredConfigs: map[addonv1alpha1.ConfigGroupResource]addonv1alpha1.ConfigReference{
                {Group: "core", Resource: "Bar"}: {
                    ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Bar"},
                    ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
                    DesiredConfig: &addonv1alpha1.ConfigSpecHash{
                        ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
                        SpecHash: "<core-bar-test1-hash>",
                    },
                },
            },
            mca: addontesting.NewAddon("test", "cluster2"),
            status: &clusterv1alpha1.ClusterRolloutStatus{Status: clusterv1alpha1.Progressing, LastTransitionTime: &fakeTime},
        },
        {
            desiredConfigs: map[addonv1alpha1.ConfigGroupResource]addonv1alpha1.ConfigReference{
                {Group: "core", Resource: "Bar"}: {
                    ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Bar"},
                    ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
                    DesiredConfig: &addonv1alpha1.ConfigSpecHash{
                        ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
                        SpecHash: "<core-bar-test1-hash>",
                    },
                },
            },
            mca: addontesting.NewAddon("test", "cluster4"),
            status: &clusterv1alpha1.ClusterRolloutStatus{Status: clusterv1alpha1.ToApply},
        },
    },
},
@@ -197,14 +359,16 @@ func TestConfigurationGraph(t *testing.T) {
    addontesting.NewAddon("test", "cluster3"),
},
placementStrategies: []addonv1alpha1.PlacementStrategy{
    {PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"}},
    {PlacementRef: addonv1alpha1.PlacementRef{Name: "placement2", Namespace: "test"}},
    {PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"},
        RolloutStrategy: clusterv1alpha1.RolloutStrategy{Type: clusterv1alpha1.All}},
    {PlacementRef: addonv1alpha1.PlacementRef{Name: "placement2", Namespace: "test"},
        RolloutStrategy: clusterv1alpha1.RolloutStrategy{Type: clusterv1alpha1.All}},
},
placementDesicions: []placementDesicion{
    {PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"},
        clusters: []string{"cluster1", "cluster2"}},
        clusters: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster1"}, {ClusterName: "cluster2"}}},
    {PlacementRef: addonv1alpha1.PlacementRef{Name: "placement2", Namespace: "test"},
        clusters: []string{"cluster2", "cluster3"}},
        clusters: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster2"}, {ClusterName: "cluster3"}}},
},
installProgressions: []addonv1alpha1.InstallProgression{
    {
@@ -241,7 +405,8 @@ func TestConfigurationGraph(t *testing.T) {
            },
        },
    },
    mca: addontesting.NewAddon("test", "cluster1"),
    mca: addontesting.NewAddon("test", "cluster1"),
    status: &clusterv1alpha1.ClusterRolloutStatus{Status: clusterv1alpha1.ToApply},
},
{
    desiredConfigs: map[addonv1alpha1.ConfigGroupResource]addonv1alpha1.ConfigReference{
@@ -262,7 +427,8 @@ func TestConfigurationGraph(t *testing.T) {
            },
        },
    },
    mca: addontesting.NewAddon("test", "cluster2"),
    mca: addontesting.NewAddon("test", "cluster2"),
    status: &clusterv1alpha1.ClusterRolloutStatus{Status: clusterv1alpha1.ToApply},
},
{
    desiredConfigs: map[addonv1alpha1.ConfigGroupResource]addonv1alpha1.ConfigReference{
@@ -283,7 +449,8 @@ func TestConfigurationGraph(t *testing.T) {
            },
        },
    },
    mca: addontesting.NewAddon("test", "cluster3"),
    mca: addontesting.NewAddon("test", "cluster3"),
    status: &clusterv1alpha1.ClusterRolloutStatus{Status: clusterv1alpha1.ToApply},
},
},
},
@@ -303,19 +470,21 @@ func TestConfigurationGraph(t *testing.T) {
newManagedClusterAddon("test", "cluster1", []addonv1alpha1.AddOnConfig{
    {ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Bar"},
        ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}},
}, nil),
}, nil, nil),
addontesting.NewAddon("test", "cluster2"),
addontesting.NewAddon("test", "cluster3"),
},
placementStrategies: []addonv1alpha1.PlacementStrategy{
    {PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"}},
    {PlacementRef: addonv1alpha1.PlacementRef{Name: "placement2", Namespace: "test"}},
    {PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"},
        RolloutStrategy: clusterv1alpha1.RolloutStrategy{Type: clusterv1alpha1.All}},
    {PlacementRef: addonv1alpha1.PlacementRef{Name: "placement2", Namespace: "test"},
        RolloutStrategy: clusterv1alpha1.RolloutStrategy{Type: clusterv1alpha1.All}},
},
placementDesicions: []placementDesicion{
    {PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"},
        clusters: []string{"cluster1"}},
        clusters: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster1"}}},
    {PlacementRef: addonv1alpha1.PlacementRef{Name: "placement2", Namespace: "test"},
        clusters: []string{"cluster2"}},
        clusters: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster2"}}},
},
installProgressions: []addonv1alpha1.InstallProgression{
    {
@@ -355,7 +524,8 @@ func TestConfigurationGraph(t *testing.T) {
    mca: newManagedClusterAddon("test", "cluster1", []addonv1alpha1.AddOnConfig{
        {ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Bar"},
            ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}},
    }, nil),
    }, nil, nil),
    status: &clusterv1alpha1.ClusterRolloutStatus{Status: clusterv1alpha1.ToApply},
},
{
    desiredConfigs: map[addonv1alpha1.ConfigGroupResource]addonv1alpha1.ConfigReference{
@@ -376,7 +546,8 @@ func TestConfigurationGraph(t *testing.T) {
            },
        },
    },
    mca: addontesting.NewAddon("test", "cluster2"),
    mca: addontesting.NewAddon("test", "cluster2"),
    status: &clusterv1alpha1.ClusterRolloutStatus{Status: clusterv1alpha1.ToApply},
},
{
    desiredConfigs: map[addonv1alpha1.ConfigGroupResource]addonv1alpha1.ConfigReference{
@@ -397,7 +568,8 @@ func TestConfigurationGraph(t *testing.T) {
            },
        },
    },
    mca: addontesting.NewAddon("test", "cluster3"),
    mca: addontesting.NewAddon("test", "cluster3"),
    status: &clusterv1alpha1.ClusterRolloutStatus{Status: clusterv1alpha1.ToApply},
},
},
},
@@ -405,15 +577,47 @@ func TestConfigurationGraph(t *testing.T) {

for _, c := range cases {
    t.Run(c.name, func(t *testing.T) {
        fakeClusterClient := fakecluster.NewSimpleClientset()
        clusterInformers := clusterv1informers.NewSharedInformerFactory(fakeClusterClient, 10*time.Minute)
        placementDecisionGetter := helpers.PlacementDecisionGetter{Client: clusterInformers.Cluster().V1beta1().PlacementDecisions().Lister()}
        placementLister := clusterInformers.Cluster().V1beta1().Placements().Lister()

        for _, strategy := range c.placementStrategies {
            obj := &clusterv1beta1.Placement{ObjectMeta: metav1.ObjectMeta{Name: strategy.Name, Namespace: strategy.Namespace}}
            if err := clusterInformers.Cluster().V1beta1().Placements().Informer().GetStore().Add(obj); err != nil {
                t.Fatal(err)
            }
        }

        for _, decision := range c.placementDesicions {
            obj := &clusterv1beta1.PlacementDecision{
                ObjectMeta: metav1.ObjectMeta{Name: decision.Name, Namespace: decision.Namespace,
                    Labels: map[string]string{
                        clusterv1beta1.PlacementLabel: decision.Name,
                        clusterv1beta1.DecisionGroupIndexLabel: "0",
                    }},
                Status: clusterv1beta1.PlacementDecisionStatus{Decisions: decision.clusters},
            }
            if err := clusterInformers.Cluster().V1beta1().PlacementDecisions().Informer().GetStore().Add(obj); err != nil {
                t.Fatal(err)
            }
        }

        graph := newGraph(c.defaultConfigs, c.defaultConfigReference)
        for _, addon := range c.addons {
            graph.addAddonNode(addon)
        }
        for i, decision := range c.placementDesicions {
            graph.addPlacementNode(c.placementStrategies[i], c.installProgressions[i], decision.clusters)

        for i := range c.placementStrategies {
            graph.addPlacementNode(c.placementStrategies[i], c.installProgressions[i], placementLister, placementDecisionGetter)
        }

        actual := graph.addonToUpdate()
        err := graph.generateRolloutResult()
        if err != nil {
            t.Errorf("expected no error when refresh rollout result: %v", err)
        }

        actual := graph.getAddonsToUpdate()
        if len(actual) != len(c.expected) {
            t.Errorf("output length is not correct, expected %v, got %v", len(c.expected), len(actual))
        }
@@ -425,8 +629,14 @@ func TestConfigurationGraph(t *testing.T) {
            t.Errorf("addonNode should not be nil")
        }
        if ev.mca != nil && v.mca != nil && ev.mca.Namespace == v.mca.Namespace {
            if !reflect.DeepEqual(v, ev) {
                t.Errorf("output is not correct, cluster %s, expected %v, got %v", v.mca.Namespace, ev, v)
            if !reflect.DeepEqual(v.mca.Name, ev.mca.Name) {
                t.Errorf("output mca name is not correct, cluster %s, expected %v, got %v", v.mca.Namespace, ev.mca.Name, v.mca.Name)
            }
            if !reflect.DeepEqual(v.desiredConfigs, ev.desiredConfigs) {
                t.Errorf("output desiredConfigs is not correct, cluster %s, expected %v, got %v", v.mca.Namespace, ev.desiredConfigs, v.desiredConfigs)
            }
            if !reflect.DeepEqual(v.status, ev.status) {
                t.Errorf("output status is not correct, cluster %s, expected %v, got %v", v.mca.Namespace, ev.status, v.status)
            }
            compared = true
        }

@@ -43,8 +43,9 @@ func (d *clusterManagementAddonProgressingReconciler) reconcile(

setAddOnInstallProgressionsAndLastApplied(&cmaCopy.Status.InstallProgressions[i],
    isUpgrade,
    placementNode.addonUpgrading(),
    placementNode.addonUpgraded(),
    placementNode.countAddonUpgrading(),
    placementNode.countAddonUpgradeSucceed(),
    placementNode.countAddonTimeOut(),
    len(placementNode.clusters),
)
}
@@ -56,7 +57,10 @@ func (d *clusterManagementAddonProgressingReconciler) reconcile(
return cmaCopy, reconcileContinue, utilerrors.NewAggregate(errs)
}

func setAddOnInstallProgressionsAndLastApplied(installProgression *addonv1alpha1.InstallProgression, isUpgrade bool, progressing, done, total int) {
func setAddOnInstallProgressionsAndLastApplied(
    installProgression *addonv1alpha1.InstallProgression,
    isUpgrade bool,
    progressing, done, timeout, total int) {
    // always update progressing condition when there is no config
    // skip update progressing condition when last applied config already the same as desired
    skip := len(installProgression.ConfigReferences) > 0
@@ -76,10 +80,10 @@ func setAddOnInstallProgressionsAndLastApplied(installProgression *addonv1alpha1
condition.Status = metav1.ConditionTrue
if isUpgrade {
    condition.Reason = addonv1alpha1.ProgressingReasonUpgrading
    condition.Message = fmt.Sprintf("%d/%d upgrading...", progressing+done, total)
    condition.Message = fmt.Sprintf("%d/%d upgrading..., %d timeout.", progressing+done, total, timeout)
} else {
    condition.Reason = addonv1alpha1.ProgressingReasonInstalling
    condition.Message = fmt.Sprintf("%d/%d installing...", progressing+done, total)
    condition.Message = fmt.Sprintf("%d/%d installing..., %d timeout.", progressing+done, total, timeout)
}
} else {
    for i, configRef := range installProgression.ConfigReferences {
@@ -89,10 +93,10 @@ func setAddOnInstallProgressionsAndLastApplied(installProgression *addonv1alpha1
condition.Status = metav1.ConditionFalse
if isUpgrade {
    condition.Reason = addonv1alpha1.ProgressingReasonUpgradeSucceed
    condition.Message = fmt.Sprintf("%d/%d upgrade completed with no errors.", done, total)
    condition.Message = fmt.Sprintf("%d/%d upgrade completed with no errors, %d timeout.", done, total, timeout)
} else {
    condition.Reason = addonv1alpha1.ProgressingReasonInstallSucceed
    condition.Message = fmt.Sprintf("%d/%d install completed with no errors.", done, total)
    condition.Message = fmt.Sprintf("%d/%d install completed with no errors, %d timeout.", done, total, timeout)
}
}
meta.SetStatusCondition(&installProgression.Conditions, condition)

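As a concrete check of the new message format: with progressing = 1, done = 0, timeout = 0 and total = 2, the upgrade branch above produces fmt.Sprintf("%d/%d upgrading..., %d timeout.", 1+0, 2, 0), i.e. "1/2 upgrading..., 0 timeout.", which is exactly the string the updated tests below assert.
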
@@ -20,8 +20,10 @@ import (
addoninformers "open-cluster-management.io/api/client/addon/informers/externalversions"
fakecluster "open-cluster-management.io/api/client/cluster/clientset/versioned/fake"
clusterv1informers "open-cluster-management.io/api/client/cluster/informers/externalversions"
clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1"
clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1"

"open-cluster-management.io/ocm/pkg/common/helpers"
"open-cluster-management.io/ocm/pkg/common/patcher"
)

@@ -39,19 +41,21 @@ func TestMgmtAddonProgressingReconcile(t *testing.T) {
name: "no managedClusteraddon",
managedClusteraddon: []runtime.Object{},
clusterManagementAddon: []runtime.Object{addontesting.NewClusterManagementAddon("test", "", "").
    WithPlacementStrategy(addonv1alpha1.PlacementStrategy{PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"}}).
    WithInstallProgression(addonv1alpha1.InstallProgression{
        PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"},
        ConfigReferences: []addonv1alpha1.InstallConfigReference{
            {
                ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
                DesiredConfig: &addonv1alpha1.ConfigSpecHash{
                    ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
                    SpecHash: "hash1",
                },
    WithPlacementStrategy(addonv1alpha1.PlacementStrategy{
        PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"},
        RolloutStrategy: clusterv1alpha1.RolloutStrategy{Type: clusterv1alpha1.All},
    }).WithInstallProgression(addonv1alpha1.InstallProgression{
        PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"},
        ConfigReferences: []addonv1alpha1.InstallConfigReference{
            {
                ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
                DesiredConfig: &addonv1alpha1.ConfigSpecHash{
                    ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
                    SpecHash: "hash1",
                },
            },
        }).Build()},
        },
    }).Build()},
placements: []runtime.Object{
    &clusterv1beta1.Placement{ObjectMeta: metav1.ObjectMeta{Name: "placement1", Namespace: "test"}},
},
@@ -60,7 +64,10 @@ func TestMgmtAddonProgressingReconcile(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
    Name: "placement1",
    Namespace: "test",
    Labels: map[string]string{clusterv1beta1.PlacementLabel: "placement1"},
    Labels: map[string]string{
        clusterv1beta1.PlacementLabel: "placement1",
        clusterv1beta1.DecisionGroupIndexLabel: "0",
    },
},
Status: clusterv1beta1.PlacementDecisionStatus{
    Decisions: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster1"}, {ClusterName: "cluster2"}},
@@ -88,7 +95,7 @@ func TestMgmtAddonProgressingReconcile(t *testing.T) {
if cma.Status.InstallProgressions[0].Conditions[0].Reason != addonv1alpha1.ProgressingReasonInstalling {
    t.Errorf("InstallProgressions condition is not correct: %v", cma.Status.InstallProgressions[0].Conditions[0].Reason)
}
if cma.Status.InstallProgressions[0].Conditions[0].Message != "0/2 installing..." {
if cma.Status.InstallProgressions[0].Conditions[0].Message != "0/2 installing..., 0 timeout." {
    t.Errorf("InstallProgressions condition is not correct: %v", cma.Status.InstallProgressions[0].Conditions[0].Message)
}
},
@@ -127,19 +134,21 @@ func TestMgmtAddonProgressingReconcile(t *testing.T) {
    return addon
}()},
clusterManagementAddon: []runtime.Object{addontesting.NewClusterManagementAddon("test", "", "").
    WithPlacementStrategy(addonv1alpha1.PlacementStrategy{PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"}}).
    WithInstallProgression(addonv1alpha1.InstallProgression{
        PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"},
        ConfigReferences: []addonv1alpha1.InstallConfigReference{
            {
                ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
                DesiredConfig: &addonv1alpha1.ConfigSpecHash{
                    ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
                    SpecHash: "hash1",
                },
    WithPlacementStrategy(addonv1alpha1.PlacementStrategy{
        PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"},
        RolloutStrategy: clusterv1alpha1.RolloutStrategy{Type: clusterv1alpha1.All},
    }).WithInstallProgression(addonv1alpha1.InstallProgression{
        PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"},
        ConfigReferences: []addonv1alpha1.InstallConfigReference{
            {
                ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
                DesiredConfig: &addonv1alpha1.ConfigSpecHash{
                    ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
                    SpecHash: "hash1",
                },
            },
        }).Build()},
        },
    }).Build()},
placements: []runtime.Object{
    &clusterv1beta1.Placement{ObjectMeta: metav1.ObjectMeta{Name: "placement1", Namespace: "test"}},
},
@@ -148,7 +157,10 @@ func TestMgmtAddonProgressingReconcile(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
    Name: "placement1",
    Namespace: "test",
    Labels: map[string]string{clusterv1beta1.PlacementLabel: "placement1"},
    Labels: map[string]string{
        clusterv1beta1.PlacementLabel: "placement1",
        clusterv1beta1.DecisionGroupIndexLabel: "0",
    },
},
Status: clusterv1beta1.PlacementDecisionStatus{
    Decisions: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster1"}, {ClusterName: "cluster2"}},
@@ -176,7 +188,7 @@ func TestMgmtAddonProgressingReconcile(t *testing.T) {
if cma.Status.InstallProgressions[0].Conditions[0].Reason != addonv1alpha1.ProgressingReasonInstalling {
    t.Errorf("InstallProgressions condition is not correct: %v", cma.Status.InstallProgressions[0].Conditions[0].Reason)
}
if cma.Status.InstallProgressions[0].Conditions[0].Message != "1/2 installing..." {
if cma.Status.InstallProgressions[0].Conditions[0].Message != "1/2 installing..., 0 timeout." {
    t.Errorf("InstallProgressions condition is not correct: %v", cma.Status.InstallProgressions[0].Conditions[0].Message)
}
},
@@ -201,19 +213,21 @@ func TestMgmtAddonProgressingReconcile(t *testing.T) {
    return addon
}()},
clusterManagementAddon: []runtime.Object{addontesting.NewClusterManagementAddon("test", "", "").
    WithPlacementStrategy(addonv1alpha1.PlacementStrategy{PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"}}).
    WithInstallProgression(addonv1alpha1.InstallProgression{
        PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"},
        ConfigReferences: []addonv1alpha1.InstallConfigReference{
            {
                ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
                DesiredConfig: &addonv1alpha1.ConfigSpecHash{
                    ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
                    SpecHash: "hash1",
                },
    WithPlacementStrategy(addonv1alpha1.PlacementStrategy{
        PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"},
        RolloutStrategy: clusterv1alpha1.RolloutStrategy{Type: clusterv1alpha1.All},
    }).WithInstallProgression(addonv1alpha1.InstallProgression{
        PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"},
        ConfigReferences: []addonv1alpha1.InstallConfigReference{
            {
                ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
                DesiredConfig: &addonv1alpha1.ConfigSpecHash{
                    ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
                    SpecHash: "hash1",
                },
            },
        }).Build()},
        },
    }).Build()},
placements: []runtime.Object{
    &clusterv1beta1.Placement{ObjectMeta: metav1.ObjectMeta{Name: "placement1", Namespace: "test"}},
},
@@ -222,7 +236,10 @@ func TestMgmtAddonProgressingReconcile(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
    Name: "placement1",
    Namespace: "test",
    Labels: map[string]string{clusterv1beta1.PlacementLabel: "placement1"},
    Labels: map[string]string{
        clusterv1beta1.PlacementLabel: "placement1",
        clusterv1beta1.DecisionGroupIndexLabel: "0",
    },
},
Status: clusterv1beta1.PlacementDecisionStatus{
    Decisions: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster1"}},
@@ -254,7 +271,7 @@ func TestMgmtAddonProgressingReconcile(t *testing.T) {
if cma.Status.InstallProgressions[0].Conditions[0].Reason != addonv1alpha1.ProgressingReasonInstallSucceed {
    t.Errorf("InstallProgressions condition is not correct: %v", cma.Status.InstallProgressions[0].Conditions)
}
if cma.Status.InstallProgressions[0].Conditions[0].Message != "1/1 install completed with no errors." {
if cma.Status.InstallProgressions[0].Conditions[0].Message != "1/1 install completed with no errors, 0 timeout." {
    t.Errorf("InstallProgressions condition is not correct: %v", cma.Status.InstallProgressions[0].Conditions)
}
},
@@ -275,23 +292,25 @@ func TestMgmtAddonProgressingReconcile(t *testing.T) {
    return addon
}()},
clusterManagementAddon: []runtime.Object{addontesting.NewClusterManagementAddon("test", "", "").
    WithPlacementStrategy(addonv1alpha1.PlacementStrategy{PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"}}).
    WithInstallProgression(addonv1alpha1.InstallProgression{
        PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"},
        ConfigReferences: []addonv1alpha1.InstallConfigReference{
            {
                ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
                DesiredConfig: &addonv1alpha1.ConfigSpecHash{
                    ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
                    SpecHash: "hash1",
                },
                LastAppliedConfig: &addonv1alpha1.ConfigSpecHash{
                    ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
                    SpecHash: "hash",
                },
    WithPlacementStrategy(addonv1alpha1.PlacementStrategy{
        PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"},
        RolloutStrategy: clusterv1alpha1.RolloutStrategy{Type: clusterv1alpha1.All},
    }).WithInstallProgression(addonv1alpha1.InstallProgression{
        PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"},
        ConfigReferences: []addonv1alpha1.InstallConfigReference{
            {
                ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
                DesiredConfig: &addonv1alpha1.ConfigSpecHash{
                    ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
                    SpecHash: "hash1",
                },
                LastAppliedConfig: &addonv1alpha1.ConfigSpecHash{
                    ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
                    SpecHash: "hash",
                },
            },
        }).Build()},
        },
    }).Build()},
placements: []runtime.Object{
    &clusterv1beta1.Placement{ObjectMeta: metav1.ObjectMeta{Name: "placement1", Namespace: "test"}},
},
@@ -300,7 +319,10 @@ func TestMgmtAddonProgressingReconcile(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
    Name: "placement1",
    Namespace: "test",
    Labels: map[string]string{clusterv1beta1.PlacementLabel: "placement1"},
    Labels: map[string]string{
        clusterv1beta1.PlacementLabel: "placement1",
        clusterv1beta1.DecisionGroupIndexLabel: "0",
    },
},
Status: clusterv1beta1.PlacementDecisionStatus{
    Decisions: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster1"}, {ClusterName: "cluster2"}},
@@ -325,7 +347,7 @@ func TestMgmtAddonProgressingReconcile(t *testing.T) {
if cma.Status.InstallProgressions[0].Conditions[0].Reason != addonv1alpha1.ProgressingReasonUpgrading {
    t.Errorf("InstallProgressions condition is not correct: %v", cma.Status.InstallProgressions[0].Conditions)
}
if cma.Status.InstallProgressions[0].Conditions[0].Message != "1/2 upgrading..." {
if cma.Status.InstallProgressions[0].Conditions[0].Message != "1/2 upgrading..., 0 timeout." {
    t.Errorf("InstallProgressions condition is not correct: %v", cma.Status.InstallProgressions[0].Conditions)
}
},
@@ -350,23 +372,25 @@ func TestMgmtAddonProgressingReconcile(t *testing.T) {
    return addon
}()},
clusterManagementAddon: []runtime.Object{addontesting.NewClusterManagementAddon("test", "", "").
    WithPlacementStrategy(addonv1alpha1.PlacementStrategy{PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"}}).
    WithInstallProgression(addonv1alpha1.InstallProgression{
        PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"},
        ConfigReferences: []addonv1alpha1.InstallConfigReference{
            {
                ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
                DesiredConfig: &addonv1alpha1.ConfigSpecHash{
                    ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
                    SpecHash: "hash1",
                },
                LastAppliedConfig: &addonv1alpha1.ConfigSpecHash{
                    ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
                    SpecHash: "hash",
                },
    WithPlacementStrategy(addonv1alpha1.PlacementStrategy{
        PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"},
        RolloutStrategy: clusterv1alpha1.RolloutStrategy{Type: clusterv1alpha1.All},
    }).WithInstallProgression(addonv1alpha1.InstallProgression{
        PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"},
        ConfigReferences: []addonv1alpha1.InstallConfigReference{
            {
                ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
                DesiredConfig: &addonv1alpha1.ConfigSpecHash{
                    ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
                    SpecHash: "hash1",
                },
                LastAppliedConfig: &addonv1alpha1.ConfigSpecHash{
                    ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
                    SpecHash: "hash",
                },
            },
        }).Build()},
        },
    }).Build()},
placements: []runtime.Object{
    &clusterv1beta1.Placement{ObjectMeta: metav1.ObjectMeta{Name: "placement1", Namespace: "test"}},
},
@@ -375,7 +399,10 @@ func TestMgmtAddonProgressingReconcile(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
    Name: "placement1",
    Namespace: "test",
    Labels: map[string]string{clusterv1beta1.PlacementLabel: "placement1"},
    Labels: map[string]string{
        clusterv1beta1.PlacementLabel: "placement1",
        clusterv1beta1.DecisionGroupIndexLabel: "0",
    },
},
Status: clusterv1beta1.PlacementDecisionStatus{
    Decisions: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster1"}},
@@ -407,7 +434,7 @@ func TestMgmtAddonProgressingReconcile(t *testing.T) {
if cma.Status.InstallProgressions[0].Conditions[0].Reason != addonv1alpha1.ProgressingReasonUpgradeSucceed {
    t.Errorf("InstallProgressions condition is not correct: %v", cma.Status.InstallProgressions[0].Conditions)
}
if cma.Status.InstallProgressions[0].Conditions[0].Message != "1/1 upgrade completed with no errors." {
if cma.Status.InstallProgressions[0].Conditions[0].Message != "1/1 upgrade completed with no errors, 0 timeout." {
    t.Errorf("InstallProgressions condition is not correct: %v", cma.Status.InstallProgressions[0].Conditions)
}
},
@@ -438,23 +465,25 @@ func TestMgmtAddonProgressingReconcile(t *testing.T) {
    return addon
}()},
clusterManagementAddon: []runtime.Object{addontesting.NewClusterManagementAddon("test", "", "").
    WithPlacementStrategy(addonv1alpha1.PlacementStrategy{PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"}}).
    WithInstallProgression(addonv1alpha1.InstallProgression{
        PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"},
        ConfigReferences: []addonv1alpha1.InstallConfigReference{
            {
                ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
                DesiredConfig: &addonv1alpha1.ConfigSpecHash{
                    ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
                    SpecHash: "hash1",
                },
                LastAppliedConfig: &addonv1alpha1.ConfigSpecHash{
                    ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
                    SpecHash: "hash",
                },
    WithPlacementStrategy(addonv1alpha1.PlacementStrategy{
        PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"},
        RolloutStrategy: clusterv1alpha1.RolloutStrategy{Type: clusterv1alpha1.All},
    }).WithInstallProgression(addonv1alpha1.InstallProgression{
        PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"},
        ConfigReferences: []addonv1alpha1.InstallConfigReference{
            {
                ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
                DesiredConfig: &addonv1alpha1.ConfigSpecHash{
                    ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
                    SpecHash: "hash1",
                },
                LastAppliedConfig: &addonv1alpha1.ConfigSpecHash{
                    ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
                    SpecHash: "hash",
                },
            },
        }).Build()},
        },
    }).Build()},
placements: []runtime.Object{
    &clusterv1beta1.Placement{ObjectMeta: metav1.ObjectMeta{Name: "placement1", Namespace: "test"}},
},
@@ -463,7 +492,10 @@ func TestMgmtAddonProgressingReconcile(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
    Name: "placement1",
    Namespace: "test",
    Labels: map[string]string{clusterv1beta1.PlacementLabel: "placement1"},
    Labels: map[string]string{
        clusterv1beta1.PlacementLabel: "placement1",
        clusterv1beta1.DecisionGroupIndexLabel: "0",
    },
},
Status: clusterv1beta1.PlacementDecisionStatus{
    Decisions: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster1"}},
@@ -488,7 +520,7 @@ func TestMgmtAddonProgressingReconcile(t *testing.T) {
if cma.Status.InstallProgressions[0].Conditions[0].Reason != addonv1alpha1.ProgressingReasonUpgrading {
    t.Errorf("InstallProgressions condition is not correct: %v", cma.Status.InstallProgressions[0].Conditions)
}
if cma.Status.InstallProgressions[0].Conditions[0].Message != "0/1 upgrading..." {
if cma.Status.InstallProgressions[0].Conditions[0].Message != "0/1 upgrading..., 0 timeout." {
    t.Errorf("InstallProgressions condition is not correct: %v", cma.Status.InstallProgressions[0].Conditions)
}
},
@@ -509,19 +541,21 @@ func TestMgmtAddonProgressingReconcile(t *testing.T) {
    return addon
}()},
clusterManagementAddon: []runtime.Object{addontesting.NewClusterManagementAddon("test", "", "").
    WithPlacementStrategy(addonv1alpha1.PlacementStrategy{PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"}}).
    WithInstallProgression(addonv1alpha1.InstallProgression{
        PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"},
        ConfigReferences: []addonv1alpha1.InstallConfigReference{
            {
                ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
                DesiredConfig: &addonv1alpha1.ConfigSpecHash{
                    ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
                    SpecHash: "hash1",
                },
    WithPlacementStrategy(addonv1alpha1.PlacementStrategy{
        PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"},
        RolloutStrategy: clusterv1alpha1.RolloutStrategy{Type: clusterv1alpha1.All},
    }).WithInstallProgression(addonv1alpha1.InstallProgression{
        PlacementRef: addonv1alpha1.PlacementRef{Name: "placement1", Namespace: "test"},
        ConfigReferences: []addonv1alpha1.InstallConfigReference{
            {
                ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"},
                DesiredConfig: &addonv1alpha1.ConfigSpecHash{
                    ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"},
                    SpecHash: "hash1",
                },
            },
        }).Build()},
        },
    }).Build()},
placements: []runtime.Object{
    &clusterv1beta1.Placement{ObjectMeta: metav1.ObjectMeta{Name: "placement1", Namespace: "test"}},
},
@@ -530,7 +564,10 @@ func TestMgmtAddonProgressingReconcile(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
    Name: "placement1",
    Namespace: "test",
    Labels: map[string]string{clusterv1beta1.PlacementLabel: "placement1"},
    Labels: map[string]string{
        clusterv1beta1.PlacementLabel: "placement1",
        clusterv1beta1.DecisionGroupIndexLabel: "0",
    },
},
Status: clusterv1beta1.PlacementDecisionStatus{
    Decisions: []clusterv1beta1.ClusterDecision{{ClusterName: "cluster1"}, {ClusterName: "cluster2"}},
@@ -558,7 +595,7 @@ func TestMgmtAddonProgressingReconcile(t *testing.T) {
if cma.Status.InstallProgressions[0].Conditions[0].Reason != addonv1alpha1.ProgressingReasonInstalling {
    t.Errorf("InstallProgressions condition is not correct: %v", cma.Status.InstallProgressions[0].Conditions)
}
if cma.Status.InstallProgressions[0].Conditions[0].Message != "1/2 installing..." {
if cma.Status.InstallProgressions[0].Conditions[0].Message != "1/2 installing..., 0 timeout." {
    t.Errorf("InstallProgressions condition is not correct: %v", cma.Status.InstallProgressions[0].Conditions[0].Message)
}
},
@@ -610,7 +647,7 @@ func TestMgmtAddonProgressingReconcile(t *testing.T) {

controller := &addonConfigurationController{
    addonClient: fakeAddonClient,
    placementDecisionLister: clusterInformers.Cluster().V1beta1().PlacementDecisions().Lister(),
    placementDecisionGetter: helpers.PlacementDecisionGetter{Client: clusterInformers.Cluster().V1beta1().PlacementDecisions().Lister()},
    placementLister: clusterInformers.Cluster().V1beta1().Placements().Lister(),
    clusterManagementAddonLister: addonInformers.Addon().V1alpha1().ClusterManagementAddOns().Lister(),
    managedClusterAddonIndexer: addonInformers.Addon().V1alpha1().ManagedClusterAddOns().Informer().GetIndexer(),
@@ -627,6 +664,11 @@ func TestMgmtAddonProgressingReconcile(t *testing.T) {
if err != nil {
    t.Errorf("expected no error when build graph: %v", err)
}
err = graph.generateRolloutResult()
if err != nil {
    t.Errorf("expected no error when refresh rollout result: %v", err)
}

_, _, err = reconcile.reconcile(context.TODO(), obj.(*addonv1alpha1.ClusterManagementAddOn), graph)
if err != nil && !c.expectErr {
    t.Errorf("expected no error when sync: %v", err)

26
pkg/common/helpers/clusters.go
Normal file
@@ -0,0 +1,26 @@
package helpers

import (
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/util/sets"

    clusterlister "open-cluster-management.io/api/client/cluster/listers/cluster/v1beta1"
    clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1"
)

type PlacementDecisionGetter struct {
    Client clusterlister.PlacementDecisionLister
}

func (pdl PlacementDecisionGetter) List(selector labels.Selector, namespace string) ([]*clusterv1beta1.PlacementDecision, error) {
    return pdl.Client.PlacementDecisions(namespace).List(selector)
}

// GetClusterChanges returns the added and deleted cluster names for the given placement.
func GetClusterChanges(client clusterlister.PlacementDecisionLister, placement *clusterv1beta1.Placement,
    existingClusters sets.Set[string]) (sets.Set[string], sets.Set[string], error) {
    pdtracker := clusterv1beta1.NewPlacementDecisionClustersTracker(
        placement, PlacementDecisionGetter{Client: client}, existingClusters)

    return pdtracker.GetClusterChanges()
}
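
For reference, a hedged sketch of how a consumer might call this helper; the lister wiring mirrors the test file that follows, and the placement variable is assumed to exist:

    // Illustrative only: lister obtained from a shared informer factory, as in the test below.
    client := clusterInformers.Cluster().V1beta1().PlacementDecisions().Lister()
    added, deleted, err := helpers.GetClusterChanges(client, placement, sets.New[string]("cluster1"))
    if err != nil {
        // handle the lookup failure
    }
    // added holds clusters newly scheduled by the placement; deleted holds
    // clusters that are no longer scheduled.
    _, _ = added, deleted
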
114
pkg/common/helpers/clusters_test.go
Normal file
@@ -0,0 +1,114 @@
package helpers

import (
    "reflect"
    "strconv"
    "testing"
    "time"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/util/sets"

    fakecluster "open-cluster-management.io/api/client/cluster/clientset/versioned/fake"
    clusterv1informers "open-cluster-management.io/api/client/cluster/informers/externalversions"
    clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1"
)

func newFakePlacementDecision(placementName, groupName string, groupIndex int, clusterNames ...string) *clusterv1beta1.PlacementDecision {
    decisions := make([]clusterv1beta1.ClusterDecision, len(clusterNames))
    for i, clusterName := range clusterNames {
        decisions[i] = clusterv1beta1.ClusterDecision{ClusterName: clusterName}
    }

    return &clusterv1beta1.PlacementDecision{
        ObjectMeta: metav1.ObjectMeta{
            Namespace: "default",
            Labels: map[string]string{
                clusterv1beta1.PlacementLabel:          placementName,
                clusterv1beta1.DecisionGroupNameLabel:  groupName,
                clusterv1beta1.DecisionGroupIndexLabel: strconv.Itoa(groupIndex),
            },
        },
        Status: clusterv1beta1.PlacementDecisionStatus{
            Decisions: decisions,
        },
    }
}

func TestPlacementDecisionClustersTracker_GetClusterChanges(t *testing.T) {
    tests := []struct {
        name                           string
        placement                      *clusterv1beta1.Placement
        existingScheduledClusters      sets.Set[string]
        updateDecisions                []runtime.Object
        expectAddedScheduledClusters   sets.Set[string]
        expectDeletedScheduledClusters sets.Set[string]
    }{
        {
            name: "test placementdecisions",
            placement: &clusterv1beta1.Placement{
                ObjectMeta: metav1.ObjectMeta{Name: "placement1", Namespace: "default"},
                Spec:       clusterv1beta1.PlacementSpec{},
            },
            existingScheduledClusters: sets.New[string]("cluster1", "cluster2"),
            updateDecisions: []runtime.Object{
                newFakePlacementDecision("placement1", "", 0, "cluster1", "cluster3"),
            },
            expectAddedScheduledClusters:   sets.New[string]("cluster3"),
            expectDeletedScheduledClusters: sets.New[string]("cluster2"),
        },
        {
            name: "test empty placementdecision",
            placement: &clusterv1beta1.Placement{
                ObjectMeta: metav1.ObjectMeta{Name: "placement1", Namespace: "default"},
                Spec:       clusterv1beta1.PlacementSpec{},
            },
            existingScheduledClusters: sets.New[string](),
            updateDecisions: []runtime.Object{
                newFakePlacementDecision("placement1", "", 0, "cluster1", "cluster2"),
            },
            expectAddedScheduledClusters:   sets.New[string]("cluster1", "cluster2"),
            expectDeletedScheduledClusters: sets.New[string](),
        },
        {
            name: "test nil existing cluster groups",
            placement: &clusterv1beta1.Placement{
                ObjectMeta: metav1.ObjectMeta{Name: "placement1", Namespace: "default"},
                Spec:       clusterv1beta1.PlacementSpec{},
            },
            existingScheduledClusters: nil,
            updateDecisions: []runtime.Object{
                newFakePlacementDecision("placement1", "", 0, "cluster1", "cluster2"),
            },
            expectAddedScheduledClusters:   sets.New[string]("cluster1", "cluster2"),
            expectDeletedScheduledClusters: sets.New[string](),
        },
    }

    for _, test := range tests {
        fakeClusterClient := fakecluster.NewSimpleClientset()
        clusterInformers := clusterv1informers.NewSharedInformerFactory(fakeClusterClient, 10*time.Minute)

        for _, obj := range test.updateDecisions {
            if err := clusterInformers.Cluster().V1beta1().PlacementDecisions().Informer().GetStore().Add(obj); err != nil {
                t.Fatal(err)
            }
        }

        // check changed decision clusters
        client := clusterInformers.Cluster().V1beta1().PlacementDecisions().Lister()
        addedClusters, deletedClusters, err := GetClusterChanges(client, test.placement, test.existingScheduledClusters)
        if err != nil {
            t.Errorf("Case: %v, failed to run GetClusterChanges(): %v", test.name, err)
        }
        if !reflect.DeepEqual(addedClusters, test.expectAddedScheduledClusters) {
            t.Errorf("Case: %v, expected added clusters: %v, got: %v", test.name, test.expectAddedScheduledClusters, addedClusters)
            return
        }
        if !reflect.DeepEqual(deletedClusters, test.expectDeletedScheduledClusters) {
            t.Errorf("Case: %v, expected deleted clusters: %v, got: %v", test.name, test.expectDeletedScheduledClusters, deletedClusters)
            return
        }
    }
}
@@ -19,16 +19,12 @@ import (
    "k8s.io/apimachinery/pkg/api/meta"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/client-go/dynamic"
    "k8s.io/klog/v2"

    clusterlister "open-cluster-management.io/api/client/cluster/listers/cluster/v1beta1"
    clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1"
    workapiv1 "open-cluster-management.io/api/work/v1"
)

@@ -471,20 +467,3 @@ func BuildResourceMeta(
    resourceMeta.Resource = mapping.Resource.Resource
    return resourceMeta, mapping.Resource, err
}

type PlacementDecisionGetter struct {
    Client clusterlister.PlacementDecisionLister
}

func (pdl PlacementDecisionGetter) List(selector labels.Selector, namespace string) ([]*clusterv1beta1.PlacementDecision, error) {
    return pdl.Client.PlacementDecisions(namespace).List(selector)
}

// Get added and deleted clusters names
func GetClusters(client clusterlister.PlacementDecisionLister, placement *clusterv1beta1.Placement,
    existingClusters sets.Set[string]) (sets.Set[string], sets.Set[string], error) {
    pdtracker := clusterv1beta1.NewPlacementDecisionClustersTracker(
        placement, PlacementDecisionGetter{Client: client}, existingClusters)

    return pdtracker.Get()
}
@@ -17,7 +17,7 @@ import (
    workv1 "open-cluster-management.io/api/work/v1"
    workapiv1alpha1 "open-cluster-management.io/api/work/v1alpha1"

    "open-cluster-management.io/ocm/pkg/work/helper"
    "open-cluster-management.io/ocm/pkg/common/helpers"
)

// deployReconciler manages ManifestWorks based on the placement.
@@ -56,7 +56,7 @@ func (d *deployReconciler) reconcile(ctx context.Context, mwrSet *workapiv1alpha
    }

    for _, placement := range placements {
        added, deleted, err := helper.GetClusters(d.placeDecisionLister, placement, existingClusters)
        added, deleted, err := helpers.GetClusterChanges(d.placeDecisionLister, placement, existingClusters)
        if err != nil {
            apimeta.SetStatusCondition(&mwrSet.Status.Conditions, GetPlacementDecisionVerified(workapiv1alpha1.ReasonNotAsExpected, ""))
@@ -13,6 +13,7 @@ import (

    addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
    clusterv1 "open-cluster-management.io/api/cluster/v1"
    clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1"
    clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1"
)
@@ -151,13 +152,13 @@ var _ = ginkgo.Describe("AddConfigs", func() {
                },
            },
        },
        RolloutStrategy: addonapiv1alpha1.RolloutStrategy{
            Type: addonapiv1alpha1.AddonRolloutStrategyUpdateAll,
        RolloutStrategy: clusterv1alpha1.RolloutStrategy{
            Type: clusterv1alpha1.All,
        },
    },
},
}
updateClusterManagementAddOn(context.Background(), cma)
patchClusterManagementAddOn(context.Background(), cma)

placement := &clusterv1beta1.Placement{ObjectMeta: metav1.ObjectMeta{Name: "test-placement", Namespace: configDefaultNamespace}}
_, err = hubClusterClient.ClusterV1beta1().Placements(configDefaultNamespace).Create(context.Background(), placement, metav1.CreateOptions{})
@@ -167,7 +168,10 @@ var _ = ginkgo.Describe("AddConfigs", func() {
    ObjectMeta: metav1.ObjectMeta{
        Name:      "test-placement",
        Namespace: configDefaultNamespace,
        Labels:    map[string]string{clusterv1beta1.PlacementLabel: "test-placement"},
        Labels: map[string]string{
            clusterv1beta1.PlacementLabel:          "test-placement",
            clusterv1beta1.DecisionGroupIndexLabel: "0",
        },
    },
}
decision, err = hubClusterClient.ClusterV1beta1().PlacementDecisions(configDefaultNamespace).Create(context.Background(), decision, metav1.CreateOptions{})
@@ -13,6 +13,7 @@ import (

    addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
    clusterv1 "open-cluster-management.io/api/cluster/v1"
    clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1"
    clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1"
)
@@ -88,7 +89,10 @@ var _ = ginkgo.Describe("Agent deploy", func() {
    ObjectMeta: metav1.ObjectMeta{
        Name:      "test-placement",
        Namespace: placementNamespace,
        Labels:    map[string]string{clusterv1beta1.PlacementLabel: "test-placement"},
        Labels: map[string]string{
            clusterv1beta1.PlacementLabel:          "test-placement",
            clusterv1beta1.DecisionGroupIndexLabel: "0",
        },
    },
}
decision, err = hubClusterClient.ClusterV1beta1().PlacementDecisions(placementNamespace).Create(context.Background(), decision, metav1.CreateOptions{})
@@ -109,8 +113,8 @@
    Placements: []addonapiv1alpha1.PlacementStrategy{
        {
            PlacementRef: addonapiv1alpha1.PlacementRef{Name: "test-placement", Namespace: placementNamespace},
            RolloutStrategy: addonapiv1alpha1.RolloutStrategy{
                Type: addonapiv1alpha1.AddonRolloutStrategyUpdateAll,
            RolloutStrategy: clusterv1alpha1.RolloutStrategy{
                Type: clusterv1alpha1.All,
            },
        },
    },
@@ -7,7 +7,6 @@ import (
    ginkgo "github.com/onsi/ginkgo/v2"
    "github.com/onsi/gomega"
    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/meta"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/runtime"
@@ -16,9 +15,8 @@ import (

    "open-cluster-management.io/addon-framework/pkg/addonmanager/constants"
    addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
    clusterv1 "open-cluster-management.io/api/cluster/v1"
    clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1"
    clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1"
    workapiv1 "open-cluster-management.io/api/work/v1"
)

const (
@@ -97,8 +95,8 @@ var _ = ginkgo.Describe("Addon upgrade", func() {
    Placements: []addonapiv1alpha1.PlacementStrategy{
        {
            PlacementRef: addonapiv1alpha1.PlacementRef{Name: placementName, Namespace: placementNamespace},
            RolloutStrategy: addonapiv1alpha1.RolloutStrategy{
                Type: addonapiv1alpha1.AddonRolloutStrategyUpdateAll,
            RolloutStrategy: clusterv1alpha1.RolloutStrategy{
                Type: clusterv1alpha1.All,
            },
            Configs: []addonapiv1alpha1.AddOnConfig{
                {
@@ -124,19 +122,7 @@ var _ = ginkgo.Describe("Addon upgrade", func() {
    for i := 0; i < 4; i++ {
        managedClusterName := fmt.Sprintf("managedcluster-%s-%d", suffix, i)
        clusterNames = append(clusterNames, managedClusterName)
        managedCluster := &clusterv1.ManagedCluster{
            ObjectMeta: metav1.ObjectMeta{
                Name: managedClusterName,
            },
            Spec: clusterv1.ManagedClusterSpec{
                HubAcceptsClient: true,
            },
        }
        _, err = hubClusterClient.ClusterV1().ManagedClusters().Create(context.Background(), managedCluster, metav1.CreateOptions{})
        gomega.Expect(err).ToNot(gomega.HaveOccurred())

        ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: managedClusterName}}
        _, err = hubKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
        err = createManagedCluster(hubClusterClient, managedClusterName)
        gomega.Expect(err).ToNot(gomega.HaveOccurred())
    }
@@ -157,23 +143,10 @@ var _ = ginkgo.Describe("Addon upgrade", func() {
    _, err = hubClusterClient.ClusterV1beta1().Placements(placementNamespace).Create(context.Background(), placement, metav1.CreateOptions{})
    gomega.Expect(err).ToNot(gomega.HaveOccurred())

    decision := &clusterv1beta1.PlacementDecision{
        ObjectMeta: metav1.ObjectMeta{
            Name:      placementName,
            Namespace: placementNamespace,
            Labels:    map[string]string{clusterv1beta1.PlacementLabel: placementName},
        },
    }
    decision, err = hubClusterClient.ClusterV1beta1().PlacementDecisions(placementNamespace).Create(context.Background(), decision, metav1.CreateOptions{})
    // prepare placement decisions
    err = createPlacementDecision(hubClusterClient, placementNamespace, placementName, "0", clusterNames[0], clusterNames[1])
    gomega.Expect(err).ToNot(gomega.HaveOccurred())

    decision.Status.Decisions = []clusterv1beta1.ClusterDecision{
        {ClusterName: clusterNames[0]},
        {ClusterName: clusterNames[1]},
        {ClusterName: clusterNames[2]},
        {ClusterName: clusterNames[3]},
    }
    _, err = hubClusterClient.ClusterV1beta1().PlacementDecisions(placementNamespace).UpdateStatus(context.Background(), decision, metav1.UpdateOptions{})
    err = createPlacementDecision(hubClusterClient, placementNamespace, placementName, "1", clusterNames[2], clusterNames[3])
    gomega.Expect(err).ToNot(gomega.HaveOccurred())

    // prepare default config
@@ -181,26 +154,11 @@ var _ = ginkgo.Describe("Addon upgrade", func() {
    _, err = hubKubeClient.CoreV1().Namespaces().Create(context.Background(), configDefaultNS, metav1.CreateOptions{})
    gomega.Expect(err).ToNot(gomega.HaveOccurred())

    addOnDefaultConfig := &addonapiv1alpha1.AddOnDeploymentConfig{
        ObjectMeta: metav1.ObjectMeta{
            Name:      configDefaultName,
            Namespace: configDefaultNamespace,
        },
        Spec: addOnDefaultConfigSpec,
    }
    _, err = hubAddonClient.AddonV1alpha1().AddOnDeploymentConfigs(configDefaultNamespace).Create(
        context.Background(), addOnDefaultConfig, metav1.CreateOptions{})
    err = createAddOnDeploymentConfig(hubAddonClient, configDefaultNamespace, configDefaultName, addOnDefaultConfigSpec)
    gomega.Expect(err).ToNot(gomega.HaveOccurred())

    // prepare update config
    addOnUpdateConfig := &addonapiv1alpha1.AddOnDeploymentConfig{
        ObjectMeta: metav1.ObjectMeta{
            Name:      configUpdateName,
            Namespace: configDefaultNamespace,
        },
        Spec: addOnTest2ConfigSpec,
    }
    _, err = hubAddonClient.AddonV1alpha1().AddOnDeploymentConfigs(configDefaultNamespace).Create(context.Background(), addOnUpdateConfig, metav1.CreateOptions{})
    err = createAddOnDeploymentConfig(hubAddonClient, configDefaultNamespace, configUpdateName, addOnTest2ConfigSpec)
    gomega.Expect(err).ToNot(gomega.HaveOccurred())
})
@@ -240,17 +198,7 @@ var _ = ginkgo.Describe("Addon upgrade", func() {

    ginkgo.By("update work status to trigger addon status")
    for i := 0; i < 4; i++ {
        work, err := hubWorkClient.WorkV1().ManifestWorks(clusterNames[i]).Get(context.Background(), manifestWorkName, metav1.GetOptions{})
        gomega.Expect(err).ToNot(gomega.HaveOccurred())

        meta.SetStatusCondition(
            &work.Status.Conditions,
            metav1.Condition{Type: workapiv1.WorkApplied, Status: metav1.ConditionTrue, Reason: "WorkApplied", ObservedGeneration: work.Generation})
        meta.SetStatusCondition(
            &work.Status.Conditions,
            metav1.Condition{Type: workapiv1.WorkAvailable, Status: metav1.ConditionTrue, Reason: "WorkAvailable", ObservedGeneration: work.Generation})
        _, err = hubWorkClient.WorkV1().ManifestWorks(clusterNames[i]).UpdateStatus(context.Background(), work, metav1.UpdateOptions{})
        gomega.Expect(err).ToNot(gomega.HaveOccurred())
        updateManifestWorkStatus(hubWorkClient, clusterNames[i], manifestWorkName, metav1.ConditionTrue)
    }

    ginkgo.By("check mca status")
@@ -325,18 +273,12 @@ var _ = ginkgo.Describe("Addon upgrade", func() {
        Type:    addonapiv1alpha1.ManagedClusterAddOnConditionProgressing,
        Status:  metav1.ConditionFalse,
        Reason:  addonapiv1alpha1.ProgressingReasonInstallSucceed,
        Message: "4/4 install completed with no errors.",
        Message: "4/4 install completed with no errors, 0 timeout.",
    })

    ginkgo.By("update all")
    ginkgo.By("upgrade configs to test1")
    addOnConfig, err := hubAddonClient.AddonV1alpha1().AddOnDeploymentConfigs(configDefaultNamespace).Get(
        context.Background(), configDefaultName, metav1.GetOptions{})
    gomega.Expect(err).ToNot(gomega.HaveOccurred())
    addOnConfig.Spec = addOnTest1ConfigSpec
    _, err = hubAddonClient.AddonV1alpha1().AddOnDeploymentConfigs(configDefaultNamespace).Update(
        context.Background(), addOnConfig, metav1.UpdateOptions{})
    gomega.Expect(err).ToNot(gomega.HaveOccurred())
    updateAddOnDeploymentConfigSpec(hubAddonClient, configDefaultNamespace, configDefaultName, addOnTest1ConfigSpec)

    ginkgo.By("check mca status")
    for i := 0; i < 4; i++ {
@@ -410,39 +352,24 @@ var _ = ginkgo.Describe("Addon upgrade", func() {
        Type:    addonapiv1alpha1.ManagedClusterAddOnConditionProgressing,
        Status:  metav1.ConditionFalse,
        Reason:  addonapiv1alpha1.ProgressingReasonUpgradeSucceed,
        Message: "4/4 upgrade completed with no errors.",
        Message: "4/4 upgrade completed with no errors, 0 timeout.",
    })

    ginkgo.By("update work status to avoid addon status update")
    gomega.Eventually(func() error {
        for i := 0; i < 4; i++ {
            work, err := hubWorkClient.WorkV1().ManifestWorks(clusterNames[i]).Get(context.Background(), manifestWorkName, metav1.GetOptions{})
            if err != nil {
                return err
            }
            meta.SetStatusCondition(
                &work.Status.Conditions,
                metav1.Condition{Type: workapiv1.WorkApplied, Status: metav1.ConditionFalse, Reason: "WorkApplied", ObservedGeneration: work.Generation})
            meta.SetStatusCondition(
                &work.Status.Conditions,
                metav1.Condition{Type: workapiv1.WorkAvailable, Status: metav1.ConditionFalse, Reason: "WorkAvailable", ObservedGeneration: work.Generation})
            _, err = hubWorkClient.WorkV1().ManifestWorks(clusterNames[i]).UpdateStatus(context.Background(), work, metav1.UpdateOptions{})
            if err != nil {
                return err
            }
        }
        return nil
    }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
    for i := 0; i < 4; i++ {
        updateManifestWorkStatus(hubWorkClient, clusterNames[i], manifestWorkName, metav1.ConditionFalse)
    }

    ginkgo.By("rolling upgrade")
    ginkgo.By("rolling upgrade per cluster")
    ginkgo.By("update cma to rolling update")
    cma, err = hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Get(context.Background(), cma.Name, metav1.GetOptions{})
    gomega.Expect(err).ToNot(gomega.HaveOccurred())
    cma.Spec.InstallStrategy.Placements[0].RolloutStrategy.Type = addonapiv1alpha1.AddonRolloutStrategyRollingUpdate
    cma.Spec.InstallStrategy.Placements[0].RolloutStrategy.RollingUpdate = &addonapiv1alpha1.RollingUpdate{MaxConcurrency: intstr.FromString("50%")}
    cma.Spec.InstallStrategy.Placements[0].RolloutStrategy = clusterv1alpha1.RolloutStrategy{
        Type:        clusterv1alpha1.Progressive,
        Progressive: &clusterv1alpha1.RolloutProgressive{MaxConcurrency: intstr.FromInt(2)},
    }
    cma.Spec.InstallStrategy.Placements[0].Configs[0].ConfigReferent = addonapiv1alpha1.ConfigReferent{Namespace: configDefaultNamespace, Name: configUpdateName}
    _, err = hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Update(context.Background(), cma, metav1.UpdateOptions{})
    gomega.Expect(err).ToNot(gomega.HaveOccurred())
    patchClusterManagementAddOn(context.Background(), cma)

    ginkgo.By("check mca status")
    for i := 0; i < 2; i++ {
@@ -549,29 +476,13 @@ var _ = ginkgo.Describe("Addon upgrade", func() {
        Type:    addonapiv1alpha1.ManagedClusterAddOnConditionProgressing,
        Status:  metav1.ConditionTrue,
        Reason:  addonapiv1alpha1.ProgressingReasonUpgrading,
        Message: "2/4 upgrading...",
        Message: "2/4 upgrading..., 0 timeout.",
    })

    ginkgo.By("update 2 work status to trigger addon status")
    gomega.Eventually(func() error {
        for i := 0; i < 2; i++ {
            work, err := hubWorkClient.WorkV1().ManifestWorks(clusterNames[i]).Get(context.Background(), manifestWorkName, metav1.GetOptions{})
            if err != nil {
                return err
            }
            meta.SetStatusCondition(
                &work.Status.Conditions,
                metav1.Condition{Type: workapiv1.WorkApplied, Status: metav1.ConditionTrue, Reason: "WorkApplied", ObservedGeneration: work.Generation})
            meta.SetStatusCondition(
                &work.Status.Conditions,
                metav1.Condition{Type: workapiv1.WorkAvailable, Status: metav1.ConditionTrue, Reason: "WorkAvailable", ObservedGeneration: work.Generation})
            _, err = hubWorkClient.WorkV1().ManifestWorks(clusterNames[i]).UpdateStatus(context.Background(), work, metav1.UpdateOptions{})
            if err != nil {
                return err
            }
        }
        return nil
    }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
    for i := 0; i < 2; i++ {
        updateManifestWorkStatus(hubWorkClient, clusterNames[i], manifestWorkName, metav1.ConditionTrue)
    }

    ginkgo.By("check mca status")
    for i := 0; i < 2; i++ {
@@ -645,29 +556,13 @@ var _ = ginkgo.Describe("Addon upgrade", func() {
        Type:    addonapiv1alpha1.ManagedClusterAddOnConditionProgressing,
        Status:  metav1.ConditionTrue,
        Reason:  addonapiv1alpha1.ProgressingReasonUpgrading,
        Message: "4/4 upgrading...",
        Message: "4/4 upgrading..., 0 timeout.",
    })

    ginkgo.By("update another 2 work status to trigger addon status")
    gomega.Eventually(func() error {
        for i := 2; i < 4; i++ {
            work, err := hubWorkClient.WorkV1().ManifestWorks(clusterNames[i]).Get(context.Background(), manifestWorkName, metav1.GetOptions{})
            if err != nil {
                return err
            }
            meta.SetStatusCondition(
                &work.Status.Conditions,
                metav1.Condition{Type: workapiv1.WorkApplied, Status: metav1.ConditionTrue, Reason: "WorkApplied", ObservedGeneration: work.Generation})
            meta.SetStatusCondition(
                &work.Status.Conditions,
                metav1.Condition{Type: workapiv1.WorkAvailable, Status: metav1.ConditionTrue, Reason: "WorkAvailable", ObservedGeneration: work.Generation})
            _, err = hubWorkClient.WorkV1().ManifestWorks(clusterNames[i]).UpdateStatus(context.Background(), work, metav1.UpdateOptions{})
            if err != nil {
                return err
            }
        }
        return nil
    }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
    for i := 2; i < 4; i++ {
        updateManifestWorkStatus(hubWorkClient, clusterNames[i], manifestWorkName, metav1.ConditionTrue)
    }

    ginkgo.By("check cma status")
    assertClusterManagementAddOnInstallProgression(testAddOnConfigsImpl.name, addonapiv1alpha1.InstallProgression{
@@ -706,7 +601,257 @@ var _ = ginkgo.Describe("Addon upgrade", func() {
        Type:    addonapiv1alpha1.ManagedClusterAddOnConditionProgressing,
        Status:  metav1.ConditionFalse,
        Reason:  addonapiv1alpha1.ProgressingReasonUpgradeSucceed,
        Message: "4/4 upgrade completed with no errors.",
        Message: "4/4 upgrade completed with no errors, 0 timeout.",
    })

    ginkgo.By("update work status to avoid addon status update")
    for i := 0; i < 4; i++ {
        updateManifestWorkStatus(hubWorkClient, clusterNames[i], manifestWorkName, metav1.ConditionFalse)
    }

    ginkgo.By("rolling upgrade per group")
    ginkgo.By("update cma to rolling update per group")
    cma, err = hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Get(context.Background(), cma.Name, metav1.GetOptions{})
    gomega.Expect(err).ToNot(gomega.HaveOccurred())
    cma.Spec.InstallStrategy.Placements[0].RolloutStrategy = clusterv1alpha1.RolloutStrategy{
        Type: clusterv1alpha1.ProgressivePerGroup,
    }
    patchClusterManagementAddOn(context.Background(), cma)

    ginkgo.By("upgrade configs to test3")
    updateAddOnDeploymentConfigSpec(hubAddonClient, configDefaultNamespace, configUpdateName, addOnTest3ConfigSpec)

    ginkgo.By("check mca status")
    for i := 0; i < 2; i++ {
        assertManagedClusterAddOnConfigReferences(testAddOnConfigsImpl.name, clusterNames[i], addonapiv1alpha1.ConfigReference{
            ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{
                Group:    addOnDeploymentConfigGVR.Group,
                Resource: addOnDeploymentConfigGVR.Resource,
            },
            ConfigReferent: addonapiv1alpha1.ConfigReferent{
                Namespace: configDefaultNamespace,
                Name:      configUpdateName,
            },
            LastObservedGeneration: 2,
            DesiredConfig: &addonapiv1alpha1.ConfigSpecHash{
                ConfigReferent: addonapiv1alpha1.ConfigReferent{
                    Namespace: configDefaultNamespace,
                    Name:      configUpdateName,
                },
                SpecHash: addOnTest3ConfigSpecHash,
            },
            LastAppliedConfig: &addonapiv1alpha1.ConfigSpecHash{
                ConfigReferent: addonapiv1alpha1.ConfigReferent{
                    Namespace: configDefaultNamespace,
                    Name:      configUpdateName,
                },
                SpecHash: addOnTest2ConfigSpecHash,
            },
        })
        assertManagedClusterAddOnConditions(testAddOnConfigsImpl.name, clusterNames[i], metav1.Condition{
            Type:    addonapiv1alpha1.ManagedClusterAddOnConditionProgressing,
            Status:  metav1.ConditionTrue,
            Reason:  addonapiv1alpha1.ProgressingReasonUpgrading,
            Message: "upgrading... work is not ready",
        })
    }
    for i := 2; i < 4; i++ {
        assertManagedClusterAddOnConfigReferences(testAddOnConfigsImpl.name, clusterNames[i], addonapiv1alpha1.ConfigReference{
            ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{
                Group:    addOnDeploymentConfigGVR.Group,
                Resource: addOnDeploymentConfigGVR.Resource,
            },
            ConfigReferent: addonapiv1alpha1.ConfigReferent{
                Namespace: configDefaultNamespace,
                Name:      configUpdateName,
            },
            LastObservedGeneration: 2,
            DesiredConfig: &addonapiv1alpha1.ConfigSpecHash{
                ConfigReferent: addonapiv1alpha1.ConfigReferent{
                    Namespace: configDefaultNamespace,
                    Name:      configUpdateName,
                },
                SpecHash: addOnTest2ConfigSpecHash,
            },
            LastAppliedConfig: &addonapiv1alpha1.ConfigSpecHash{
                ConfigReferent: addonapiv1alpha1.ConfigReferent{
                    Namespace: configDefaultNamespace,
                    Name:      configUpdateName,
                },
                SpecHash: addOnTest2ConfigSpecHash,
            },
        })
        assertManagedClusterAddOnConditions(testAddOnConfigsImpl.name, clusterNames[i], metav1.Condition{
            Type:    addonapiv1alpha1.ManagedClusterAddOnConditionProgressing,
            Status:  metav1.ConditionFalse,
            Reason:  addonapiv1alpha1.ProgressingReasonUpgradeSucceed,
            Message: "upgrade completed with no errors.",
        })
    }

    ginkgo.By("check cma status")
    assertClusterManagementAddOnInstallProgression(testAddOnConfigsImpl.name, addonapiv1alpha1.InstallProgression{
        PlacementRef: addonapiv1alpha1.PlacementRef{Name: placementNamespace, Namespace: placementNamespace},
        ConfigReferences: []addonapiv1alpha1.InstallConfigReference{
            {
                ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{
                    Group:    addOnDeploymentConfigGVR.Group,
                    Resource: addOnDeploymentConfigGVR.Resource,
                },
                DesiredConfig: &addonapiv1alpha1.ConfigSpecHash{
                    ConfigReferent: addonapiv1alpha1.ConfigReferent{
                        Namespace: configDefaultNamespace,
                        Name:      configUpdateName,
                    },
                    SpecHash: addOnTest3ConfigSpecHash,
                },
                LastAppliedConfig: &addonapiv1alpha1.ConfigSpecHash{
                    ConfigReferent: addonapiv1alpha1.ConfigReferent{
                        Namespace: configDefaultNamespace,
                        Name:      configUpdateName,
                    },
                    SpecHash: addOnTest2ConfigSpecHash,
                },
                LastKnownGoodConfig: &addonapiv1alpha1.ConfigSpecHash{
                    ConfigReferent: addonapiv1alpha1.ConfigReferent{
                        Namespace: configDefaultNamespace,
                        Name:      configUpdateName,
                    },
                    SpecHash: addOnTest2ConfigSpecHash,
                },
            },
        },
    })
    assertClusterManagementAddOnConditions(testAddOnConfigsImpl.name, metav1.Condition{
        Type:    addonapiv1alpha1.ManagedClusterAddOnConditionProgressing,
        Status:  metav1.ConditionTrue,
        Reason:  addonapiv1alpha1.ProgressingReasonUpgrading,
        Message: "2/4 upgrading..., 0 timeout.",
    })

    ginkgo.By("update 2 work status to trigger addon status")
    for i := 0; i < 2; i++ {
        updateManifestWorkStatus(hubWorkClient, clusterNames[i], manifestWorkName, metav1.ConditionTrue)
    }

    ginkgo.By("check mca status")
    for i := 0; i < 2; i++ {
        assertManagedClusterAddOnConfigReferences(testAddOnConfigsImpl.name, clusterNames[i], addonapiv1alpha1.ConfigReference{
            ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{
                Group:    addOnDeploymentConfigGVR.Group,
                Resource: addOnDeploymentConfigGVR.Resource,
            },
            ConfigReferent: addonapiv1alpha1.ConfigReferent{
                Namespace: configDefaultNamespace,
                Name:      configUpdateName,
            },
            LastObservedGeneration: 2,
            DesiredConfig: &addonapiv1alpha1.ConfigSpecHash{
                ConfigReferent: addonapiv1alpha1.ConfigReferent{
                    Namespace: configDefaultNamespace,
                    Name:      configUpdateName,
                },
                SpecHash: addOnTest3ConfigSpecHash,
            },
            LastAppliedConfig: &addonapiv1alpha1.ConfigSpecHash{
                ConfigReferent: addonapiv1alpha1.ConfigReferent{
                    Namespace: configDefaultNamespace,
                    Name:      configUpdateName,
                },
                SpecHash: addOnTest3ConfigSpecHash,
            },
        })
        assertManagedClusterAddOnConditions(testAddOnConfigsImpl.name, clusterNames[i], metav1.Condition{
            Type:    addonapiv1alpha1.ManagedClusterAddOnConditionProgressing,
            Status:  metav1.ConditionFalse,
            Reason:  addonapiv1alpha1.ProgressingReasonUpgradeSucceed,
            Message: "upgrade completed with no errors.",
        })
    }

    ginkgo.By("check cma status")
    assertClusterManagementAddOnInstallProgression(testAddOnConfigsImpl.name, addonapiv1alpha1.InstallProgression{
        PlacementRef: addonapiv1alpha1.PlacementRef{Name: placementNamespace, Namespace: placementNamespace},
        ConfigReferences: []addonapiv1alpha1.InstallConfigReference{
            {
                ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{
                    Group:    addOnDeploymentConfigGVR.Group,
                    Resource: addOnDeploymentConfigGVR.Resource,
                },
                DesiredConfig: &addonapiv1alpha1.ConfigSpecHash{
                    ConfigReferent: addonapiv1alpha1.ConfigReferent{
                        Namespace: configDefaultNamespace,
                        Name:      configUpdateName,
                    },
                    SpecHash: addOnTest3ConfigSpecHash,
                },
                LastAppliedConfig: &addonapiv1alpha1.ConfigSpecHash{
                    ConfigReferent: addonapiv1alpha1.ConfigReferent{
                        Namespace: configDefaultNamespace,
                        Name:      configUpdateName,
                    },
                    SpecHash: addOnTest2ConfigSpecHash,
                },
                LastKnownGoodConfig: &addonapiv1alpha1.ConfigSpecHash{
                    ConfigReferent: addonapiv1alpha1.ConfigReferent{
                        Namespace: configDefaultNamespace,
                        Name:      configUpdateName,
                    },
                    SpecHash: addOnTest2ConfigSpecHash,
                },
            },
        },
    })
    assertClusterManagementAddOnConditions(testAddOnConfigsImpl.name, metav1.Condition{
        Type:    addonapiv1alpha1.ManagedClusterAddOnConditionProgressing,
        Status:  metav1.ConditionTrue,
        Reason:  addonapiv1alpha1.ProgressingReasonUpgrading,
        Message: "4/4 upgrading..., 0 timeout.",
    })

    ginkgo.By("update another 2 work status to trigger addon status")
    for i := 2; i < 4; i++ {
        updateManifestWorkStatus(hubWorkClient, clusterNames[i], manifestWorkName, metav1.ConditionTrue)
    }

    ginkgo.By("check cma status")
    assertClusterManagementAddOnInstallProgression(testAddOnConfigsImpl.name, addonapiv1alpha1.InstallProgression{
        PlacementRef: addonapiv1alpha1.PlacementRef{Name: placementNamespace, Namespace: placementNamespace},
        ConfigReferences: []addonapiv1alpha1.InstallConfigReference{
            {
                ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{
                    Group:    addOnDeploymentConfigGVR.Group,
                    Resource: addOnDeploymentConfigGVR.Resource,
                },
                DesiredConfig: &addonapiv1alpha1.ConfigSpecHash{
                    ConfigReferent: addonapiv1alpha1.ConfigReferent{
                        Namespace: configDefaultNamespace,
                        Name:      configUpdateName,
                    },
                    SpecHash: addOnTest3ConfigSpecHash,
                },
                LastAppliedConfig: &addonapiv1alpha1.ConfigSpecHash{
                    ConfigReferent: addonapiv1alpha1.ConfigReferent{
                        Namespace: configDefaultNamespace,
                        Name:      configUpdateName,
                    },
                    SpecHash: addOnTest3ConfigSpecHash,
                },
                LastKnownGoodConfig: &addonapiv1alpha1.ConfigSpecHash{
                    ConfigReferent: addonapiv1alpha1.ConfigReferent{
                        Namespace: configDefaultNamespace,
                        Name:      configUpdateName,
                    },
                    SpecHash: addOnTest3ConfigSpecHash,
                },
            },
        },
    })
    assertClusterManagementAddOnConditions(testAddOnConfigsImpl.name, metav1.Condition{
        Type:    addonapiv1alpha1.ManagedClusterAddOnConditionProgressing,
        Status:  metav1.ConditionFalse,
        Reason:  addonapiv1alpha1.ProgressingReasonUpgradeSucceed,
        Message: "4/4 upgrade completed with no errors, 0 timeout.",
    })
})
})
@@ -6,12 +6,21 @@ import (

    "github.com/onsi/ginkgo/v2"
    "github.com/onsi/gomega"
    corev1 "k8s.io/api/core/v1"
    apiequality "k8s.io/apimachinery/pkg/api/equality"
    "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/api/meta"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
    addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned"
    clusterv1client "open-cluster-management.io/api/client/cluster/clientset/versioned"
    workclientset "open-cluster-management.io/api/client/work/clientset/versioned"
    clusterv1 "open-cluster-management.io/api/cluster/v1"
    clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1"
    workapiv1 "open-cluster-management.io/api/work/v1"

    "open-cluster-management.io/ocm/pkg/common/patcher"
)

// TODO: The spec hash is hardcoded here and will break the test once the API changes.
@@ -19,6 +28,7 @@ import (
const addOnDefaultConfigSpecHash = "19afba0192e93f738a3b6e118f177dffbc6546f7d32a3aed79cba761e6123a1f" //nolint:gosec
const addOnTest1ConfigSpecHash = "1eef585d8f4d92a0d1aa5ac3a61337dd03c2723f9f8d1c56f339e0af0becd4af" //nolint:gosec
const addOnTest2ConfigSpecHash = "c0f1c4105357e7a298a8789d88c7acffc83704f407a35d73fa0b83a079958440" //nolint:gosec
const addOnTest3ConfigSpecHash = "83ca9715ea80ec4b88438607e17f15aaf1c2db8e469dfa933ceb5887cc49328d" //nolint:gosec

var addOnDefaultConfigSpec = addonapiv1alpha1.AddOnDeploymentConfigSpec{
    CustomizedVariables: []addonapiv1alpha1.CustomizedVariable{
@@ -45,6 +55,15 @@ var addOnTest2ConfigSpec = addonapiv1alpha1.AddOnDeploymentConfigSpec{
    },
}

var addOnTest3ConfigSpec = addonapiv1alpha1.AddOnDeploymentConfigSpec{
    CustomizedVariables: []addonapiv1alpha1.CustomizedVariable{
        {
            Name:  "test3",
            Value: "test3",
        },
    },
}

func createClusterManagementAddOn(name, defaultConfigNamespace, defaultConfigName string) (*addonapiv1alpha1.ClusterManagementAddOn, error) {
    clusterManagementAddon, err := hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Get(context.Background(), name, metav1.GetOptions{})
    if errors.IsNotFound(err) {
@@ -90,15 +109,25 @@ func createClusterManagementAddOn(name, defaultConfigNamespace, defaultConfigNam
    return clusterManagementAddon, nil
}

func updateClusterManagementAddOn(_ context.Context, new *addonapiv1alpha1.ClusterManagementAddOn) {
func patchClusterManagementAddOn(_ context.Context, newaddon *addonapiv1alpha1.ClusterManagementAddOn) {
    gomega.Eventually(func() error {
        old, err := hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Get(context.Background(), new.Name, metav1.GetOptions{})
        old, err := hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Get(context.Background(), newaddon.Name, metav1.GetOptions{})
        if err != nil {
            return err
        }
        old.Spec = new.Spec
        old.Annotations = new.Annotations
        _, err = hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Update(context.Background(), old, metav1.UpdateOptions{})
        new := old.DeepCopy()
        new.Spec = *newaddon.Spec.DeepCopy()
        new.Annotations = newaddon.Annotations

        cmaPatcher := patcher.NewPatcher[
            *addonapiv1alpha1.ClusterManagementAddOn, addonapiv1alpha1.ClusterManagementAddOnSpec, addonapiv1alpha1.ClusterManagementAddOnStatus](
            hubAddonClient.AddonV1alpha1().ClusterManagementAddOns())
        _, err = cmaPatcher.PatchSpec(context.Background(), new, new.Spec, old.Spec)
        if err != nil {
            return err
        }

        _, err = cmaPatcher.PatchLabelAnnotations(context.Background(), new, new.ObjectMeta, old.ObjectMeta)
        return err
    }, eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed())
}
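For context, a sketch of how a test calls this helper: mutate a deep copy and let the helper compute and send patches against the live object, which avoids clobbering concurrent writers. The field being changed here is illustrative:

desired := cma.DeepCopy()
// Hypothetical mutation; any spec or annotation change works the same way.
desired.Spec.InstallStrategy.Placements[0].RolloutStrategy = clusterv1alpha1.RolloutStrategy{
    Type: clusterv1alpha1.All,
}
patchClusterManagementAddOn(context.Background(), desired)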
@@ -140,6 +169,118 @@ func assertClusterManagementAddOnDefaultConfigReferences(name string, expect ...
    }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
}

func updateManifestWorkStatus(client workclientset.Interface, clusterName string, manifestWorkName string, status metav1.ConditionStatus) {
    gomega.Eventually(func() error {
        work, err := client.WorkV1().ManifestWorks(clusterName).Get(context.Background(), manifestWorkName, metav1.GetOptions{})
        if err != nil {
            return err
        }

        meta.SetStatusCondition(
            &work.Status.Conditions,
            metav1.Condition{Type: workapiv1.WorkApplied, Status: status, Reason: "WorkApplied", ObservedGeneration: work.Generation})
        meta.SetStatusCondition(
            &work.Status.Conditions,
            metav1.Condition{Type: workapiv1.WorkAvailable, Status: status, Reason: "WorkAvailable", ObservedGeneration: work.Generation})

        _, err = client.WorkV1().ManifestWorks(clusterName).UpdateStatus(context.Background(), work, metav1.UpdateOptions{})
        return err
    }, eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed())
}

func createAddOnDeploymentConfig(
    hubAddonClient addonv1alpha1client.Interface,
    configNamespace string,
    configName string,
    configSpec addonapiv1alpha1.AddOnDeploymentConfigSpec,
) error {
    config := &addonapiv1alpha1.AddOnDeploymentConfig{
        ObjectMeta: metav1.ObjectMeta{
            Name:      configName,
            Namespace: configNamespace,
        },
        Spec: configSpec,
    }
    _, err := hubAddonClient.AddonV1alpha1().AddOnDeploymentConfigs(configNamespace).Create(
        context.Background(), config, metav1.CreateOptions{})
    return err
}

func updateAddOnDeploymentConfigSpec(
    hubAddonClient addonv1alpha1client.Interface,
    configNamespace string,
    configName string,
    newConfigSpec addonapiv1alpha1.AddOnDeploymentConfigSpec,
) {
    gomega.Eventually(func() error {
        addOnConfig, err := hubAddonClient.AddonV1alpha1().AddOnDeploymentConfigs(configNamespace).Get(
            context.Background(), configName, metav1.GetOptions{})
        if err != nil {
            return err
        }

        addOnConfig.Spec = newConfigSpec
        _, err = hubAddonClient.AddonV1alpha1().AddOnDeploymentConfigs(configNamespace).Update(
            context.Background(), addOnConfig, metav1.UpdateOptions{})
        if err != nil {
            return err
        }
        return nil
    }, eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed())
}

func createManagedCluster(hubClusterClient clusterv1client.Interface, managedClusterName string) error {
    managedCluster := &clusterv1.ManagedCluster{
        ObjectMeta: metav1.ObjectMeta{
            Name: managedClusterName,
        },
        Spec: clusterv1.ManagedClusterSpec{
            HubAcceptsClient: true,
        },
    }
    _, err := hubClusterClient.ClusterV1().ManagedClusters().Create(context.Background(), managedCluster, metav1.CreateOptions{})
    if err != nil {
        return err
    }

    ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: managedClusterName}}
    _, err = hubKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
    if err != nil {
        return err
    }
    return nil
}

func createPlacementDecision(hubClusterClient clusterv1client.Interface, placementNamespace, placementName, groupIndex string, clusterNames ...string) error {
    var err error
    decision := &clusterv1beta1.PlacementDecision{
        ObjectMeta: metav1.ObjectMeta{
            Name:      placementName + groupIndex,
            Namespace: placementNamespace,
            Labels: map[string]string{
                clusterv1beta1.PlacementLabel:          placementName,
                clusterv1beta1.DecisionGroupIndexLabel: groupIndex,
            },
        },
    }
    decision, err = hubClusterClient.ClusterV1beta1().PlacementDecisions(placementNamespace).Create(context.Background(), decision, metav1.CreateOptions{})
    if err != nil {
        return err
    }

    clusterDecisions := []clusterv1beta1.ClusterDecision{}
    for _, clusterName := range clusterNames {
        clusterDecisions = append(clusterDecisions, clusterv1beta1.ClusterDecision{ClusterName: clusterName})
    }
    decision.Status.Decisions = clusterDecisions

    _, err = hubClusterClient.ClusterV1beta1().PlacementDecisions(placementNamespace).UpdateStatus(context.Background(), decision, metav1.UpdateOptions{})
    if err != nil {
        return err
    }
    return nil
}

func assertClusterManagementAddOnInstallProgression(name string, expect ...addonapiv1alpha1.InstallProgression) {
    ginkgo.By(fmt.Sprintf("Check ClusterManagementAddOn %s InstallProgression", name))
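Taken together, these helpers condense the test setup considerably. A sketch of how they compose, reusing names from the tests above (the loop bound and group split are illustrative):

// Hypothetical setup: four clusters split into two decision groups.
for i := 0; i < 4; i++ {
    name := fmt.Sprintf("managedcluster-%s-%d", suffix, i)
    clusterNames = append(clusterNames, name)
    err = createManagedCluster(hubClusterClient, name)
    gomega.Expect(err).ToNot(gomega.HaveOccurred())
}
// two decision groups of two clusters each
err = createPlacementDecision(hubClusterClient, placementNamespace, placementName, "0", clusterNames[0], clusterNames[1])
gomega.Expect(err).ToNot(gomega.HaveOccurred())
err = createPlacementDecision(hubClusterClient, placementNamespace, placementName, "1", clusterNames[2], clusterNames[3])
gomega.Expect(err).ToNot(gomega.HaveOccurred())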
4
vendor/modules.txt
vendored
@@ -1408,7 +1408,7 @@ k8s.io/utils/path
k8s.io/utils/pointer
k8s.io/utils/strings/slices
k8s.io/utils/trace
# open-cluster-management.io/addon-framework v0.7.1-0.20230911140813-9676b4f8c180
# open-cluster-management.io/addon-framework v0.7.1-0.20230920005921-65bcbb446df8
## explicit; go 1.20
open-cluster-management.io/addon-framework/pkg/addonfactory
open-cluster-management.io/addon-framework/pkg/addonmanager
@@ -1428,7 +1428,7 @@ open-cluster-management.io/addon-framework/pkg/index
open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration
open-cluster-management.io/addon-framework/pkg/manager/controllers/addonowner
open-cluster-management.io/addon-framework/pkg/utils
# open-cluster-management.io/api v0.11.1-0.20230905055724-cf1ead467a83
# open-cluster-management.io/api v0.11.1-0.20230919033310-0146ddfab71c
## explicit; go 1.19
open-cluster-management.io/api/addon/v1alpha1
open-cluster-management.io/api/client/addon/clientset/versioned
@@ -24,7 +24,7 @@ func (d *managedClusterAddonConfigurationReconciler) reconcile(
    ctx context.Context, cma *addonv1alpha1.ClusterManagementAddOn, graph *configurationGraph) (*addonv1alpha1.ClusterManagementAddOn, reconcileState, error) {
    var errs []error

    for _, addon := range graph.addonToUpdate() {
    for _, addon := range graph.getAddonsToUpdate() {
        mca := d.mergeAddonConfig(addon.mca, addon.desiredConfigs)
        err := d.patchAddonStatus(ctx, mca, addon.mca)
        if err != nil {
@@ -14,6 +14,7 @@ import (
    addoninformerv1alpha1 "open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1"
    addonlisterv1alpha1 "open-cluster-management.io/api/client/addon/listers/addon/v1alpha1"
    clusterinformersv1beta1 "open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1beta1"
    clusterlister "open-cluster-management.io/api/client/cluster/listers/cluster/v1beta1"
    clusterlisterv1beta1 "open-cluster-management.io/api/client/cluster/listers/cluster/v1beta1"
    clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1"

@@ -32,6 +33,7 @@ type addonConfigurationController struct {
    addonFilterFunc         factory.EventFilterFunc
    placementLister         clusterlisterv1beta1.PlacementLister
    placementDecisionLister clusterlisterv1beta1.PlacementDecisionLister
    placementDecisionGetter PlacementDecisionGetter

    reconcilers []addonConfigurationReconcile
}
@@ -93,6 +95,7 @@ func NewAddonConfigurationController(
        WithInformersQueueKeysFunc(index.ClusterManagementAddonByPlacementQueueKey(clusterManagementAddonInformers), placementInformer.Informer())
    c.placementLister = placementInformer.Lister()
    c.placementDecisionLister = placementDecisionInformer.Lister()
    c.placementDecisionGetter = PlacementDecisionGetter{Client: placementDecisionInformer.Lister()}
}

return controllerFactory.WithSync(c.sync).ToController("addon-configuration-controller")
@@ -125,6 +128,13 @@ func (c *addonConfigurationController) sync(ctx context.Context, syncCtx factory
        return err
    }

    // generate the rollout result before calling reconcile()
    // so that all the reconcilers are using the same rollout result
    err = graph.generateRolloutResult()
    if err != nil {
        return err
    }

    var state reconcileState
    var errs []error
    for _, reconciler := range c.reconcilers {
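Condensed, the sync flow above becomes: build the graph once, compute the rollout result once, then hand the same graph to every reconciler. A sketch under that reading (names taken from the hunks above; error aggregation and state tracking elided, so this is not a drop-in implementation):

// Condensed sync flow, illustrative only.
graph, err := c.buildConfigurationGraph(cma)
if err != nil {
    return err
}
// compute the rollout result once so all reconcilers see the same view
if err := graph.generateRolloutResult(); err != nil {
    return err
}
for _, reconciler := range c.reconcilers {
    cma, _, err = reconciler.reconcile(ctx, cma, graph)
    if err != nil {
        return err
    }
}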
@@ -160,20 +170,16 @@ func (c *addonConfigurationController) buildConfigurationGraph(cma *addonv1alpha
    // check each install strategy in status
    var errs []error
    for _, installProgression := range cma.Status.InstallProgressions {
        clusters, err := c.getClustersByPlacement(installProgression.PlacementRef.Name, installProgression.PlacementRef.Namespace)
        if errors.IsNotFound(err) {
            klog.V(2).Infof("placement %s/%s is not found for addon %s", installProgression.PlacementRef.Namespace, installProgression.PlacementRef.Name, cma.Name)
            continue
        }
        if err != nil {
            errs = append(errs, err)
            continue
        }

        for _, installStrategy := range cma.Spec.InstallStrategy.Placements {
            if installStrategy.PlacementRef == installProgression.PlacementRef {
                graph.addPlacementNode(installStrategy, installProgression, clusters)
            if installStrategy.PlacementRef != installProgression.PlacementRef {
                continue
            }

            // add placement node
            err = graph.addPlacementNode(installStrategy, installProgression, c.placementLister, c.placementDecisionGetter)
            if err != nil {
                errs = append(errs, err)
                continue
            }
        }
    }
@@ -181,29 +187,10 @@ func (c *addonConfigurationController) buildConfigurationGraph(cma *addonv1alpha
    return graph, utilerrors.NewAggregate(errs)
}

func (c *addonConfigurationController) getClustersByPlacement(name, namespace string) ([]string, error) {
    var clusters []string
    if c.placementLister == nil || c.placementDecisionLister == nil {
        return clusters, nil
    }
    _, err := c.placementLister.Placements(namespace).Get(name)
    if err != nil {
        return clusters, err
    }

    decisionSelector := labels.SelectorFromSet(labels.Set{
        clusterv1beta1.PlacementLabel: name,
    })
    decisions, err := c.placementDecisionLister.PlacementDecisions(namespace).List(decisionSelector)
    if err != nil {
        return clusters, err
    }

    for _, d := range decisions {
        for _, sd := range d.Status.Decisions {
            clusters = append(clusters, sd.ClusterName)
        }
    }

    return clusters, nil
type PlacementDecisionGetter struct {
    Client clusterlister.PlacementDecisionLister
}

func (pdl PlacementDecisionGetter) List(selector labels.Selector, namespace string) ([]*clusterv1beta1.PlacementDecision, error) {
    return pdl.Client.PlacementDecisions(namespace).List(selector)
}
@@ -2,19 +2,16 @@ package addonconfiguration

import (
    "fmt"
    "math"
    "sort"
    "strconv"

    "k8s.io/apimachinery/pkg/api/equality"
    "k8s.io/apimachinery/pkg/util/intstr"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/sets"
    addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
)

var (
    defaultMaxConcurrency = intstr.FromString("25%")
    maxMaxConcurrency     = intstr.FromString("100%")
    addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
    clusterlisterv1beta1 "open-cluster-management.io/api/client/cluster/listers/cluster/v1beta1"
    clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1"
    clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1"
)

// configurationGraph is a two-level snapshot tree of the addons' configuration
@@ -29,9 +26,11 @@ type configurationGraph struct {

// installStrategyNode is a node in configurationGraph defined by an install strategy
type installStrategyNode struct {
    placementRef   addonv1alpha1.PlacementRef
    maxConcurrency intstr.IntOrString
    desiredConfigs addonConfigMap
    placementRef    addonv1alpha1.PlacementRef
    pdTracker       *clusterv1beta1.PlacementDecisionClustersTracker
    rolloutStrategy clusterv1alpha1.RolloutStrategy
    rolloutResult   clusterv1alpha1.RolloutResult
    desiredConfigs  addonConfigMap
    // children keeps a map of addon nodes as the children of this node
    children map[string]*addonNode
    clusters sets.Set[string]
@@ -42,46 +41,63 @@ type installStrategyNode struct {
type addonNode struct {
    desiredConfigs addonConfigMap
    mca            *addonv1alpha1.ManagedClusterAddOn
    // record mca upgrade status
    mcaUpgradeStatus upgradeStatus
    status *clusterv1alpha1.ClusterRolloutStatus
}

type upgradeStatus int

const (
    // mca desired configs not synced from desiredConfigs yet
    toupgrade upgradeStatus = iota
    // mca desired configs upgraded and last applied configs not upgraded
    upgrading
    // both desired configs and last applied configs are upgraded
    upgraded
)

type addonConfigMap map[addonv1alpha1.ConfigGroupResource]addonv1alpha1.ConfigReference

// set addon upgrade status
func (n *addonNode) setUpgradeStatus() {
// set addon rollout status
func (n *addonNode) setRolloutStatus() {
    // desired configs don't match the actual configs, set to ToApply
    if len(n.mca.Status.ConfigReferences) != len(n.desiredConfigs) {
        n.mcaUpgradeStatus = toupgrade
        n.status = &clusterv1alpha1.ClusterRolloutStatus{Status: clusterv1alpha1.ToApply}
        return
    }

    var progressingCond metav1.Condition
    for _, cond := range n.mca.Status.Conditions {
        if cond.Type == addonv1alpha1.ManagedClusterAddOnConditionProgressing {
            progressingCond = cond
            break
        }
    }

    for _, actual := range n.mca.Status.ConfigReferences {
        if desired, ok := n.desiredConfigs[actual.ConfigGroupResource]; ok {
            // desired config spec hash doesn't match the actual one, set to ToApply
            if !equality.Semantic.DeepEqual(desired.DesiredConfig, actual.DesiredConfig) {
                n.mcaUpgradeStatus = toupgrade
                n.status = &clusterv1alpha1.ClusterRolloutStatus{Status: clusterv1alpha1.ToApply}
                return
                // desired config spec hash matches the actual one, but the last applied config spec hash doesn't
            } else if !equality.Semantic.DeepEqual(actual.LastAppliedConfig, actual.DesiredConfig) {
                n.mcaUpgradeStatus = upgrading
                switch progressingCond.Reason {
                case addonv1alpha1.ProgressingReasonInstallFailed, addonv1alpha1.ProgressingReasonUpgradeFailed:
                    n.status = &clusterv1alpha1.ClusterRolloutStatus{Status: clusterv1alpha1.Failed, LastTransitionTime: &progressingCond.LastTransitionTime}
                case addonv1alpha1.ProgressingReasonInstalling, addonv1alpha1.ProgressingReasonUpgrading:
                    n.status = &clusterv1alpha1.ClusterRolloutStatus{Status: clusterv1alpha1.Progressing, LastTransitionTime: &progressingCond.LastTransitionTime}
                default:
                    n.status = &clusterv1alpha1.ClusterRolloutStatus{Status: clusterv1alpha1.Progressing}
                }
                return
            }
        } else {
            n.mcaUpgradeStatus = toupgrade
            n.status = &clusterv1alpha1.ClusterRolloutStatus{Status: clusterv1alpha1.ToApply}
            return
        }
    }

    n.mcaUpgradeStatus = upgraded
    // succeed
    if progressingCond.Reason == addonv1alpha1.ProgressingReasonInstallSucceed || progressingCond.Reason == addonv1alpha1.ProgressingReasonUpgradeSucceed {
        n.status = &clusterv1alpha1.ClusterRolloutStatus{
            Status:             clusterv1alpha1.Succeeded,
            LastTransitionTime: &progressingCond.LastTransitionTime,
        }
    } else {
        n.status = &clusterv1alpha1.ClusterRolloutStatus{
            Status: clusterv1alpha1.Succeeded,
        }
    }

}

func (d addonConfigMap) copy() addonConfigMap {
@@ -96,7 +112,6 @@ func newGraph(supportedConfigs []addonv1alpha1.ConfigMeta, defaultConfigReferenc
    graph := &configurationGraph{
        nodes: []*installStrategyNode{},
        defaults: &installStrategyNode{
            maxConcurrency: maxMaxConcurrency,
            desiredConfigs: map[addonv1alpha1.ConfigGroupResource]addonv1alpha1.ConfigReference{},
            children:       map[string]*addonNode{},
        },
@@ -144,26 +159,53 @@ func (g *configurationGraph) addAddonNode(mca *addonv1alpha1.ManagedClusterAddOn
func (g *configurationGraph) addPlacementNode(
    installStrategy addonv1alpha1.PlacementStrategy,
    installProgression addonv1alpha1.InstallProgression,
    clusters []string,
) {
    placementLister clusterlisterv1beta1.PlacementLister,
    placementDecisionGetter PlacementDecisionGetter,
) error {
    placementRef := installProgression.PlacementRef
    installConfigReference := installProgression.ConfigReferences

    node := &installStrategyNode{
        placementRef:   placementRef,
        maxConcurrency: maxMaxConcurrency,
        desiredConfigs: g.defaults.desiredConfigs,
        children:       map[string]*addonNode{},
        clusters:       sets.New[string](clusters...),
    // get placement
    if placementLister == nil {
        return fmt.Errorf("invalid placement lister %v", placementLister)
    }
    placement, err := placementLister.Placements(placementRef.Namespace).Get(placementRef.Name)
    if err != nil {
        return err
    }

    // set max concurrency
    if installStrategy.RolloutStrategy.Type == addonv1alpha1.AddonRolloutStrategyRollingUpdate {
        if installStrategy.RolloutStrategy.RollingUpdate != nil {
            node.maxConcurrency = installStrategy.RolloutStrategy.RollingUpdate.MaxConcurrency
        } else {
            node.maxConcurrency = defaultMaxConcurrency
    // new decision tracker
    pdTracker := clusterv1beta1.NewPlacementDecisionClustersTracker(placement, placementDecisionGetter, nil)

    // refresh and get existing decision clusters
    err = pdTracker.Refresh()
    if err != nil {
        return err
    }
    clusters := pdTracker.ExistingClusterGroupsBesides().GetClusters()

    node := &installStrategyNode{
        placementRef:    placementRef,
        pdTracker:       pdTracker,
        rolloutStrategy: installStrategy.RolloutStrategy,
        desiredConfigs:  g.defaults.desiredConfigs,
        children:        map[string]*addonNode{},
        clusters:        clusters,
    }

    // Set MaxConcurrency
    // If the progressive strategy is not initialized or MaxConcurrency is not specified, set MaxConcurrency to the default value
    if node.rolloutStrategy.Type == clusterv1alpha1.Progressive {
        progressiveStrategy := node.rolloutStrategy.Progressive

        if progressiveStrategy == nil {
            progressiveStrategy = &clusterv1alpha1.RolloutProgressive{}
        }
        if progressiveStrategy.MaxConcurrency.StrVal == "" && progressiveStrategy.MaxConcurrency.IntVal == 0 {
            progressiveStrategy.MaxConcurrency = placement.Spec.DecisionStrategy.GroupStrategy.ClustersPerDecisionGroup
        }

        node.rolloutStrategy.Progressive = progressiveStrategy
    }

    // overrides configuration by install strategy
@@ -182,19 +224,32 @@ func (g *configurationGraph) addPlacementNode(
|
||||
}
|
||||
|
||||
// remove addon in defaults and other placements.
|
||||
for _, cluster := range clusters {
|
||||
for _, cluster := range node.clusters.UnsortedList() {
|
||||
if _, ok := g.defaults.children[cluster]; ok {
|
||||
node.addNode(g.defaults.children[cluster].mca)
|
||||
delete(g.defaults.children, cluster)
|
||||
}
|
||||
for _, placement := range g.nodes {
|
||||
if _, ok := placement.children[cluster]; ok {
|
||||
node.addNode(placement.children[cluster].mca)
|
||||
delete(placement.children, cluster)
|
||||
for _, placementNode := range g.nodes {
|
||||
if _, ok := placementNode.children[cluster]; ok {
|
||||
node.addNode(placementNode.children[cluster].mca)
|
||||
delete(placementNode.children, cluster)
|
||||
}
|
||||
}
|
||||
}
|
||||
g.nodes = append(g.nodes, node)
|
||||
return nil
|
||||
}
|
||||
|
||||
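addPlacementNode now resolves the placement's clusters itself, through a placement lister and a PlacementDecisionGetter, instead of receiving a precomputed cluster list. A minimal sketch of a getter backed by an informer lister; the concrete type and field names here are assumptions for illustration, and only the List(selector, namespace) shape is taken from the tracker call above:

// Hypothetical adapter: serves PlacementDecisions from the shared informer
// cache, which is all the decision tracker needs.
type informerDecisionGetter struct {
	lister clusterlisterv1beta1.PlacementDecisionLister
}

func (g informerDecisionGetter) List(selector labels.Selector, namespace string) ([]*clusterv1beta1.PlacementDecision, error) {
	// Delegate to the lister rather than hitting the API server.
	return g.lister.PlacementDecisions(namespace).List(selector)
}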
func (g *configurationGraph) generateRolloutResult() error {
for _, node := range g.nodes {
if err := node.generateRolloutResult(); err != nil {
return err
}
}
if err := g.defaults.generateRolloutResult(); err != nil {
return err
}
return nil
}

func (g *configurationGraph) getPlacementNodes() map[addonv1alpha1.PlacementRef]*installStrategyNode {
@@ -206,13 +261,13 @@ func (g *configurationGraph) getPlacementNodes() map[addonv1alpha1.PlacementRef]
return placementNodeMap
}

func (g *configurationGraph) addonToUpdate() []*addonNode {
func (g *configurationGraph) getAddonsToUpdate() []*addonNode {
var addons []*addonNode
for _, node := range g.nodes {
addons = append(addons, node.addonToUpdate()...)
addons = append(addons, node.getAddonsToUpdate()...)
}

addons = append(addons, g.defaults.addonToUpdate()...)
addons = append(addons, g.defaults.getAddonsToUpdate()...)

return addons
}
@@ -248,83 +303,86 @@ func (n *installStrategyNode) addNode(addon *addonv1alpha1.ManagedClusterAddOn)
}
}

// set addon node upgrade status
n.children[addon.Namespace].setUpgradeStatus()
// set addon node rollout status
n.children[addon.Namespace].setRolloutStatus()
}

func (n *installStrategyNode) addonUpgraded() int {
count := 0
for _, addon := range n.children {
if desiredConfigsEqual(addon.desiredConfigs, n.desiredConfigs) && addon.mcaUpgradeStatus == upgraded {
count += 1
func (n *installStrategyNode) generateRolloutResult() error {
if n.placementRef.Name == "" {
// default addons
rolloutResult := clusterv1alpha1.RolloutResult{}
rolloutResult.ClustersToRollout = map[string]clusterv1alpha1.ClusterRolloutStatus{}
for k, addon := range n.children {
if addon.status.Status != clusterv1alpha1.Succeeded {
rolloutResult.ClustersToRollout[k] = *addon.status
}
}
n.rolloutResult = rolloutResult
} else {
// placement addons
rolloutHandler, err := clusterv1alpha1.NewRolloutHandler(n.pdTracker)
if err != nil {
return err
}
_, rolloutResult, err := rolloutHandler.GetRolloutCluster(n.rolloutStrategy, n.getUpgradeStatus)
if err != nil {
return err
}
n.rolloutResult = rolloutResult
}
return count

return nil
}
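For placement-backed nodes, the sequencing logic now lives in the shared rollout handler rather than in this controller. A hedged consumption sketch, using only the signatures visible in this diff (NewRolloutHandler, GetRolloutCluster, RolloutResult); the wrapper function and its arguments are illustrative:

// Sketch: one rollout evaluation round for a tracked placement.
func rolloutOnce(
	pdTracker *clusterv1beta1.PlacementDecisionClustersTracker,
	strategy clusterv1alpha1.RolloutStrategy,
	statusFn func(clusterName string) clusterv1alpha1.ClusterRolloutStatus,
) (map[string]clusterv1alpha1.ClusterRolloutStatus, error) {
	handler, err := clusterv1alpha1.NewRolloutHandler(pdTracker)
	if err != nil {
		return nil, err
	}
	// The handler applies the strategy (All/Progressive/ProgressivePerGroup)
	// against the tracked placement decisions and the statuses reported by statusFn.
	_, result, err := handler.GetRolloutCluster(strategy, statusFn)
	if err != nil {
		return nil, err
	}
	// ClustersToRollout is exactly what getAddonsToUpdate consumes below.
	return result.ClustersToRollout, nil
}

The getUpgradeStatus method passed above plays the statusFn role: as defined just below, it returns Skip for clusters the node does not manage, which tells the handler to pass over them.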
func (n *installStrategyNode) addonUpgrading() int {
count := 0
for _, addon := range n.children {
if desiredConfigsEqual(addon.desiredConfigs, n.desiredConfigs) && addon.mcaUpgradeStatus == upgrading {
count += 1
}
func (n *installStrategyNode) getUpgradeStatus(clusterName string) clusterv1alpha1.ClusterRolloutStatus {
if node, exist := n.children[clusterName]; exist {
return *node.status
} else {
// if children not exist, return succeed status to skip
return clusterv1alpha1.ClusterRolloutStatus{Status: clusterv1alpha1.Skip}
}
return count
}

// addonToUpdate finds the addons to be updated by placement
func (n *installStrategyNode) addonToUpdate() []*addonNode {
func (n *installStrategyNode) getAddonsToUpdate() []*addonNode {
var addons []*addonNode
var clusters []string

// sort the children by key
keys := make([]string, 0, len(n.children))
for k := range n.children {
keys = append(keys, k)
}
sort.Strings(keys)

total := len(n.clusters)
if total == 0 {
total = len(n.children)
// get addon to update from rollout result
for c := range n.rolloutResult.ClustersToRollout {
clusters = append(clusters, c)
}

length, _ := parseMaxConcurrency(n.maxConcurrency, total)
if length == 0 {
return addons
// sort addons by name
sort.Strings(clusters)
for _, k := range clusters {
addons = append(addons, n.children[k])
}

for i, k := range keys {
if (i%length == 0) && len(addons) > 0 {
return addons
}

addon := n.children[k]
if addon.mcaUpgradeStatus != upgraded {
addons = append(addons, addon)
}
}

return addons
}

func parseMaxConcurrency(maxConcurrency intstr.IntOrString, total int) (int, error) {
var length int

switch maxConcurrency.Type {
case intstr.String:
str := maxConcurrency.StrVal
f, err := strconv.ParseFloat(str[:len(str)-1], 64)
if err != nil {
return length, err
func (n *installStrategyNode) countAddonUpgradeSucceed() int {
count := 0
for _, addon := range n.children {
if desiredConfigsEqual(addon.desiredConfigs, n.desiredConfigs) && addon.status.Status == clusterv1alpha1.Succeeded {
count += 1
}
length = int(math.Ceil(f / 100 * float64(total)))
case intstr.Int:
length = maxConcurrency.IntValue()
default:
return length, fmt.Errorf("incorrect MaxConcurrency type %v", maxConcurrency.Type)
}
return count
}

return length, nil
func (n *installStrategyNode) countAddonUpgrading() int {
count := 0
for _, addon := range n.children {
if desiredConfigsEqual(addon.desiredConfigs, n.desiredConfigs) && addon.status.Status == clusterv1alpha1.Progressing {
count += 1
}
}
return count
}

func (n *installStrategyNode) countAddonTimeOut() int {
return len(n.rolloutResult.ClustersTimeOut)
}

func desiredConfigsEqual(a, b addonConfigMap) bool {
@@ -45,8 +45,9 @@ func (d *clusterManagementAddonProgressingReconciler) reconcile(

setAddOnInstallProgressionsAndLastApplied(&cmaCopy.Status.InstallProgressions[i],
isUpgrade,
placementNode.addonUpgrading(),
placementNode.addonUpgraded(),
placementNode.countAddonUpgrading(),
placementNode.countAddonUpgradeSucceed(),
placementNode.countAddonTimeOut(),
len(placementNode.clusters),
)
}
@@ -96,7 +97,10 @@ func (d *clusterManagementAddonProgressingReconciler) patchMgmtAddonStatus(ctx c
return err
}

func setAddOnInstallProgressionsAndLastApplied(installProgression *addonv1alpha1.InstallProgression, isUpgrade bool, progressing, done, total int) {
func setAddOnInstallProgressionsAndLastApplied(
installProgression *addonv1alpha1.InstallProgression,
isUpgrade bool,
progressing, done, timeout, total int) {
// always update progressing condition when there is no config
// skip update progressing condition when last applied config already the same as desired
skip := len(installProgression.ConfigReferences) > 0
@@ -116,10 +120,10 @@ func setAddOnInstallProgressionsAndLastApplied(installProgression *addonv1alpha1
condition.Status = metav1.ConditionTrue
if isUpgrade {
condition.Reason = addonv1alpha1.ProgressingReasonUpgrading
condition.Message = fmt.Sprintf("%d/%d upgrading...", progressing+done, total)
condition.Message = fmt.Sprintf("%d/%d upgrading..., %d timeout.", progressing+done, total, timeout)
} else {
condition.Reason = addonv1alpha1.ProgressingReasonInstalling
condition.Message = fmt.Sprintf("%d/%d installing...", progressing+done, total)
condition.Message = fmt.Sprintf("%d/%d installing..., %d timeout.", progressing+done, total, timeout)
}
} else {
for i, configRef := range installProgression.ConfigReferences {
@@ -129,10 +133,10 @@ func setAddOnInstallProgressionsAndLastApplied(installProgression *addonv1alpha1
condition.Status = metav1.ConditionFalse
if isUpgrade {
condition.Reason = addonv1alpha1.ProgressingReasonUpgradeSucceed
condition.Message = fmt.Sprintf("%d/%d upgrade completed with no errors.", done, total)
condition.Message = fmt.Sprintf("%d/%d upgrade completed with no errors, %d timeout.", done, total, timeout)
} else {
condition.Reason = addonv1alpha1.ProgressingReasonInstallSucceed
condition.Message = fmt.Sprintf("%d/%d install completed with no errors.", done, total)
condition.Message = fmt.Sprintf("%d/%d install completed with no errors, %d timeout.", done, total, timeout)
}
}
meta.SetStatusCondition(&installProgression.Conditions, condition)
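With the extra timeout argument, the Progressing condition messages now carry the timed-out count as well. A worked example with illustrative values (progressing=2, done=1, total=5, timeout=1):

// Sketch: the message the reconciler would set for those counts.
msg := fmt.Sprintf("%d/%d upgrading..., %d timeout.", 2+1, 5, 1)
// msg == "3/5 upgrading..., 1 timeout."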
@@ -139,108 +139,139 @@ spec:
type: string
rolloutStrategy:
default:
type: UpdateAll
type: All
description: The rollout strategy to apply addon configurations
change. The rollout strategy only watches the addon configurations
defined in ClusterManagementAddOn.
properties:
rollingUpdate:
description: Rolling update with placement config params.
Present only if the type is RollingUpdate.
all:
description: All define required fields for RolloutStrategy
type All
properties:
maxConcurrency:
anyOf:
- type: integer
- type: string
default: 25%
description: 'The maximum concurrently updating
number of clusters. Value can be an absolute number
(ex: 5) or a percentage of desired addons (ex:
10%). Absolute number is calculated from percentage
by rounding up. Defaults to 25%. Example: when
this is set to 30%, once the addon configs change,
the addon on 30% of the selected clusters will
adopt the new configs. When the addons with new
configs are healthy, the addon on the remaining
clusters will be further updated.'
x-kubernetes-int-or-string: true
timeout:
default: None
description: Timeout define how long workload applier
controller will wait till workload reach successful
state in the cluster. Timeout default value is
None meaning the workload applier will not proceed
apply workload to other clusters if did not reach
the successful state. Timeout must be defined
in [0-9h]|[0-9m]|[0-9s] format examples; 2h ,
90m , 360s
pattern: ^(([0-9])+[h|m|s])|None$
type: string
type: object
rollingUpdateWithCanary:
description: Rolling update with placement config params.
Present only if the type is RollingUpdateWithCanary.
progressive:
description: Progressive define required fields for
RolloutStrategy type Progressive
properties:
mandatoryDecisionGroups:
description: List of the decision groups names or
indexes to apply the workload first and fail if
workload did not reach successful state. GroupName
or GroupIndex must match with the decisionGroups
defined in the placement's decisionStrategy
items:
description: MandatoryDecisionGroup set the decision
group name or group index. GroupName is considered
first to select the decisionGroups then GroupIndex.
properties:
groupIndex:
description: GroupIndex of the decision group
should match the placementDecisions label
value with label key cluster.open-cluster-management.io/decision-group-index
format: int32
type: integer
groupName:
description: GroupName of the decision group
should match the placementDecisions label
value with label key cluster.open-cluster-management.io/decision-group-name
type: string
type: object
type: array
maxConcurrency:
anyOf:
- type: integer
- type: string
default: 25%
description: 'The maximum concurrently updating
number of clusters. Value can be an absolute number
(ex: 5) or a percentage of desired addons (ex:
10%). Absolute number is calculated from percentage
by rounding up. Defaults to 25%. Example: when
this is set to 30%, once the addon configs change,
the addon on 30% of the selected clusters will
adopt the new configs. When the addons with new
configs are healthy, the addon on the remaining
clusters will be further updated.'
description: MaxConcurrency is the max number of
clusters to deploy workload concurrently. The
default value for MaxConcurrency is determined
from the clustersPerDecisionGroup defined in the
placement->DecisionStrategy.
pattern: ^((100|[0-9]{1,2})%|[0-9]+)$
x-kubernetes-int-or-string: true
placement:
description: Canary placement reference.
properties:
name:
description: Name is the name of the placement
minLength: 1
type: string
namespace:
description: Namespace is the namespace of the
placement
minLength: 1
type: string
required:
- name
- namespace
type: object
required:
- placement
timeout:
default: None
description: Timeout define how long workload applier
controller will wait till workload reach successful
state in the cluster. Timeout default value is
None meaning the workload applier will not proceed
apply workload to other clusters if did not reach
the successful state. Timeout must be defined
in [0-9h]|[0-9m]|[0-9s] format examples; 2h ,
90m , 360s
pattern: ^(([0-9])+[h|m|s])|None$
type: string
type: object
progressivePerGroup:
description: ProgressivePerGroup define required fields
for RolloutStrategy type ProgressivePerGroup
properties:
mandatoryDecisionGroups:
description: List of the decision groups names or
indexes to apply the workload first and fail if
workload did not reach successful state. GroupName
or GroupIndex must match with the decisionGroups
defined in the placement's decisionStrategy
items:
description: MandatoryDecisionGroup set the decision
group name or group index. GroupName is considered
first to select the decisionGroups then GroupIndex.
properties:
groupIndex:
description: GroupIndex of the decision group
should match the placementDecisions label
value with label key cluster.open-cluster-management.io/decision-group-index
format: int32
type: integer
groupName:
description: GroupName of the decision group
should match the placementDecisions label
value with label key cluster.open-cluster-management.io/decision-group-name
type: string
type: object
type: array
timeout:
default: None
description: Timeout define how long workload applier
controller will wait till workload reach successful
state in the cluster. Timeout default value is
None meaning the workload applier will not proceed
apply workload to other clusters if did not reach
the successful state. Timeout must be defined
in [0-9h]|[0-9m]|[0-9s] format examples; 2h ,
90m , 360s
pattern: ^(([0-9])+[h|m|s])|None$
type: string
type: object
type:
default: UpdateAll
description: "Type is the type of the rollout strategy,
it supports UpdateAll, RollingUpdate and RollingUpdateWithCanary:
- UpdateAll: when configs change, apply the new configs
to all the selected clusters at once. This is the
default strategy. - RollingUpdate: when configs change,
apply the new configs to all the selected clusters
with the concurrence rate defined in MaxConcurrency.
- RollingUpdateWithCanary: when configs change, wait
and check if add-ons on the canary placement selected
clusters have applied the new configs and are healthy,
then apply the new configs to all the selected clusters
with the concurrence rate defined in MaxConcurrency.
\n The field lastKnownGoodConfig in the status record
the last successfully applied spec hash of canary
placement. If the config spec hash changes after the
canary is passed and before the rollout is done, the
current rollout will continue, then roll out to the
latest change. \n For example, the addon configs have
spec hash A. The canary is passed and the lastKnownGoodConfig
would be A, and all the selected clusters are rolling
out to A. Then the config spec hash changes to B.
At this time, the clusters will continue rolling out
to A. When the rollout is done and canary passed B,
the lastKnownGoodConfig would be B and all the clusters
will start rolling out to B. \n The canary placement
does not have to be a subset of the install placement,
and it is more like a reference for finding and checking
canary clusters before upgrading all. To trigger the
rollout on the canary clusters, you can define another
rollout strategy with the type RollingUpdate, or even
manually upgrade the addons on those clusters."
default: All
description: Rollout strategy Types are All, Progressive
and ProgressivePerGroup 1) All means apply the workload
to all clusters in the decision groups at once. 2)
Progressive means apply the workload to the selected
clusters progressively per cluster. The workload will
not be applied to the next cluster unless one of the
current applied clusters reach the successful state
or timeout. 3) ProgressivePerGroup means apply the
workload to decisionGroup clusters progressively per
group. The workload will not be applied to the next
decisionGroup unless all clusters in the current group
reach the successful state or timeout.
enum:
- UpdateAll
- RollingUpdate
- RollingUpdateWithCanary
- All
- Progressive
- ProgressivePerGroup
type: string
type: object
required:
vendor/open-cluster-management.io/api/addon/v1alpha1/types_clustermanagementaddon.go (86 lines changed, generated, vendored)
@@ -2,7 +2,7 @@ package v1alpha1

import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1"
)

// +genclient
@@ -187,89 +187,9 @@ type PlacementStrategy struct {
Configs []AddOnConfig `json:"configs,omitempty"`
// The rollout strategy to apply addon configurations change.
// The rollout strategy only watches the addon configurations defined in ClusterManagementAddOn.
// +kubebuilder:default={type: UpdateAll}
// +kubebuilder:default={type: All}
// +optional
RolloutStrategy RolloutStrategy `json:"rolloutStrategy,omitempty"`
}

// RolloutStrategy represents the rollout strategy of the add-on configuration.
type RolloutStrategy struct {
// Type is the type of the rollout strategy, it supports UpdateAll, RollingUpdate and RollingUpdateWithCanary:
// - UpdateAll: when configs change, apply the new configs to all the selected clusters at once.
// This is the default strategy.
// - RollingUpdate: when configs change, apply the new configs to all the selected clusters with
// the concurrence rate defined in MaxConcurrency.
// - RollingUpdateWithCanary: when configs change, wait and check if add-ons on the canary placement
// selected clusters have applied the new configs and are healthy, then apply the new configs to
// all the selected clusters with the concurrence rate defined in MaxConcurrency.
//
// The field lastKnownGoodConfig in the status record the last successfully applied
// spec hash of canary placement. If the config spec hash changes after the canary is passed and
// before the rollout is done, the current rollout will continue, then roll out to the latest change.
//
// For example, the addon configs have spec hash A. The canary is passed and the lastKnownGoodConfig
// would be A, and all the selected clusters are rolling out to A.
// Then the config spec hash changes to B. At this time, the clusters will continue rolling out to A.
// When the rollout is done and canary passed B, the lastKnownGoodConfig would be B and
// all the clusters will start rolling out to B.
//
// The canary placement does not have to be a subset of the install placement, and it is more like a
// reference for finding and checking canary clusters before upgrading all. To trigger the rollout
// on the canary clusters, you can define another rollout strategy with the type RollingUpdate, or even
// manually upgrade the addons on those clusters.
//
// +kubebuilder:validation:Enum=UpdateAll;RollingUpdate;RollingUpdateWithCanary
// +kubebuilder:default:=UpdateAll
// +optional
Type string `json:"type"`

// Rolling update with placement config params. Present only if the type is RollingUpdate.
// +optional
RollingUpdate *RollingUpdate `json:"rollingUpdate,omitempty"`

// Rolling update with placement config params. Present only if the type is RollingUpdateWithCanary.
// +optional
RollingUpdateWithCanary *RollingUpdateWithCanary `json:"rollingUpdateWithCanary,omitempty"`
}

const (
// AddonRolloutStrategyUpdateAll is the addon rollout strategy representing apply the new configs to
// all the selected clusters at once.
AddonRolloutStrategyUpdateAll string = "UpdateAll"
// AddonRolloutStrategyRollingUpdate is the addon rollout strategy representing apply the new configs to
// all the selected clusters with the concurrency rate.
AddonRolloutStrategyRollingUpdate string = "RollingUpdate"
// AddonRolloutStrategyRollingUpdate is the addon rollout strategy representing wait and check
// if add-ons on the canary have applied the new configs, then apply the new configs to
// all the selected clusters with the concurrency rate.
AddonRolloutStrategyRollingUpdateWithCanary string = "RollingUpdateWithCanary"
)

// RollingUpdate represents the behavior to rolling update add-on configurations
// on the selected clusters.
type RollingUpdate struct {
// The maximum concurrently updating number of clusters.
// Value can be an absolute number (ex: 5) or a percentage of desired addons (ex: 10%).
// Absolute number is calculated from percentage by rounding up.
// Defaults to 25%.
// Example: when this is set to 30%, once the addon configs change, the addon on 30% of the selected clusters
// will adopt the new configs. When the addons with new configs are healthy, the addon on the remaining clusters
// will be further updated.
// +kubebuilder:default:="25%"
// +optional
MaxConcurrency intstr.IntOrString `json:"maxConcurrency,omitempty"`
}

// RollingUpdateWithCanary represents the canary placement and behavior to rolling update add-on configurations
// on the selected clusters.
type RollingUpdateWithCanary struct {
// Canary placement reference.
// +kubebuilder:validation:Required
// +required
Placement PlacementRef `json:"placement,omitempty"`

// the behavior to rolling update add-on configurations.
RollingUpdate `json:",inline"`
RolloutStrategy clusterv1alpha1.RolloutStrategy `json:"rolloutStrategy,omitempty"`
}

// ClusterManagementAddOnStatus represents the current status of cluster management add-on.
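With the add-on API now reusing the shared cluster/v1alpha1 rollout strategy, a placement install strategy can be configured as in this hedged Go sketch (the placement name is hypothetical, and the PlacementRef field shape is assumed from the surrounding API):

// Sketch: roll the add-on out progressively, 25% of clusters at a time.
strategy := addonv1alpha1.PlacementStrategy{
	PlacementRef: addonv1alpha1.PlacementRef{
		Name:      "all-clusters", // hypothetical placement
		Namespace: "default",
	},
	RolloutStrategy: clusterv1alpha1.RolloutStrategy{
		Type: clusterv1alpha1.Progressive,
		Progressive: &clusterv1alpha1.RolloutProgressive{
			// If left empty, this now defaults from the placement's
			// clustersPerDecisionGroup, per the graph code above.
			MaxConcurrency: intstr.FromString("25%"),
		},
	},
}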
vendor/open-cluster-management.io/api/addon/v1alpha1/zz_generated.deepcopy.go (61 lines changed, generated, vendored)
@@ -952,67 +952,6 @@ func (in *RegistrationSpec) DeepCopy() *RegistrationSpec {
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RollingUpdate) DeepCopyInto(out *RollingUpdate) {
*out = *in
out.MaxConcurrency = in.MaxConcurrency
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdate.
func (in *RollingUpdate) DeepCopy() *RollingUpdate {
if in == nil {
return nil
}
out := new(RollingUpdate)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RollingUpdateWithCanary) DeepCopyInto(out *RollingUpdateWithCanary) {
*out = *in
out.Placement = in.Placement
out.RollingUpdate = in.RollingUpdate
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateWithCanary.
func (in *RollingUpdateWithCanary) DeepCopy() *RollingUpdateWithCanary {
if in == nil {
return nil
}
out := new(RollingUpdateWithCanary)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RolloutStrategy) DeepCopyInto(out *RolloutStrategy) {
*out = *in
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(RollingUpdate)
**out = **in
}
if in.RollingUpdateWithCanary != nil {
in, out := &in.RollingUpdateWithCanary, &out.RollingUpdateWithCanary
*out = new(RollingUpdateWithCanary)
**out = **in
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutStrategy.
func (in *RolloutStrategy) DeepCopy() *RolloutStrategy {
if in == nil {
return nil
}
out := new(RolloutStrategy)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SigningCARef) DeepCopyInto(out *SigningCARef) {
*out = *in

@@ -320,35 +320,6 @@ func (PlacementStrategy) SwaggerDoc() map[string]string {
return map_PlacementStrategy
}

var map_RollingUpdate = map[string]string{
"": "RollingUpdate represents the behavior to rolling update add-on configurations on the selected clusters.",
"maxConcurrency": "The maximum concurrently updating number of clusters. Value can be an absolute number (ex: 5) or a percentage of desired addons (ex: 10%). Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, once the addon configs change, the addon on 30% of the selected clusters will adopt the new configs. When the addons with new configs are healthy, the addon on the remaining clusters will be further updated.",
}

func (RollingUpdate) SwaggerDoc() map[string]string {
return map_RollingUpdate
}

var map_RollingUpdateWithCanary = map[string]string{
"": "RollingUpdateWithCanary represents the canary placement and behavior to rolling update add-on configurations on the selected clusters.",
"placement": "Canary placement reference.",
}

func (RollingUpdateWithCanary) SwaggerDoc() map[string]string {
return map_RollingUpdateWithCanary
}

var map_RolloutStrategy = map[string]string{
"": "RolloutStrategy represents the rollout strategy of the add-on configuration.",
"type": "Type is the type of the rollout strategy, it supports UpdateAll, RollingUpdate and RollingUpdateWithCanary: - UpdateAll: when configs change, apply the new configs to all the selected clusters at once.\n This is the default strategy.\n- RollingUpdate: when configs change, apply the new configs to all the selected clusters with\n the concurrence rate defined in MaxConcurrency.\n- RollingUpdateWithCanary: when configs change, wait and check if add-ons on the canary placement\n selected clusters have applied the new configs and are healthy, then apply the new configs to\n all the selected clusters with the concurrence rate defined in MaxConcurrency.\n\n The field lastKnownGoodConfig in the status record the last successfully applied\n spec hash of canary placement. If the config spec hash changes after the canary is passed and\n before the rollout is done, the current rollout will continue, then roll out to the latest change.\n\n For example, the addon configs have spec hash A. The canary is passed and the lastKnownGoodConfig\n would be A, and all the selected clusters are rolling out to A.\n Then the config spec hash changes to B. At this time, the clusters will continue rolling out to A.\n When the rollout is done and canary passed B, the lastKnownGoodConfig would be B and\n all the clusters will start rolling out to B.\n\n The canary placement does not have to be a subset of the install placement, and it is more like a\n reference for finding and checking canary clusters before upgrading all. To trigger the rollout\n on the canary clusters, you can define another rollout strategy with the type RollingUpdate, or even\n manually upgrade the addons on those clusters.",
"rollingUpdate": "Rolling update with placement config params. Present only if the type is RollingUpdate.",
"rollingUpdateWithCanary": "Rolling update with placement config params. Present only if the type is RollingUpdateWithCanary.",
}

func (RolloutStrategy) SwaggerDoc() map[string]string {
return map_RolloutStrategy
}

var map_ConfigReference = map[string]string{
"": "ConfigReference is a reference to the current add-on configuration. This resource is used to locate the configuration resource for the current add-on.",
"lastObservedGeneration": "Deprecated: Use LastAppliedConfig instead lastObservedGeneration is the observed generation of the add-on configuration.",
vendor/open-cluster-management.io/api/cluster/v1beta1/helpers.go (36 lines changed, generated, vendored)
@@ -59,13 +59,13 @@ func NewPlacementDecisionClustersTrackerWithGroups(placement *Placement, pdl Pla
return pdct
}

// Get updates the tracker's decisionClusters and returns added and deleted cluster names.
func (pdct *PlacementDecisionClustersTracker) Get() (sets.Set[string], sets.Set[string], error) {
// Refresh refreshes the tracker's decisionClusters.
func (pdct *PlacementDecisionClustersTracker) Refresh() error {
pdct.lock.Lock()
defer pdct.lock.Unlock()

if pdct.placement == nil || pdct.placementDecisionGetter == nil {
return nil, nil, nil
return nil
}

// Get the generated PlacementDecisions
@@ -74,16 +74,15 @@ func (pdct *PlacementDecisionClustersTracker) Get() (sets.Set[string], sets.Set[
})
decisions, err := pdct.placementDecisionGetter.List(decisionSelector, pdct.placement.Namespace)
if err != nil {
return nil, nil, fmt.Errorf("failed to list PlacementDecisions: %w", err)
return fmt.Errorf("failed to list PlacementDecisions: %w", err)
}

// Get the decision cluster names and groups
newScheduledClusters := sets.New[string]()
newScheduledClusterGroups := map[GroupKey]sets.Set[string]{}
for _, d := range decisions {
groupKey, err := parseGroupKeyFromDecision(d)
if err != nil {
return nil, nil, err
return err
}

if _, exist := newScheduledClusterGroups[groupKey]; !exist {
@@ -91,20 +90,33 @@ func (pdct *PlacementDecisionClustersTracker) Get() (sets.Set[string], sets.Set[
}

for _, sd := range d.Status.Decisions {
newScheduledClusters.Insert(sd.ClusterName)
newScheduledClusterGroups[groupKey].Insert(sd.ClusterName)
}
}

// Compare the difference
existingScheduledClusters := pdct.existingScheduledClusterGroups.GetClusters()
added := newScheduledClusters.Difference(existingScheduledClusters)
deleted := existingScheduledClusters.Difference(newScheduledClusters)

// Update the existing decision cluster groups
pdct.existingScheduledClusterGroups = newScheduledClusterGroups
pdct.generateGroupsNameIndex()

return nil
}

// GetClusterChanges updates the tracker's decisionClusters and returns added and deleted cluster names.
func (pdct *PlacementDecisionClustersTracker) GetClusterChanges() (sets.Set[string], sets.Set[string], error) {
// Get existing clusters
existingScheduledClusters := pdct.existingScheduledClusterGroups.GetClusters()

// Refresh clusters
err := pdct.Refresh()
if err != nil {
return nil, nil, err
}
newScheduledClusters := pdct.existingScheduledClusterGroups.GetClusters()

// Compare the difference
added := newScheduledClusters.Difference(existingScheduledClusters)
deleted := existingScheduledClusters.Difference(newScheduledClusters)

return added, deleted, nil
}
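The tracker now separates refreshing from change detection: Refresh repopulates the decision groups, while GetClusterChanges wraps Refresh and reports the before/after delta. A minimal consumption sketch inside a hypothetical reconcile function, using only the calls shown in this diff:

// Sketch: track a placement's scheduled clusters across reconciles.
pdTracker := clusterv1beta1.NewPlacementDecisionClustersTracker(placement, decisionGetter, nil)

// Populate the tracked groups from the current PlacementDecisions.
if err := pdTracker.Refresh(); err != nil {
	return err
}
clusters := pdTracker.ExistingClusterGroupsBesides().GetClusters()

// On later reconciles, detect scheduling changes in one call.
added, deleted, err := pdTracker.GetClusterChanges()
if err != nil {
	return err
}
// react to added/deleted clusters here
_, _, _ = clusters, added, deleted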