mirror of https://github.com/fluxcd/flagger.git
commit b25e12d45d (parent 2944581a70)
committed by Sanskar Jaiswal

fix initial deployment downtime

Signed-off-by: miguelvalerio <miguelgomes.valerio@gmail.com>
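What the hunks below change: Initialize in both the DaemonSet and Deployment controllers used to create the primary workload and then immediately scale the canary workload down to zero. During the very first bootstrap the service is still routing traffic to the canary at that point, so the scale-down left a window with no ready endpoints behind the service. The fix drops the scale-down from Initialize and moves it into the scheduler's advanceCanary, which performs it only while the canary is initializing and only after the service is pointing at the primary target; the updated tests assert that the scale-down still happens during bootstrap.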
@@ -91,8 +91,7 @@ func (c *DaemonSetController) ScaleFromZero(cd *flaggerv1.Canary) error {
 	return nil
 }
 
-// Initialize creates the primary DaemonSet, scales down the canary DaemonSet,
-// and returns the pod selector label and container ports
+// Initialize creates the primary DaemonSet if it does not exist.
 func (c *DaemonSetController) Initialize(cd *flaggerv1.Canary) (err error) {
 	err = c.createPrimaryDaemonSet(cd, c.includeLabelPrefix)
 	if err != nil {
@@ -105,13 +104,8 @@ func (c *DaemonSetController) Initialize(cd *flaggerv1.Canary) (err error) {
 				return fmt.Errorf("%w", err)
 			}
 		}
 
-		c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).
-			Infof("Scaling down DaemonSet %s.%s", cd.Spec.TargetRef.Name, cd.Namespace)
-		if err := c.ScaleToZero(cd); err != nil {
-			return fmt.Errorf("ScaleToZero failed: %w", err)
-		}
 	}
 
 	return nil
 }
@@ -45,8 +45,7 @@ type DeploymentController struct {
 	includeLabelPrefix []string
 }
 
-// Initialize creates the primary deployment, hpa,
-// scales to zero the canary deployment and returns the pod selector label and container ports
+// Initialize creates the primary deployment if it does not exist.
 func (c *DeploymentController) Initialize(cd *flaggerv1.Canary) (err error) {
 	if err := c.createPrimaryDeployment(cd, c.includeLabelPrefix); err != nil {
 		return fmt.Errorf("createPrimaryDeployment failed: %w", err)
@@ -58,12 +57,6 @@ func (c *DeploymentController) Initialize(cd *flaggerv1.Canary) (err error) {
 				return fmt.Errorf("%w", err)
 			}
 		}
-
-		c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).
-			Infof("Scaling down Deployment %s.%s", cd.Spec.TargetRef.Name, cd.Namespace)
-		if err := c.ScaleToZero(cd); err != nil {
-			return fmt.Errorf("scaling down canary deployment %s.%s failed: %w", cd.Spec.TargetRef.Name, cd.Namespace, err)
-		}
 	}
 
 	return nil
@@ -248,6 +248,16 @@ func (c *Controller) advanceCanary(name string, namespace string) {
 		return
 	}
 
+	// scale down the canary target to 0 replicas after the service is pointing to the primary target
+	if cd.Status.Phase == "" || cd.Status.Phase == flaggerv1.CanaryPhaseInitializing {
+		c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).
+			Infof("Scaling down %s %s.%s", cd.Spec.TargetRef.Kind, cd.Spec.TargetRef.Name, cd.Namespace)
+		if err := canaryController.ScaleToZero(cd); err != nil {
+			c.recordEventWarningf(cd, "scaling down canary %s %s.%s failed: %v", cd.Spec.TargetRef.Kind, cd.Spec.TargetRef.Name, cd.Namespace, err)
+			return
+		}
+	}
+
 	// take over an existing virtual service or ingress
 	// runs after the primary is ready to ensure zero downtime
 	if !strings.HasPrefix(provider, flaggerv1.AppMeshProvider) {
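The added block is guarded twice: it runs only while the canary is bootstrapping (status phase empty or CanaryPhaseInitializing), so established canaries are untouched, and it sits at the point in advanceCanary where the service already targets the primary, which is exactly the ordering the removed Initialize code could not guarantee. On failure it records a warning event and returns, so the scale-down is retried on a later reconciliation.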
@@ -46,9 +46,14 @@ func TestScheduler_DaemonSetNewRevision(t *testing.T) {
 	mocks := newDaemonSetFixture(nil)
 	mocks.ctrl.advanceCanary("podinfo", "default")
 
+	// check if ScaleToZero was performed
+	ds, err := mocks.kubeClient.AppsV1().DaemonSets("default").Get(context.TODO(), "podinfo", metav1.GetOptions{})
+	require.NoError(t, err)
+	assert.Contains(t, ds.Spec.Template.Spec.NodeSelector, "flagger.app/scale-to-zero")
+
 	// update
 	dae2 := newDaemonSetTestDaemonSetV2()
-	_, err := mocks.kubeClient.AppsV1().DaemonSets("default").Update(context.TODO(), dae2, metav1.UpdateOptions{})
+	_, err = mocks.kubeClient.AppsV1().DaemonSets("default").Update(context.TODO(), dae2, metav1.UpdateOptions{})
 	require.NoError(t, err)
 
 	// detect changes
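The NodeSelector assertion above reflects how a DaemonSet is "scaled to zero": a DaemonSet has no replica count to set, so the pod template is pinned to a node label that no node carries and the scheduler drains every pod. A minimal client-go sketch of that technique follows; the helper name and the label value are assumptions, only the flagger.app/scale-to-zero key comes from the test above, and this is not Flagger's actual implementation.

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// scaleDaemonSetToZero pins the DaemonSet's pod template to a node selector
// that no node satisfies; the scheduler then removes every pod. Deleting the
// selector again effectively scales the DaemonSet back up.
func scaleDaemonSetToZero(ctx context.Context, client kubernetes.Interface, namespace, name string) error {
	ds, err := client.AppsV1().DaemonSets(namespace).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	if ds.Spec.Template.Spec.NodeSelector == nil {
		ds.Spec.Template.Spec.NodeSelector = map[string]string{}
	}
	// Key taken from the test assertion above; the value is an assumption.
	ds.Spec.Template.Spec.NodeSelector["flagger.app/scale-to-zero"] = "true"
	_, err = client.AppsV1().DaemonSets(namespace).Update(ctx, ds, metav1.UpdateOptions{})
	return err
}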
@@ -54,9 +54,14 @@ func TestScheduler_DeploymentNewRevision(t *testing.T) {
 	// initialization done
 	mocks.ctrl.advanceCanary("podinfo", "default")
 
+	// check if ScaleToZero was performed
+	dp, err := mocks.kubeClient.AppsV1().Deployments("default").Get(context.TODO(), "podinfo", metav1.GetOptions{})
+	require.NoError(t, err)
+	assert.Equal(t, int32(0), *dp.Spec.Replicas)
+
 	// update
 	dep2 := newDeploymentTestDeploymentV2()
-	_, err := mocks.kubeClient.AppsV1().Deployments("default").Update(context.TODO(), dep2, metav1.UpdateOptions{})
+	_, err = mocks.kubeClient.AppsV1().Deployments("default").Update(context.TODO(), dep2, metav1.UpdateOptions{})
 	require.NoError(t, err)
 
 	// detect changes
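For a Deployment the equivalent is direct, which is what the assertion on *dp.Spec.Replicas checks: set spec.replicas to zero and update the object. A minimal client-go sketch under the same assumptions (hypothetical helper name, not Flagger's actual implementation):

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// scaleDeploymentToZero sets .spec.replicas to 0; the Deployment's ReplicaSet
// controller then terminates the pods. Restoring the previous replica count
// scales the workload back up.
func scaleDeploymentToZero(ctx context.Context, client kubernetes.Interface, namespace, name string) error {
	dp, err := client.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	zero := int32(0)
	dp.Spec.Replicas = &zero
	_, err = client.AppsV1().Deployments(namespace).Update(ctx, dp, metav1.UpdateOptions{})
	return err
}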