Mirror of https://github.com/fluxcd/flagger.git, synced 2026-02-15 18:40:12 +00:00

Compare commits (16 commits)
53be3e07d2
2eb2ae52cd
7bcc76eca0
295f5d7b39
8766523279
b02a6da614
89d7cb1b04
59d18de753
e1d8703a15
1ba595bc6f
446a2b976c
9af6ade54d
3fbe62aa47
4454c9b5b5
c2cf9bf4b1
3afc7978bd
@@ -92,6 +92,17 @@ jobs:
       - run: test/e2e-kubernetes.sh
       - run: test/e2e-kubernetes-tests.sh

+  e2e-kubernetes-svc-testing:
+    machine: true
+    steps:
+      - checkout
+      - attach_workspace:
+          at: /tmp/bin
+      - run: test/container-build.sh
+      - run: test/e2e-kind.sh
+      - run: test/e2e-kubernetes.sh
+      - run: test/e2e-kubernetes-svc-tests.sh
+
   e2e-smi-istio-testing:
     machine: true
     steps:
CHANGELOG.md (+13)

@@ -2,6 +2,19 @@

 All notable changes to this project are documented in this file.

+## 0.20.4 (2019-12-03)
+
+Adds support for taking over a running deployment without disruption
+
+#### Improvements
+
+- Add initialization phase to Kubernetes router [#384](https://github.com/weaveworks/flagger/pull/384)
+- Add canary controller interface and Kubernetes deployment kind implementation [#378](https://github.com/weaveworks/flagger/pull/378)
+
+#### Fixes
+
+- Skip primary check on skip analysis [#380](https://github.com/weaveworks/flagger/pull/380)
+
 ## 0.20.3 (2019-11-13)

 Adds wrk to load tester tools and the App Mesh gateway chart to Flagger Helm repository
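The skip-analysis fix in [#380] hinges on the Canary spec's skipAnalysis switch, which Initialize now consults before running the primary readiness check. A minimal illustrative manifest; the names and namespace are placeholders, not taken from this changeset:

apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  skipAnalysis: true
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo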
@@ -22,7 +22,7 @@ spec:
       serviceAccountName: flagger
       containers:
         - name: flagger
-          image: weaveworks/flagger:0.20.3
+          image: weaveworks/flagger:0.20.4
           imagePullPolicy: IfNotPresent
           ports:
             - name: http
@@ -1,7 +1,7 @@
 apiVersion: v1
 name: flagger
-version: 0.20.3
-appVersion: 0.20.3
+version: 0.20.4
+appVersion: 0.20.4
 kubeVersion: ">=1.11.0-0"
 engine: gotpl
 description: Flagger is a progressive delivery operator for Kubernetes
@@ -2,7 +2,7 @@

 image:
   repository: weaveworks/flagger
-  tag: 0.20.3
+  tag: 0.20.4
   pullPolicy: IfNotPresent
   pullSecret:
@@ -1,7 +1,7 @@
 apiVersion: v1
 name: grafana
-version: 1.3.0
-appVersion: 6.2.5
+version: 1.4.0
+appVersion: 6.5.1
 description: Grafana dashboards for monitoring Flagger canary deployments
 icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/weaveworks.png
 home: https://flagger.app
@@ -1,4 +1,4 @@
-apiVersion: apps/v1beta2
+apiVersion: apps/v1
 kind: Deployment
 metadata:
   name: {{ template "grafana.fullname" . }}
@@ -6,7 +6,7 @@ replicaCount: 1

 image:
   repository: grafana/grafana
-  tag: 6.2.5
+  tag: 6.5.1
   pullPolicy: IfNotPresent

 podAnnotations: {}
@@ -10,16 +10,6 @@ import (
 	"time"

 	"github.com/Masterminds/semver"
-	clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
-	informers "github.com/weaveworks/flagger/pkg/client/informers/externalversions"
-	"github.com/weaveworks/flagger/pkg/controller"
-	"github.com/weaveworks/flagger/pkg/logger"
-	"github.com/weaveworks/flagger/pkg/metrics"
-	"github.com/weaveworks/flagger/pkg/notifier"
-	"github.com/weaveworks/flagger/pkg/router"
-	"github.com/weaveworks/flagger/pkg/server"
-	"github.com/weaveworks/flagger/pkg/signals"
-	"github.com/weaveworks/flagger/pkg/version"
 	"go.uber.org/zap"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/client-go/kubernetes"

@@ -30,6 +20,18 @@ import (
 	"k8s.io/client-go/tools/leaderelection/resourcelock"
 	"k8s.io/client-go/transport"
 	_ "k8s.io/code-generator/cmd/client-gen/generators"
+
+	"github.com/weaveworks/flagger/pkg/canary"
+	clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
+	informers "github.com/weaveworks/flagger/pkg/client/informers/externalversions"
+	"github.com/weaveworks/flagger/pkg/controller"
+	"github.com/weaveworks/flagger/pkg/logger"
+	"github.com/weaveworks/flagger/pkg/metrics"
+	"github.com/weaveworks/flagger/pkg/notifier"
+	"github.com/weaveworks/flagger/pkg/router"
+	"github.com/weaveworks/flagger/pkg/server"
+	"github.com/weaveworks/flagger/pkg/signals"
+	"github.com/weaveworks/flagger/pkg/version"
 )

 var (

@@ -178,6 +180,12 @@ func main() {
 	go server.ListenAndServe(port, 3*time.Second, logger, stopCh)

 	routerFactory := router.NewFactory(cfg, kubeClient, flaggerClient, ingressAnnotationsPrefix, logger, meshClient)
+	configTracker := canary.ConfigTracker{
+		Logger:        logger,
+		KubeClient:    kubeClient,
+		FlaggerClient: flaggerClient,
+	}
+	canaryFactory := canary.NewFactory(kubeClient, flaggerClient, configTracker, labels, logger)

 	c := controller.NewController(
 		kubeClient,

@@ -187,11 +195,11 @@ func main() {
 		controlLoopInterval,
 		logger,
 		notifierClient,
+		canaryFactory,
 		routerFactory,
 		observerFactory,
 		meshProvider,
 		version.VERSION,
-		labels,
 	)

 	flaggerInformerFactory.Start(stopCh)
@@ -8,4 +8,4 @@ resources:
 - deployment.yaml
 images:
 - name: weaveworks/flagger
-  newTag: 0.20.3
+  newTag: 0.20.4
pkg/canary/controller.go (new file, 22 lines)

@@ -0,0 +1,22 @@
package canary

import (
	flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
)

type Controller interface {
	IsPrimaryReady(canary *flaggerv1.Canary) (bool, error)
	IsCanaryReady(canary *flaggerv1.Canary) (bool, error)
	GetMetadata(canary *flaggerv1.Canary) (string, map[string]int32, error)
	SyncStatus(canary *flaggerv1.Canary, status flaggerv1.CanaryStatus) error
	SetStatusFailedChecks(canary *flaggerv1.Canary, val int) error
	SetStatusWeight(canary *flaggerv1.Canary, val int) error
	SetStatusIterations(canary *flaggerv1.Canary, val int) error
	SetStatusPhase(canary *flaggerv1.Canary, phase flaggerv1.CanaryPhase) error
	Initialize(canary *flaggerv1.Canary, skipLivenessChecks bool) error
	Promote(canary *flaggerv1.Canary) error
	HasTargetChanged(canary *flaggerv1.Canary) (bool, error)
	HaveDependenciesChanged(canary *flaggerv1.Canary) (bool, error)
	Scale(canary *flaggerv1.Canary, replicas int32) error
	ScaleFromZero(canary *flaggerv1.Canary) error
}
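Everything the control loop needs from a target workload now goes through this interface, so a Deployment-backed canary and a Service-backed canary are interchangeable at the call sites. A minimal sketch of driving an implementation through the interface; the bootstrap helper is illustrative and not part of this changeset, and the phase constant is assumed from the flagger v1alpha3 API:

// bootstrap drives any canary.Controller implementation without
// knowing whether the target is a Deployment or a Service.
func bootstrap(ctrl canary.Controller, cd *flaggerv1.Canary) error {
	// create or take over the primary workload, skipping liveness checks
	if err := ctrl.Initialize(cd, true); err != nil {
		return err
	}
	// record the current spec hash so HasTargetChanged can detect later edits
	return ctrl.SyncStatus(cd, flaggerv1.CanaryStatus{Phase: flaggerv1.CanaryPhaseInitialized})
}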
@@ -6,7 +6,6 @@ import (
 	"io"

 	"github.com/google/go-cmp/cmp"
-	"github.com/mitchellh/hashstructure"
 	"go.uber.org/zap"
 	appsv1 "k8s.io/api/apps/v1"
 	hpav1 "k8s.io/api/autoscaling/v2beta1"

@@ -21,52 +20,53 @@ import (
 	clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
 )

-// Deployer is managing the operations for Kubernetes deployment kind
-type Deployer struct {
-	KubeClient    kubernetes.Interface
-	FlaggerClient clientset.Interface
-	Logger        *zap.SugaredLogger
-	ConfigTracker ConfigTracker
-	Labels        []string
+// DeploymentController is managing the operations for Kubernetes Deployment kind
+type DeploymentController struct {
+	kubeClient    kubernetes.Interface
+	flaggerClient clientset.Interface
+	logger        *zap.SugaredLogger
+	configTracker ConfigTracker
+	labels        []string
 }

 // Initialize creates the primary deployment, hpa,
 // scales to zero the canary deployment and returns the pod selector label and container ports
-func (c *Deployer) Initialize(cd *flaggerv1.Canary, skipLivenessChecks bool) (label string, ports map[string]int32, err error) {
+func (c *DeploymentController) Initialize(cd *flaggerv1.Canary, skipLivenessChecks bool) (err error) {
 	primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name)
-	label, ports, err = c.createPrimaryDeployment(cd)
+
+	err = c.createPrimaryDeployment(cd)
 	if err != nil {
-		return "", ports, fmt.Errorf("creating deployment %s.%s failed: %v", primaryName, cd.Namespace, err)
+		return fmt.Errorf("creating deployment %s.%s failed: %v", primaryName, cd.Namespace, err)
 	}

 	if cd.Status.Phase == "" || cd.Status.Phase == flaggerv1.CanaryPhaseInitializing {
-		if !skipLivenessChecks {
+		if !skipLivenessChecks && !cd.Spec.SkipAnalysis {
 			_, readyErr := c.IsPrimaryReady(cd)
 			if readyErr != nil {
-				return "", ports, readyErr
+				return readyErr
 			}
 		}

-		c.Logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("Scaling down %s.%s", cd.Spec.TargetRef.Name, cd.Namespace)
+		c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("Scaling down %s.%s", cd.Spec.TargetRef.Name, cd.Namespace)
 		if err := c.Scale(cd, 0); err != nil {
-			return "", ports, err
+			return err
 		}
 	}

 	if cd.Spec.AutoscalerRef != nil && cd.Spec.AutoscalerRef.Kind == "HorizontalPodAutoscaler" {
 		if err := c.reconcilePrimaryHpa(cd, true); err != nil {
-			return "", ports, fmt.Errorf("creating HorizontalPodAutoscaler %s.%s failed: %v", primaryName, cd.Namespace, err)
+			return fmt.Errorf("creating HorizontalPodAutoscaler %s.%s failed: %v", primaryName, cd.Namespace, err)
 		}
 	}
-	return label, ports, nil
+	return nil
 }

 // Promote copies the pod spec, secrets and config maps from canary to primary
-func (c *Deployer) Promote(cd *flaggerv1.Canary) error {
+func (c *DeploymentController) Promote(cd *flaggerv1.Canary) error {
 	targetName := cd.Spec.TargetRef.Name
 	primaryName := fmt.Sprintf("%s-primary", targetName)

-	canary, err := c.KubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
+	canary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
 	if err != nil {
 		if errors.IsNotFound(err) {
 			return fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace)

@@ -80,7 +80,7 @@ func (c *Deployer) Promote(cd *flaggerv1.Canary) error {
 			targetName, cd.Namespace, targetName)
 	}

-	primary, err := c.KubeClient.AppsV1().Deployments(cd.Namespace).Get(primaryName, metav1.GetOptions{})
+	primary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(primaryName, metav1.GetOptions{})
 	if err != nil {
 		if errors.IsNotFound(err) {
 			return fmt.Errorf("deployment %s.%s not found", primaryName, cd.Namespace)

@@ -89,11 +89,11 @@ func (c *Deployer) Promote(cd *flaggerv1.Canary) error {
 	}

 	// promote secrets and config maps
-	configRefs, err := c.ConfigTracker.GetTargetConfigs(cd)
+	configRefs, err := c.configTracker.GetTargetConfigs(cd)
 	if err != nil {
 		return err
 	}
-	if err := c.ConfigTracker.CreatePrimaryConfigs(cd, configRefs); err != nil {
+	if err := c.configTracker.CreatePrimaryConfigs(cd, configRefs); err != nil {
 		return err
 	}

@@ -104,7 +104,7 @@ func (c *Deployer) Promote(cd *flaggerv1.Canary) error {
 	primaryCopy.Spec.Strategy = canary.Spec.Strategy

 	// update spec with primary secrets and config maps
-	primaryCopy.Spec.Template.Spec = c.ConfigTracker.ApplyPrimaryConfigs(canary.Spec.Template.Spec, configRefs)
+	primaryCopy.Spec.Template.Spec = c.configTracker.ApplyPrimaryConfigs(canary.Spec.Template.Spec, configRefs)

 	// update pod annotations to ensure a rolling update
 	annotations, err := c.makeAnnotations(canary.Spec.Template.Annotations)

@@ -116,7 +116,7 @@ func (c *Deployer) Promote(cd *flaggerv1.Canary) error {
 	primaryCopy.Spec.Template.Labels = makePrimaryLabels(canary.Spec.Template.Labels, primaryName, label)

 	// apply update
-	_, err = c.KubeClient.AppsV1().Deployments(cd.Namespace).Update(primaryCopy)
+	_, err = c.kubeClient.AppsV1().Deployments(cd.Namespace).Update(primaryCopy)
 	if err != nil {
 		return fmt.Errorf("updating deployment %s.%s template spec failed: %v",
 			primaryCopy.GetName(), primaryCopy.Namespace, err)

@@ -132,10 +132,10 @@ func (c *Deployer) Promote(cd *flaggerv1.Canary) error {
 	return nil
 }

-// HasDeploymentChanged returns true if the canary deployment pod spec has changed
-func (c *Deployer) HasDeploymentChanged(cd *flaggerv1.Canary) (bool, error) {
+// HasTargetChanged returns true if the canary deployment pod spec has changed
+func (c *DeploymentController) HasTargetChanged(cd *flaggerv1.Canary) (bool, error) {
 	targetName := cd.Spec.TargetRef.Name
-	canary, err := c.KubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
+	canary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
 	if err != nil {
 		if errors.IsNotFound(err) {
 			return false, fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace)

@@ -143,31 +143,13 @@ func (c *Deployer) HasDeploymentChanged(cd *flaggerv1.Canary) (bool, error) {
 		return false, fmt.Errorf("deployment %s.%s query error %v", targetName, cd.Namespace, err)
 	}

-	if cd.Status.LastAppliedSpec == "" {
-		return true, nil
-	}
-
-	newHash, err := hashstructure.Hash(canary.Spec.Template, nil)
-	if err != nil {
-		return false, fmt.Errorf("hash error %v", err)
-	}
-
-	// do not trigger a canary deployment on manual rollback
-	if cd.Status.LastPromotedSpec == fmt.Sprintf("%d", newHash) {
-		return false, nil
-	}
-
-	if cd.Status.LastAppliedSpec != fmt.Sprintf("%d", newHash) {
-		return true, nil
-	}
-
-	return false, nil
+	return hasSpecChanged(cd, canary.Spec.Template)
 }

 // Scale sets the canary deployment replicas
-func (c *Deployer) Scale(cd *flaggerv1.Canary, replicas int32) error {
+func (c *DeploymentController) Scale(cd *flaggerv1.Canary, replicas int32) error {
 	targetName := cd.Spec.TargetRef.Name
-	dep, err := c.KubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
+	dep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
 	if err != nil {
 		if errors.IsNotFound(err) {
 			return fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace)

@@ -178,16 +160,16 @@ func (c *Deployer) Scale(cd *flaggerv1.Canary, replicas int32) error {
 	depCopy := dep.DeepCopy()
 	depCopy.Spec.Replicas = int32p(replicas)

-	_, err = c.KubeClient.AppsV1().Deployments(dep.Namespace).Update(depCopy)
+	_, err = c.kubeClient.AppsV1().Deployments(dep.Namespace).Update(depCopy)
 	if err != nil {
 		return fmt.Errorf("scaling %s.%s to %v failed: %v", depCopy.GetName(), depCopy.Namespace, replicas, err)
 	}
 	return nil
 }

-func (c *Deployer) ScaleUp(cd *flaggerv1.Canary) error {
+func (c *DeploymentController) ScaleFromZero(cd *flaggerv1.Canary) error {
 	targetName := cd.Spec.TargetRef.Name
-	dep, err := c.KubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
+	dep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
 	if err != nil {
 		if errors.IsNotFound(err) {
 			return fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace)

@@ -202,18 +184,18 @@ func (c *Deployer) ScaleUp(cd *flaggerv1.Canary) error {
 	depCopy := dep.DeepCopy()
 	depCopy.Spec.Replicas = replicas

-	_, err = c.KubeClient.AppsV1().Deployments(dep.Namespace).Update(depCopy)
+	_, err = c.kubeClient.AppsV1().Deployments(dep.Namespace).Update(depCopy)
 	if err != nil {
 		return fmt.Errorf("scaling %s.%s to %v failed: %v", depCopy.GetName(), depCopy.Namespace, replicas, err)
 	}
 	return nil
 }

-func (c *Deployer) createPrimaryDeployment(cd *flaggerv1.Canary) (string, map[string]int32, error) {
+// GetMetadata returns the pod label selector and svc ports
+func (c *DeploymentController) GetMetadata(cd *flaggerv1.Canary) (string, map[string]int32, error) {
 	targetName := cd.Spec.TargetRef.Name
 	primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name)

-	canaryDep, err := c.KubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
+	canaryDep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
 	if err != nil {
 		if errors.IsNotFound(err) {
 			return "", nil, fmt.Errorf("deployment %s.%s not found, retrying", targetName, cd.Namespace)

@@ -236,19 +218,39 @@ func (c *Deployer) createPrimaryDeployment(cd *flaggerv1.Canary) (string, map[string]int32, error) {
 		ports = p
 	}

-	primaryDep, err := c.KubeClient.AppsV1().Deployments(cd.Namespace).Get(primaryName, metav1.GetOptions{})
+	return label, ports, nil
+}
+
+func (c *DeploymentController) createPrimaryDeployment(cd *flaggerv1.Canary) error {
+	targetName := cd.Spec.TargetRef.Name
+	primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name)
+
+	canaryDep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
+	if err != nil {
+		if errors.IsNotFound(err) {
+			return fmt.Errorf("deployment %s.%s not found, retrying", targetName, cd.Namespace)
+		}
+		return err
+	}
+
+	label, err := c.getSelectorLabel(canaryDep)
+	if err != nil {
+		return fmt.Errorf("invalid label selector! Deployment %s.%s spec.selector.matchLabels must contain selector 'app: %s'",
+			targetName, cd.Namespace, targetName)
+	}
+
+	primaryDep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(primaryName, metav1.GetOptions{})
 	if errors.IsNotFound(err) {
 		// create primary secrets and config maps
-		configRefs, err := c.ConfigTracker.GetTargetConfigs(cd)
+		configRefs, err := c.configTracker.GetTargetConfigs(cd)
 		if err != nil {
-			return "", nil, err
+			return err
 		}
-		if err := c.ConfigTracker.CreatePrimaryConfigs(cd, configRefs); err != nil {
-			return "", nil, err
+		if err := c.configTracker.CreatePrimaryConfigs(cd, configRefs); err != nil {
+			return err
 		}
 		annotations, err := c.makeAnnotations(canaryDep.Spec.Template.Annotations)
 		if err != nil {
-			return "", nil, err
+			return err
 		}

 		replicas := int32(1)

@@ -289,25 +291,25 @@ func (c *Deployer) createPrimaryDeployment(cd *flaggerv1.Canary) (string, map[string]int32, error) {
 					Annotations: annotations,
 				},
 				// update spec with the primary secrets and config maps
-				Spec: c.ConfigTracker.ApplyPrimaryConfigs(canaryDep.Spec.Template.Spec, configRefs),
+				Spec: c.configTracker.ApplyPrimaryConfigs(canaryDep.Spec.Template.Spec, configRefs),
 			},
 		},
 	}

-		_, err = c.KubeClient.AppsV1().Deployments(cd.Namespace).Create(primaryDep)
+		_, err = c.kubeClient.AppsV1().Deployments(cd.Namespace).Create(primaryDep)
 		if err != nil {
-			return "", nil, err
+			return err
 		}

-		c.Logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("Deployment %s.%s created", primaryDep.GetName(), cd.Namespace)
+		c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("Deployment %s.%s created", primaryDep.GetName(), cd.Namespace)
 	}

-	return label, ports, nil
+	return nil
 }

-func (c *Deployer) reconcilePrimaryHpa(cd *flaggerv1.Canary, init bool) error {
+func (c *DeploymentController) reconcilePrimaryHpa(cd *flaggerv1.Canary, init bool) error {
 	primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name)
-	hpa, err := c.KubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(cd.Namespace).Get(cd.Spec.AutoscalerRef.Name, metav1.GetOptions{})
+	hpa, err := c.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(cd.Namespace).Get(cd.Spec.AutoscalerRef.Name, metav1.GetOptions{})
 	if err != nil {
 		if errors.IsNotFound(err) {
 			return fmt.Errorf("HorizontalPodAutoscaler %s.%s not found, retrying",

@@ -328,7 +330,7 @@ func (c *Deployer) reconcilePrimaryHpa(cd *flaggerv1.Canary, init bool) error {
 	}

 	primaryHpaName := fmt.Sprintf("%s-primary", cd.Spec.AutoscalerRef.Name)
-	primaryHpa, err := c.KubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(cd.Namespace).Get(primaryHpaName, metav1.GetOptions{})
+	primaryHpa, err := c.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(cd.Namespace).Get(primaryHpaName, metav1.GetOptions{})

 	// create HPA
 	if errors.IsNotFound(err) {

@@ -348,11 +350,11 @@ func (c *Deployer) reconcilePrimaryHpa(cd *flaggerv1.Canary, init bool) error {
 			Spec: hpaSpec,
 		}

-		_, err = c.KubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(cd.Namespace).Create(primaryHpa)
+		_, err = c.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(cd.Namespace).Create(primaryHpa)
 		if err != nil {
 			return err
 		}
-		c.Logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("HorizontalPodAutoscaler %s.%s created", primaryHpa.GetName(), cd.Namespace)
+		c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("HorizontalPodAutoscaler %s.%s created", primaryHpa.GetName(), cd.Namespace)
 		return nil
 	}

@@ -370,11 +372,11 @@ func (c *Deployer) reconcilePrimaryHpa(cd *flaggerv1.Canary, init bool) error {
 			hpaClone.Spec.MinReplicas = hpaSpec.MinReplicas
 			hpaClone.Spec.Metrics = hpaSpec.Metrics

-			_, upErr := c.KubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(cd.Namespace).Update(hpaClone)
+			_, upErr := c.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(cd.Namespace).Update(hpaClone)
 			if upErr != nil {
 				return upErr
 			}
-			c.Logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("HorizontalPodAutoscaler %s.%s updated", primaryHpa.GetName(), cd.Namespace)
+			c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("HorizontalPodAutoscaler %s.%s updated", primaryHpa.GetName(), cd.Namespace)
 		}
 	}

@@ -382,7 +384,7 @@ func (c *Deployer) reconcilePrimaryHpa(cd *flaggerv1.Canary, init bool) error {
 }

 // makeAnnotations appends an unique ID to annotations map
-func (c *Deployer) makeAnnotations(annotations map[string]string) (map[string]string, error) {
+func (c *DeploymentController) makeAnnotations(annotations map[string]string) (map[string]string, error) {
 	idKey := "flagger-id"
 	res := make(map[string]string)
 	uuid := make([]byte, 16)

@@ -405,8 +407,8 @@ func (c *Deployer) makeAnnotations(annotations map[string]string) (map[string]string, error) {
 }

 // getSelectorLabel returns the selector match label
-func (c *Deployer) getSelectorLabel(deployment *appsv1.Deployment) (string, error) {
-	for _, l := range c.Labels {
+func (c *DeploymentController) getSelectorLabel(deployment *appsv1.Deployment) (string, error) {
+	for _, l := range c.labels {
 		if _, ok := deployment.Spec.Selector.MatchLabels[l]; ok {
 			return l, nil
 		}

@@ -420,8 +422,12 @@ var sidecars = map[string]bool{
 	"envoy": true,
 }

+func (c *DeploymentController) HaveDependenciesChanged(cd *flaggerv1.Canary) (bool, error) {
+	return c.configTracker.HasConfigChanged(cd)
+}
+
 // getPorts returns a list of all container ports
-func (c *Deployer) getPorts(cd *flaggerv1.Canary, deployment *appsv1.Deployment) (map[string]int32, error) {
+func (c *DeploymentController) getPorts(cd *flaggerv1.Canary, deployment *appsv1.Deployment) (map[string]int32, error) {
 	ports := make(map[string]int32)

 	for _, container := range deployment.Spec.Template.Spec.Containers {
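The shape of the refactor above: Initialize no longer returns the selector label and container ports as by-products; that lookup now lives in the dedicated GetMetadata method, which keeps every Controller method's signature uniform across target kinds. The resulting call order at an illustrative call site (variable names assumed):

// initialize first, then ask for the routing metadata separately
if err := ctrl.Initialize(cd, skipLivenessChecks); err != nil {
	return err
}
label, ports, err := ctrl.GetMetadata(cd)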
@@ -10,7 +10,7 @@ import (

 func TestCanaryDeployer_Sync(t *testing.T) {
 	mocks := SetupMocks()
-	_, _, err := mocks.deployer.Initialize(mocks.canary, true)
+	err := mocks.deployer.Initialize(mocks.canary, true)
 	if err != nil {
 		t.Fatal(err.Error())
 	}

@@ -96,7 +96,7 @@ func TestCanaryDeployer_Sync(t *testing.T) {

 func TestCanaryDeployer_IsNewSpec(t *testing.T) {
 	mocks := SetupMocks()
-	_, _, err := mocks.deployer.Initialize(mocks.canary, true)
+	err := mocks.deployer.Initialize(mocks.canary, true)
 	if err != nil {
 		t.Fatal(err.Error())
 	}

@@ -107,7 +107,7 @@ func TestCanaryDeployer_IsNewSpec(t *testing.T) {
 		t.Fatal(err.Error())
 	}

-	isNew, err := mocks.deployer.HasDeploymentChanged(mocks.canary)
+	isNew, err := mocks.deployer.HasTargetChanged(mocks.canary)
 	if err != nil {
 		t.Fatal(err.Error())
 	}

@@ -119,7 +119,7 @@ func TestCanaryDeployer_IsNewSpec(t *testing.T) {

 func TestCanaryDeployer_Promote(t *testing.T) {
 	mocks := SetupMocks()
-	_, _, err := mocks.deployer.Initialize(mocks.canary, true)
+	err := mocks.deployer.Initialize(mocks.canary, true)
 	if err != nil {
 		t.Fatal(err.Error())
 	}

@@ -185,7 +185,7 @@ func TestCanaryDeployer_Promote(t *testing.T) {

 func TestCanaryDeployer_IsReady(t *testing.T) {
 	mocks := SetupMocks()
-	_, _, err := mocks.deployer.Initialize(mocks.canary, true)
+	err := mocks.deployer.Initialize(mocks.canary, true)
 	if err != nil {
 		t.Error("Expected primary readiness check to fail")
 	}

@@ -203,7 +203,7 @@ func TestCanaryDeployer_IsReady(t *testing.T) {

 func TestCanaryDeployer_SetFailedChecks(t *testing.T) {
 	mocks := SetupMocks()
-	_, _, err := mocks.deployer.Initialize(mocks.canary, true)
+	err := mocks.deployer.Initialize(mocks.canary, true)
 	if err != nil {
 		t.Fatal(err.Error())
 	}

@@ -225,7 +225,7 @@ func TestCanaryDeployer_SetFailedChecks(t *testing.T) {

 func TestCanaryDeployer_SetState(t *testing.T) {
 	mocks := SetupMocks()
-	_, _, err := mocks.deployer.Initialize(mocks.canary, true)
+	err := mocks.deployer.Initialize(mocks.canary, true)
 	if err != nil {
 		t.Fatal(err.Error())
 	}

@@ -247,7 +247,7 @@ func TestCanaryDeployer_SetState(t *testing.T) {

 func TestCanaryDeployer_SyncStatus(t *testing.T) {
 	mocks := SetupMocks()
-	_, _, err := mocks.deployer.Initialize(mocks.canary, true)
+	err := mocks.deployer.Initialize(mocks.canary, true)
 	if err != nil {
 		t.Fatal(err.Error())
 	}

@@ -286,7 +286,7 @@ func TestCanaryDeployer_SyncStatus(t *testing.T) {

 func TestCanaryDeployer_Scale(t *testing.T) {
 	mocks := SetupMocks()
-	_, _, err := mocks.deployer.Initialize(mocks.canary, true)
+	err := mocks.deployer.Initialize(mocks.canary, true)
 	if err != nil {
 		t.Fatal(err.Error())
 	}
pkg/canary/factory.go (new file, 59 lines)

@@ -0,0 +1,59 @@
package canary

import (
	"go.uber.org/zap"
	"k8s.io/client-go/kubernetes"

	clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
)

type Factory struct {
	kubeClient    kubernetes.Interface
	flaggerClient clientset.Interface
	logger        *zap.SugaredLogger
	configTracker ConfigTracker
	labels        []string
}

func NewFactory(kubeClient kubernetes.Interface,
	flaggerClient clientset.Interface,
	configTracker ConfigTracker,
	labels []string,
	logger *zap.SugaredLogger) *Factory {
	return &Factory{
		kubeClient:    kubeClient,
		flaggerClient: flaggerClient,
		logger:        logger,
		configTracker: configTracker,
		labels:        labels,
	}
}

func (factory *Factory) Controller(kind string) Controller {
	deploymentCtrl := &DeploymentController{
		logger:        factory.logger,
		kubeClient:    factory.kubeClient,
		flaggerClient: factory.flaggerClient,
		labels:        factory.labels,
		configTracker: ConfigTracker{
			Logger:        factory.logger,
			KubeClient:    factory.kubeClient,
			FlaggerClient: factory.flaggerClient,
		},
	}
	serviceCtrl := &ServiceController{
		logger:        factory.logger,
		kubeClient:    factory.kubeClient,
		flaggerClient: factory.flaggerClient,
	}

	switch {
	case kind == "Deployment":
		return deploymentCtrl
	case kind == "Service":
		return serviceCtrl
	default:
		return deploymentCtrl
	}
}
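Note that the switch above makes unrecognized kinds silently fall back to the Deployment controller. An illustrative call from the sync loop, with the surrounding variable names assumed:

// map the canary's target kind to a controller implementation
ctrl := canaryFactory.Controller(cd.Spec.TargetRef.Kind)
if changed, err := ctrl.HasTargetChanged(cd); err == nil && changed {
	// a new spec was applied; begin a canary analysis run
}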
@@ -20,7 +20,7 @@ type Mocks struct {
 	canary        *flaggerv1.Canary
 	kubeClient    kubernetes.Interface
 	flaggerClient clientset.Interface
-	deployer      Deployer
+	deployer      DeploymentController
 	logger        *zap.SugaredLogger
 }

@@ -43,12 +43,12 @@ func SetupMocks() Mocks {

 	logger, _ := logger.NewLogger("debug")

-	deployer := Deployer{
-		FlaggerClient: flaggerClient,
-		KubeClient:    kubeClient,
-		Logger:        logger,
-		Labels:        []string{"app", "name"},
-		ConfigTracker: ConfigTracker{
+	deployer := DeploymentController{
+		flaggerClient: flaggerClient,
+		kubeClient:    kubeClient,
+		logger:        logger,
+		labels:        []string{"app", "name"},
+		configTracker: ConfigTracker{
 			Logger:        logger,
 			KubeClient:    kubeClient,
 			FlaggerClient: flaggerClient,
@@ -14,9 +14,9 @@ import (
 // IsPrimaryReady checks the primary deployment status and returns an error if
 // the deployment is in the middle of a rolling update or if the pods are unhealthy
 // it will return a non retriable error if the rolling update is stuck
-func (c *Deployer) IsPrimaryReady(cd *flaggerv1.Canary) (bool, error) {
+func (c *DeploymentController) IsPrimaryReady(cd *flaggerv1.Canary) (bool, error) {
 	primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name)
-	primary, err := c.KubeClient.AppsV1().Deployments(cd.Namespace).Get(primaryName, metav1.GetOptions{})
+	primary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(primaryName, metav1.GetOptions{})
 	if err != nil {
 		if errors.IsNotFound(err) {
 			return true, fmt.Errorf("deployment %s.%s not found", primaryName, cd.Namespace)

@@ -39,9 +39,9 @@ func (c *Deployer) IsPrimaryReady(cd *flaggerv1.Canary) (bool, error) {
 // IsCanaryReady checks the primary deployment status and returns an error if
 // the deployment is in the middle of a rolling update or if the pods are unhealthy
 // it will return a non retriable error if the rolling update is stuck
-func (c *Deployer) IsCanaryReady(cd *flaggerv1.Canary) (bool, error) {
+func (c *DeploymentController) IsCanaryReady(cd *flaggerv1.Canary) (bool, error) {
 	targetName := cd.Spec.TargetRef.Name
-	canary, err := c.KubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
+	canary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
 	if err != nil {
 		if errors.IsNotFound(err) {
 			return true, fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace)

@@ -64,7 +64,7 @@ func (c *Deployer) IsCanaryReady(cd *flaggerv1.Canary) (bool, error) {

 // isDeploymentReady determines if a deployment is ready by checking the status conditions
 // if a deployment has exceeded the progress deadline it returns a non retriable error
-func (c *Deployer) isDeploymentReady(deployment *appsv1.Deployment, deadline int) (bool, error) {
+func (c *DeploymentController) isDeploymentReady(deployment *appsv1.Deployment, deadline int) (bool, error) {
 	retriable := true
 	if deployment.Generation <= deployment.Status.ObservedGeneration {
 		progress := c.getDeploymentCondition(deployment.Status, appsv1.DeploymentProgressing)

@@ -99,7 +99,7 @@ func (c *Deployer) isDeploymentReady(deployment *appsv1.Deployment, deadline int) (bool, error) {
 	return true, nil
 }

-func (c *Deployer) getDeploymentCondition(
+func (c *DeploymentController) getDeploymentCondition(
 	status appsv1.DeploymentStatus,
 	conditionType appsv1.DeploymentConditionType,
 ) *appsv1.DeploymentCondition {
pkg/canary/service_controller.go (new file, 247 lines)

@@ -0,0 +1,247 @@
package canary

import (
	"fmt"

	ex "github.com/pkg/errors"
	"go.uber.org/zap"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/kubernetes"

	flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
	clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
)

// ServiceController is managing the operations for Kubernetes service kind
type ServiceController struct {
	kubeClient    kubernetes.Interface
	flaggerClient clientset.Interface
	logger        *zap.SugaredLogger
}

// SetStatusFailedChecks updates the canary failed checks counter
func (c *ServiceController) SetStatusFailedChecks(cd *flaggerv1.Canary, val int) error {
	return setStatusFailedChecks(c.flaggerClient, cd, val)
}

// SetStatusWeight updates the canary status weight value
func (c *ServiceController) SetStatusWeight(cd *flaggerv1.Canary, val int) error {
	return setStatusWeight(c.flaggerClient, cd, val)
}

// SetStatusIterations updates the canary status iterations value
func (c *ServiceController) SetStatusIterations(cd *flaggerv1.Canary, val int) error {
	return setStatusIterations(c.flaggerClient, cd, val)
}

// SetStatusPhase updates the canary status phase
func (c *ServiceController) SetStatusPhase(cd *flaggerv1.Canary, phase flaggerv1.CanaryPhase) error {
	return setStatusPhase(c.flaggerClient, cd, phase)
}

// GetMetadata returns the pod label selector and svc ports
func (c *ServiceController) GetMetadata(cd *flaggerv1.Canary) (string, map[string]int32, error) {
	return "", nil, nil
}

// Initialize creates or updates the primary and canary services to prepare for the canary release process targeted on the K8s service
func (c *ServiceController) Initialize(cd *flaggerv1.Canary, skipLivenessChecks bool) (err error) {
	targetName := cd.Spec.TargetRef.Name
	primaryName := fmt.Sprintf("%s-primary", targetName)
	canaryName := fmt.Sprintf("%s-canary", targetName)

	svc, err := c.kubeClient.CoreV1().Services(cd.Namespace).Get(targetName, metav1.GetOptions{})
	if err != nil {
		return err
	}

	// canary svc
	err = c.reconcileCanaryService(cd, canaryName, svc)
	if err != nil {
		return err
	}

	// primary svc
	err = c.reconcilePrimaryService(cd, primaryName, svc)
	if err != nil {
		return err
	}

	return nil
}

func (c *ServiceController) reconcileCanaryService(canary *flaggerv1.Canary, name string, src *corev1.Service) error {
	current, err := c.kubeClient.CoreV1().Services(canary.Namespace).Get(name, metav1.GetOptions{})
	if errors.IsNotFound(err) {
		return c.createService(canary, name, src)
	}

	if err != nil {
		return fmt.Errorf("service %s query error %v", name, err)
	}

	new := buildService(canary, name, src)

	if new.Spec.Type == "ClusterIP" {
		// We can't change this immutable field
		new.Spec.ClusterIP = current.Spec.ClusterIP
	}

	// We can't change this immutable field
	new.ObjectMeta.UID = current.ObjectMeta.UID

	new.ObjectMeta.ResourceVersion = current.ObjectMeta.ResourceVersion

	_, err = c.kubeClient.CoreV1().Services(canary.Namespace).Update(new)
	if err != nil {
		return err
	}

	c.logger.With("canary", fmt.Sprintf("%s.%s", canary.Name, canary.Namespace)).
		Infof("Service %s.%s updated", new.GetName(), canary.Namespace)
	return nil
}

func (c *ServiceController) reconcilePrimaryService(canary *flaggerv1.Canary, name string, src *corev1.Service) error {
	_, err := c.kubeClient.CoreV1().Services(canary.Namespace).Get(name, metav1.GetOptions{})
	if errors.IsNotFound(err) {
		return c.createService(canary, name, src)
	}

	if err != nil {
		return fmt.Errorf("service %s query error %v", name, err)
	}

	return nil
}

func (c *ServiceController) createService(canary *flaggerv1.Canary, name string, src *corev1.Service) error {
	svc := buildService(canary, name, src)

	if svc.Spec.Type == "ClusterIP" {
		// Reset and let K8s assign the IP. Otherwise we get an error due to the IP is already assigned
		svc.Spec.ClusterIP = ""
	}

	// Let K8s set this. Otherwise K8s API complains with "resourceVersion should not be set on objects to be created"
	svc.ObjectMeta.ResourceVersion = ""

	_, err := c.kubeClient.CoreV1().Services(canary.Namespace).Create(svc)
	if err != nil {
		return err
	}

	c.logger.With("canary", fmt.Sprintf("%s.%s", canary.Name, canary.Namespace)).
		Infof("Service %s.%s created", svc.GetName(), canary.Namespace)
	return nil
}

func buildService(canary *flaggerv1.Canary, name string, src *corev1.Service) *corev1.Service {
	svc := src.DeepCopy()
	svc.ObjectMeta.Name = name
	svc.ObjectMeta.Namespace = canary.Namespace
	svc.ObjectMeta.OwnerReferences = []metav1.OwnerReference{
		*metav1.NewControllerRef(canary, schema.GroupVersionKind{
			Group:   flaggerv1.SchemeGroupVersion.Group,
			Version: flaggerv1.SchemeGroupVersion.Version,
			Kind:    flaggerv1.CanaryKind,
		}),
	}
	_, exists := svc.ObjectMeta.Annotations["kubectl.kubernetes.io/last-applied-configuration"]
	if exists {
		// Leaving this results in updates from flagger to this svc never succeed due to resourceVersion mismatch:
		// Operation cannot be fulfilled on services "mysvc-canary": the object has been modified; please apply your changes to the latest version and try again
		delete(svc.ObjectMeta.Annotations, "kubectl.kubernetes.io/last-applied-configuration")
	}

	return svc
}

// Promote copies target's spec from canary to primary
func (c *ServiceController) Promote(cd *flaggerv1.Canary) error {
	targetName := cd.Spec.TargetRef.Name
	primaryName := fmt.Sprintf("%s-primary", targetName)

	canary, err := c.kubeClient.CoreV1().Services(cd.Namespace).Get(targetName, metav1.GetOptions{})
	if err != nil {
		if errors.IsNotFound(err) {
			return fmt.Errorf("service %s.%s not found", targetName, cd.Namespace)
		}
		return fmt.Errorf("service %s.%s query error %v", targetName, cd.Namespace, err)
	}

	primary, err := c.kubeClient.CoreV1().Services(cd.Namespace).Get(primaryName, metav1.GetOptions{})
	if err != nil {
		if errors.IsNotFound(err) {
			return fmt.Errorf("service %s.%s not found", primaryName, cd.Namespace)
		}
		return fmt.Errorf("service %s.%s query error %v", primaryName, cd.Namespace, err)
	}

	primaryCopy := canary.DeepCopy()
	primaryCopy.ObjectMeta.Name = primary.ObjectMeta.Name
	if primaryCopy.Spec.Type == "ClusterIP" {
		primaryCopy.Spec.ClusterIP = primary.Spec.ClusterIP
	}
	primaryCopy.ObjectMeta.ResourceVersion = primary.ObjectMeta.ResourceVersion
	primaryCopy.ObjectMeta.UID = primary.ObjectMeta.UID

	// apply update
	_, err = c.kubeClient.CoreV1().Services(cd.Namespace).Update(primaryCopy)
	if err != nil {
		return fmt.Errorf("updating service %s.%s spec failed: %v",
			primaryCopy.GetName(), primaryCopy.Namespace, err)
	}

	return nil
}

// HasServiceChanged returns true if the canary service spec has changed
func (c *ServiceController) HasTargetChanged(cd *flaggerv1.Canary) (bool, error) {
	targetName := cd.Spec.TargetRef.Name
	canary, err := c.kubeClient.CoreV1().Services(cd.Namespace).Get(targetName, metav1.GetOptions{})
	if err != nil {
		if errors.IsNotFound(err) {
			return false, fmt.Errorf("service %s.%s not found", targetName, cd.Namespace)
		}
		return false, fmt.Errorf("service %s.%s query error %v", targetName, cd.Namespace, err)
	}

	return hasSpecChanged(cd, canary.Spec)
}

// Scale sets the canary deployment replicas
func (c *ServiceController) Scale(cd *flaggerv1.Canary, replicas int32) error {
	return nil
}

func (c *ServiceController) ScaleFromZero(cd *flaggerv1.Canary) error {
	return nil
}

func (c *ServiceController) SyncStatus(cd *flaggerv1.Canary, status flaggerv1.CanaryStatus) error {
	dep, err := c.kubeClient.CoreV1().Services(cd.Namespace).Get(cd.Spec.TargetRef.Name, metav1.GetOptions{})
	if err != nil {
		if errors.IsNotFound(err) {
			return fmt.Errorf("service %s.%s not found", cd.Spec.TargetRef.Name, cd.Namespace)
		}
		return ex.Wrap(err, "SyncStatus service query error")
	}

	return syncCanaryStatus(c.flaggerClient, cd, status, dep.Spec, func(cdCopy *flaggerv1.Canary) {})
}

func (c *ServiceController) HaveDependenciesChanged(cd *flaggerv1.Canary) (bool, error) {
	return false, nil
}

func (c *ServiceController) IsPrimaryReady(cd *flaggerv1.Canary) (bool, error) {
	return true, nil
}

func (c *ServiceController) IsCanaryReady(cd *flaggerv1.Canary) (bool, error) {
	return true, nil
}
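The ClusterIP, UID, and resourceVersion copying in reconcileCanaryService is the standard pattern for in-place Service updates: spec.clusterIP and metadata.uid are immutable once set, and an update must carry the live object's resourceVersion to pass the API server's optimistic concurrency check. A condensed sketch of that pattern, with desired/current as assumed names:

// desired is freshly built from the target; current was read from the API server
desired.Spec.ClusterIP = current.Spec.ClusterIP                         // immutable once assigned
desired.ObjectMeta.UID = current.ObjectMeta.UID                         // immutable identity
desired.ObjectMeta.ResourceVersion = current.ObjectMeta.ResourceVersion // optimistic-concurrency token
_, err := kubeClient.CoreV1().Services(namespace).Update(desired)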
pkg/canary/spec.go (new file, 30 lines)

@@ -0,0 +1,30 @@
package canary

import (
	"fmt"

	"github.com/mitchellh/hashstructure"
	"github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
)

func hasSpecChanged(cd *v1alpha3.Canary, spec interface{}) (bool, error) {
	if cd.Status.LastAppliedSpec == "" {
		return true, nil
	}

	newHash, err := hashstructure.Hash(spec, nil)
	if err != nil {
		return false, fmt.Errorf("hash error %v", err)
	}

	// do not trigger a canary deployment on manual rollback
	if cd.Status.LastPromotedSpec == fmt.Sprintf("%d", newHash) {
		return false, nil
	}

	if cd.Status.LastAppliedSpec != fmt.Sprintf("%d", newHash) {
		return true, nil
	}

	return false, nil
}
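hasSpecChanged distinguishes three cases from the two stored hashes: an empty LastAppliedSpec (first sync) counts as changed, a hash equal to LastPromotedSpec is treated as a manual rollback and ignored, and any other mismatch with LastAppliedSpec triggers a run. A small self-contained check of the hashing primitive it builds on; the struct and image tags are illustrative:

package main

import (
	"fmt"

	"github.com/mitchellh/hashstructure"
)

func main() {
	type podTemplate struct{ Image string }
	a, _ := hashstructure.Hash(podTemplate{Image: "podinfo:1.0"}, nil)
	b, _ := hashstructure.Hash(podTemplate{Image: "podinfo:1.1"}, nil)
	// equal structures hash equal; any field change yields a different uint64
	fmt.Println(a == b) // false
}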
@@ -5,6 +5,7 @@ import (

 	"github.com/mitchellh/hashstructure"
 	ex "github.com/pkg/errors"
+	"github.com/weaveworks/flagger/pkg/client/clientset/versioned"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -14,8 +15,8 @@ import (
 )

 // SyncStatus encodes the canary pod spec and updates the canary status
-func (c *Deployer) SyncStatus(cd *flaggerv1.Canary, status flaggerv1.CanaryStatus) error {
-	dep, err := c.KubeClient.AppsV1().Deployments(cd.Namespace).Get(cd.Spec.TargetRef.Name, metav1.GetOptions{})
+func (c *DeploymentController) SyncStatus(cd *flaggerv1.Canary, status flaggerv1.CanaryStatus) error {
+	dep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(cd.Spec.TargetRef.Name, metav1.GetOptions{})
 	if err != nil {
 		if errors.IsNotFound(err) {
 			return fmt.Errorf("deployment %s.%s not found", cd.Spec.TargetRef.Name, cd.Namespace)

@@ -23,12 +24,18 @@ func (c *Deployer) SyncStatus(cd *flaggerv1.Canary, status flaggerv1.CanaryStatus) error {
 		return ex.Wrap(err, "SyncStatus deployment query error")
 	}

-	configs, err := c.ConfigTracker.GetConfigRefs(cd)
+	configs, err := c.configTracker.GetConfigRefs(cd)
 	if err != nil {
 		return ex.Wrap(err, "SyncStatus configs query error")
 	}

-	hash, err := hashstructure.Hash(dep.Spec.Template, nil)
+	return syncCanaryStatus(c.flaggerClient, cd, status, dep.Spec.Template, func(cdCopy *flaggerv1.Canary) {
+		cdCopy.Status.TrackedConfigs = configs
+	})
+}
+
+func syncCanaryStatus(flaggerClient versioned.Interface, cd *flaggerv1.Canary, status flaggerv1.CanaryStatus, canaryResource interface{}, setAll func(cdCopy *flaggerv1.Canary)) error {
+	hash, err := hashstructure.Hash(canaryResource, nil)
 	if err != nil {
 		return ex.Wrap(err, "SyncStatus hash error")
 	}

@@ -37,7 +44,7 @@ func (c *Deployer) SyncStatus(cd *flaggerv1.Canary, status flaggerv1.CanaryStatus) error {
 	err = retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
 		var selErr error
 		if !firstTry {
-			cd, selErr = c.FlaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).Get(cd.GetName(), metav1.GetOptions{})
+			cd, selErr = flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).Get(cd.GetName(), metav1.GetOptions{})
 			if selErr != nil {
 				return selErr
 			}

@@ -49,13 +56,13 @@ func (c *Deployer) SyncStatus(cd *flaggerv1.Canary, status flaggerv1.CanaryStatus) error {
 		cdCopy.Status.Iterations = status.Iterations
 		cdCopy.Status.LastAppliedSpec = fmt.Sprintf("%d", hash)
 		cdCopy.Status.LastTransitionTime = metav1.Now()
-		cdCopy.Status.TrackedConfigs = configs
+		setAll(cdCopy)

-		if ok, conditions := c.MakeStatusConditions(cd.Status, status.Phase); ok {
+		if ok, conditions := MakeStatusConditions(cd.Status, status.Phase); ok {
 			cdCopy.Status.Conditions = conditions
 		}

-		_, err = c.FlaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
+		_, err = flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
 		firstTry = false
 		return
 	})

@@ -66,12 +73,16 @@ func (c *Deployer) SyncStatus(cd *flaggerv1.Canary, status flaggerv1.CanaryStatus) error {
 }

 // SetStatusFailedChecks updates the canary failed checks counter
-func (c *Deployer) SetStatusFailedChecks(cd *flaggerv1.Canary, val int) error {
+func (c *DeploymentController) SetStatusFailedChecks(cd *flaggerv1.Canary, val int) error {
+	return setStatusFailedChecks(c.flaggerClient, cd, val)
+}
+
+func setStatusFailedChecks(flaggerClient versioned.Interface, cd *flaggerv1.Canary, val int) error {
 	firstTry := true
 	err := retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
 		var selErr error
 		if !firstTry {
-			cd, selErr = c.FlaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).Get(cd.GetName(), metav1.GetOptions{})
+			cd, selErr = flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).Get(cd.GetName(), metav1.GetOptions{})
 			if selErr != nil {
 				return selErr
 			}

@@ -80,7 +91,7 @@ func (c *Deployer) SetStatusFailedChecks(cd *flaggerv1.Canary, val int) error {
 		cdCopy.Status.FailedChecks = val
 		cdCopy.Status.LastTransitionTime = metav1.Now()

-		_, err = c.FlaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
+		_, err = flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
 		firstTry = false
 		return
 	})

@@ -91,12 +102,16 @@ func (c *Deployer) SetStatusFailedChecks(cd *flaggerv1.Canary, val int) error {
 }

 // SetStatusWeight updates the canary status weight value
-func (c *Deployer) SetStatusWeight(cd *flaggerv1.Canary, val int) error {
+func (c *DeploymentController) SetStatusWeight(cd *flaggerv1.Canary, val int) error {
+	return setStatusWeight(c.flaggerClient, cd, val)
+}
+
+func setStatusWeight(flaggerClient versioned.Interface, cd *flaggerv1.Canary, val int) error {
 	firstTry := true
 	err := retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
 		var selErr error
 		if !firstTry {
-			cd, selErr = c.FlaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).Get(cd.GetName(), metav1.GetOptions{})
+			cd, selErr = flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).Get(cd.GetName(), metav1.GetOptions{})
 			if selErr != nil {
 				return selErr
 			}

@@ -105,7 +120,7 @@ func (c *Deployer) SetStatusWeight(cd *flaggerv1.Canary, val int) error {
 		cdCopy.Status.CanaryWeight = val
 		cdCopy.Status.LastTransitionTime = metav1.Now()

-		_, err = c.FlaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
+		_, err = flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
 		firstTry = false
 		return
 	})

@@ -116,12 +131,16 @@ func (c *Deployer) SetStatusWeight(cd *flaggerv1.Canary, val int) error {
 }

 // SetStatusIterations updates the canary status iterations value
-func (c *Deployer) SetStatusIterations(cd *flaggerv1.Canary, val int) error {
+func (c *DeploymentController) SetStatusIterations(cd *flaggerv1.Canary, val int) error {
+	return setStatusIterations(c.flaggerClient, cd, val)
+}
+
+func setStatusIterations(flaggerClient versioned.Interface, cd *flaggerv1.Canary, val int) error {
 	firstTry := true
 	err := retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
 		var selErr error
 		if !firstTry {
-			cd, selErr = c.FlaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).Get(cd.GetName(), metav1.GetOptions{})
+			cd, selErr = flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).Get(cd.GetName(), metav1.GetOptions{})
 			if selErr != nil {
 				return selErr
 			}

@@ -131,7 +150,7 @@ func (c *Deployer) SetStatusIterations(cd *flaggerv1.Canary, val int) error {
 		cdCopy.Status.Iterations = val
 		cdCopy.Status.LastTransitionTime = metav1.Now()

-		_, err = c.FlaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
+		_, err = flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
 		firstTry = false
 		return
 	})

@@ -143,12 +162,16 @@ func (c *Deployer) SetStatusIterations(cd *flaggerv1.Canary, val int) error {
 }

 // SetStatusPhase updates the canary status phase
-func (c *Deployer) SetStatusPhase(cd *flaggerv1.Canary, phase flaggerv1.CanaryPhase) error {
+func (c *DeploymentController) SetStatusPhase(cd *flaggerv1.Canary, phase flaggerv1.CanaryPhase) error {
+	return setStatusPhase(c.flaggerClient, cd, phase)
+}
+
+func setStatusPhase(flaggerClient versioned.Interface, cd *flaggerv1.Canary, phase flaggerv1.CanaryPhase) error {
 	firstTry := true
 	err := retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
 		var selErr error
 		if !firstTry {
-			cd, selErr = c.FlaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).Get(cd.GetName(), metav1.GetOptions{})
+			cd, selErr = flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).Get(cd.GetName(), metav1.GetOptions{})
 			if selErr != nil {
 				return selErr
 			}

@@ -167,11 +190,11 @@ func (c *Deployer) SetStatusPhase(cd *flaggerv1.Canary, phase flaggerv1.CanaryPhase) error {
 			cdCopy.Status.LastPromotedSpec = cd.Status.LastAppliedSpec
 		}

-		if ok, conditions := c.MakeStatusConditions(cdCopy.Status, phase); ok {
+		if ok, conditions := MakeStatusConditions(cdCopy.Status, phase); ok {
 			cdCopy.Status.Conditions = conditions
 		}

-		_, err = c.FlaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
+		_, err = flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
 		firstTry = false
 		return
 	})

@@ -181,8 +204,8 @@ func (c *Deployer) SetStatusPhase(cd *flaggerv1.Canary, phase flaggerv1.CanaryPhase) error {
 	return nil
 }

-// GetStatusCondition returns a condition based on type
-func (c *Deployer) getStatusCondition(status flaggerv1.CanaryStatus, conditionType flaggerv1.CanaryConditionType) *flaggerv1.CanaryCondition {
+// getStatusCondition returns a condition based on type
+func getStatusCondition(status flaggerv1.CanaryStatus, conditionType flaggerv1.CanaryConditionType) *flaggerv1.CanaryCondition {
 	for i := range status.Conditions {
 		c := status.Conditions[i]
 		if c.Type == conditionType {

@@ -193,9 +216,9 @@ func getStatusCondition(status flaggerv1.CanaryStatus, conditionType flaggerv1.CanaryConditionType) *flaggerv1.CanaryCondition {
 }

 // MakeStatusCondition updates the canary status conditions based on canary phase
-func (c *Deployer) MakeStatusConditions(canaryStatus flaggerv1.CanaryStatus,
+func MakeStatusConditions(canaryStatus flaggerv1.CanaryStatus,
 	phase flaggerv1.CanaryPhase) (bool, []flaggerv1.CanaryCondition) {
-	currentCondition := c.getStatusCondition(canaryStatus, flaggerv1.PromotedType)
+	currentCondition := getStatusCondition(canaryStatus, flaggerv1.PromotedType)

 	message := "New deployment detected, starting initialization."
 	status := corev1.ConditionUnknown
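Every setStatus* helper above shares one shape: client-go's RetryOnConflict loop, which refetches the Canary when an update hits a write conflict, mutates a deep copy, and calls UpdateStatus. Stripped to its skeleton (the status field being set varies per helper; cd, val, and flaggerClient are assumed to be in scope):

firstTry := true
err := retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
	if !firstTry {
		// another writer won the race; reload the latest Canary
		cd, err = flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).Get(cd.GetName(), metav1.GetOptions{})
		if err != nil {
			return err
		}
	}
	cdCopy := cd.DeepCopy()
	cdCopy.Status.CanaryWeight = val // whichever status field the helper owns
	_, err = flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
	firstTry = false
	return
})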
@@ -45,9 +45,9 @@ type Controller struct {
 	logger          *zap.SugaredLogger
 	canaries        *sync.Map
 	jobs            map[string]CanaryJob
-	deployer        canary.Deployer
 	recorder        metrics.Recorder
 	notifier        notifier.Interface
+	canaryFactory   *canary.Factory
 	routerFactory   *router.Factory
 	observerFactory *metrics.Factory
 	meshProvider    string

@@ -61,11 +61,11 @@ func NewController(
 	flaggerWindow time.Duration,
 	logger *zap.SugaredLogger,
 	notifier notifier.Interface,
+	canaryFactory *canary.Factory,
 	routerFactory *router.Factory,
 	observerFactory *metrics.Factory,
 	meshProvider string,
 	version string,
-	labels []string,
 ) *Controller {
 	logger.Debug("Creating event broadcaster")
 	flaggerscheme.AddToScheme(scheme.Scheme)

@@ -76,19 +76,6 @@ func NewController(
 	})
 	eventRecorder := eventBroadcaster.NewRecorder(
 		scheme.Scheme, corev1.EventSource{Component: controllerAgentName})

-	deployer := canary.Deployer{
-		Logger:        logger,
-		KubeClient:    kubeClient,
-		FlaggerClient: flaggerClient,
-		Labels:        labels,
-		ConfigTracker: canary.ConfigTracker{
-			Logger:        logger,
-			KubeClient:    kubeClient,
-			FlaggerClient: flaggerClient,
-		},
-	}
-
 	recorder := metrics.NewRecorder(controllerAgentName, true)
 	recorder.SetInfo(version, meshProvider)

@@ -104,10 +91,10 @@ func NewController(
 		canaries:        new(sync.Map),
 		jobs:            map[string]CanaryJob{},
 		flaggerWindow:   flaggerWindow,
-		deployer:        deployer,
 		observerFactory: observerFactory,
 		recorder:        recorder,
 		notifier:        notifier,
+		canaryFactory:   canaryFactory,
 		routerFactory:   routerFactory,
 		meshProvider:    meshProvider,
 	}

@@ -218,7 +205,7 @@ func (c *Controller) syncHandler(key string) error {

 	// set status condition for new canaries
 	if cd.Status.Conditions == nil {
-		if ok, conditions := c.deployer.MakeStatusConditions(cd.Status, flaggerv1.CanaryPhaseInitializing); ok {
+		if ok, conditions := canary.MakeStatusConditions(cd.Status, flaggerv1.CanaryPhaseInitializing); ok {
 			cdCopy := cd.DeepCopy()
 			cdCopy.Status.Conditions = conditions
 			cdCopy.Status.LastTransitionTime = metav1.Now()
@@ -10,6 +10,7 @@ import (
hpav2 "k8s.io/api/autoscaling/v2beta1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/record"
@@ -37,7 +38,7 @@ type Mocks struct {
kubeClient kubernetes.Interface
meshClient clientset.Interface
flaggerClient clientset.Interface
deployer canary.Deployer
deployer canary.Controller
ctrl *Controller
logger *zap.SugaredLogger
router router.Interface
@@ -52,6 +53,7 @@ func SetupMocks(c *flaggerv1.Canary) Mocks {
// init kube clientset and register mock objects
kubeClient := fake.NewSimpleClientset(
newTestDeployment(),
newTestService(),
newTestHPA(),
NewTestConfigMap(),
NewTestConfigMapEnv(),
@@ -63,19 +65,6 @@ func SetupMocks(c *flaggerv1.Canary) Mocks {

logger, _ := logger.NewLogger("debug")

// init controller helpers
deployer := canary.Deployer{
Logger: logger,
KubeClient: kubeClient,
FlaggerClient: flaggerClient,
Labels: []string{"app", "name"},
ConfigTracker: canary.ConfigTracker{
Logger: logger,
KubeClient: kubeClient,
FlaggerClient: flaggerClient,
},
}

// init controller
flaggerInformerFactory := informers.NewSharedInformerFactory(flaggerClient, noResyncPeriodFunc())
flaggerInformer := flaggerInformerFactory.Flagger().V1alpha3().Canaries()
@@ -86,6 +75,14 @@ func SetupMocks(c *flaggerv1.Canary) Mocks {
// init observer
observerFactory, _ := metrics.NewFactory("fake", "istio", 5*time.Second)

// init canary factory
configTracker := canary.ConfigTracker{
Logger: logger,
KubeClient: kubeClient,
FlaggerClient: flaggerClient,
}
canaryFactory := canary.NewFactory(kubeClient, flaggerClient, configTracker, []string{"app", "name"}, logger)

ctrl := &Controller{
kubeClient: kubeClient,
istioClient: flaggerClient,
@@ -97,7 +94,7 @@ func SetupMocks(c *flaggerv1.Canary) Mocks {
logger: logger,
canaries: new(sync.Map),
flaggerWindow: time.Second,
deployer: deployer,
canaryFactory: canaryFactory,
observerFactory: observerFactory,
recorder: metrics.NewRecorder(controllerAgentName, false),
routerFactory: rf,
@@ -108,7 +105,7 @@ func SetupMocks(c *flaggerv1.Canary) Mocks {

return Mocks{
canary: c,
deployer: deployer,
deployer: canaryFactory.Controller("Deployment"),
logger: logger,
flaggerClient: flaggerClient,
meshClient: flaggerClient,
@@ -560,6 +557,58 @@ func newTestDeploymentV2() *appsv1.Deployment {
return d
}

func newTestService() *corev1.Service {
d := &corev1.Service{
TypeMeta: metav1.TypeMeta{APIVersion: appsv1.SchemeGroupVersion.String()},
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "podinfo",
},
Spec: corev1.ServiceSpec{
Selector: map[string]string{
"app": "podinfo",
},
Type: corev1.ServiceTypeClusterIP,
Ports: []corev1.ServicePort{
{
Name: "http",
Port: 9898,
Protocol: corev1.ProtocolTCP,
TargetPort: intstr.FromString("http"),
},
},
},
}

return d
}

func newTestServiceV2() *corev1.Service {
d := &corev1.Service{
TypeMeta: metav1.TypeMeta{APIVersion: appsv1.SchemeGroupVersion.String()},
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "podinfo",
},
Spec: corev1.ServiceSpec{
Selector: map[string]string{
"app": "podinfo-v2",
},
Type: corev1.ServiceTypeClusterIP,
Ports: []corev1.ServicePort{
{
Name: "http",
Port: 9898,
Protocol: corev1.ProtocolTCP,
TargetPort: intstr.FromString("http"),
},
},
},
}

return d
}

func newTestHPA() *hpav2.HorizontalPodAutoscaler {
h := &hpav2.HorizontalPodAutoscaler{
TypeMeta: metav1.TypeMeta{APIVersion: hpav2.SchemeGroupVersion.String()},
@@ -2,13 +2,14 @@ package controller

import (
"fmt"
"github.com/weaveworks/flagger/pkg/metrics"
"strings"
"time"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
"github.com/weaveworks/flagger/pkg/canary"
"github.com/weaveworks/flagger/pkg/metrics"
"github.com/weaveworks/flagger/pkg/router"
)

@@ -89,43 +90,51 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
return
}

primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name)

// override the global provider if one is specified in the canary spec
provider := c.meshProvider
if cd.Spec.Provider != "" {
provider = cd.Spec.Provider
}

// create primary deployment and hpa if needed
// skip primary check for Istio since the deployment will become ready after the ClusterIP are created
skipPrimaryCheck := false
if skipLivenessChecks || strings.Contains(provider, "istio") || strings.Contains(provider, "appmesh") {
skipPrimaryCheck = true
}
labelSelector, ports, err := c.deployer.Initialize(cd, skipPrimaryCheck)
// init controller based on target kind
canaryController := c.canaryFactory.Controller(cd.Spec.TargetRef.Kind)
labelSelector, ports, err := canaryController.GetMetadata(cd)
if err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}

// init routers
meshRouter := c.routerFactory.MeshRouter(provider)

// create or update ClusterIP services
if err := c.routerFactory.KubernetesRouter(labelSelector, map[string]string{}, ports).Reconcile(cd); err != nil {
// init Kubernetes router
router := c.routerFactory.KubernetesRouter(cd.Spec.TargetRef.Kind, labelSelector, map[string]string{}, ports)
if err := router.Initialize(cd); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}

// create or update virtual service
// create primary deployment and hpa
err = canaryController.Initialize(cd, skipLivenessChecks)
if err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}

// init mesh router
meshRouter := c.routerFactory.MeshRouter(provider)

// create or update svc
if err := router.Reconcile(cd); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}

// create or update mesh routes
if err := meshRouter.Reconcile(cd); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}

// check for deployment spec or configs changes
shouldAdvance, err := c.shouldAdvance(cd)
// check for changes
shouldAdvance, err := c.shouldAdvance(cd, canaryController)
if err != nil {
c.recordEventWarningf(cd, "%v", err)
return
@@ -137,7 +146,7 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
}

// check gates
if isApproved := c.runConfirmRolloutHooks(cd); !isApproved {
if isApproved := c.runConfirmRolloutHooks(cd, canaryController); !isApproved {
return
}

@@ -148,8 +157,8 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
}

// check primary deployment status
if !skipLivenessChecks {
if _, err := c.deployer.IsPrimaryReady(cd); err != nil {
if !skipLivenessChecks && !cd.Spec.SkipAnalysis {
if _, err := canaryController.IsPrimaryReady(cd); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
@@ -165,12 +174,12 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
c.recorder.SetWeight(cd, primaryWeight, canaryWeight)

// check if canary analysis should start (canary revision has changes) or continue
if ok := c.checkCanaryStatus(cd, shouldAdvance); !ok {
if ok := c.checkCanaryStatus(cd, canaryController, shouldAdvance); !ok {
return
}

// check if canary revision changed during analysis
if restart := c.hasCanaryRevisionChanged(cd); restart {
if restart := c.hasCanaryRevisionChanged(cd, canaryController); restart {
c.recordEventInfof(cd, "New revision detected! Restarting analysis for %s.%s",
cd.Spec.TargetRef.Name, cd.Namespace)

@@ -189,21 +198,17 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
FailedChecks: 0,
Iterations: 0,
}
if err := c.deployer.SyncStatus(cd, status); err != nil {
if err := canaryController.SyncStatus(cd, status); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
return
}

defer func() {
c.recorder.SetDuration(cd, time.Since(begin))
}()

// check canary deployment status
var retriable = true
if !skipLivenessChecks {
retriable, err = c.deployer.IsCanaryReady(cd)
retriable, err = canaryController.IsCanaryReady(cd)
if err != nil && retriable {
c.recordEventWarningf(cd, "%v", err)
return
@@ -211,7 +216,7 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
}

// check if analysis should be skipped
if skip := c.shouldSkipAnalysis(cd, meshRouter, primaryWeight, canaryWeight); skip {
if skip := c.shouldSkipAnalysis(cd, canaryController, meshRouter, primaryWeight, canaryWeight); skip {
return
}

@@ -227,7 +232,7 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
}

// update status phase
if err := c.deployer.SetStatusPhase(cd, flaggerv1.CanaryPhaseFinalising); err != nil {
if err := canaryController.SetStatusPhase(cd, flaggerv1.CanaryPhaseFinalising); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
@@ -237,13 +242,13 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh

// scale canary to zero if promotion has finished
if cd.Status.Phase == flaggerv1.CanaryPhaseFinalising {
if err := c.deployer.Scale(cd, 0); err != nil {
if err := canaryController.Scale(cd, 0); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}

// set status to succeeded
if err := c.deployer.SetStatusPhase(cd, flaggerv1.CanaryPhaseSucceeded); err != nil {
if err := canaryController.SetStatusPhase(cd, flaggerv1.CanaryPhaseSucceeded); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
@@ -286,13 +291,13 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
cd.Name, cd.Namespace)

// shutdown canary
if err := c.deployer.Scale(cd, 0); err != nil {
if err := canaryController.Scale(cd, 0); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}

// mark canary as failed
if err := c.deployer.SyncStatus(cd, flaggerv1.CanaryStatus{Phase: flaggerv1.CanaryPhaseFailed, CanaryWeight: 0}); err != nil {
if err := canaryController.SyncStatus(cd, flaggerv1.CanaryStatus{Phase: flaggerv1.CanaryPhaseFailed, CanaryWeight: 0}); err != nil {
c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Errorf("%v", err)
return
}
@@ -302,6 +307,11 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
return
}

// record analysis duration
defer func() {
c.recorder.SetDuration(cd, time.Since(begin))
}()

// check if the canary success rate is above the threshold
// skip check if no traffic is routed or mirrored to canary
if canaryWeight == 0 && cd.Status.Iterations == 0 &&
@@ -310,15 +320,15 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh

// run pre-rollout web hooks
if ok := c.runPreRolloutHooks(cd); !ok {
if err := c.deployer.SetStatusFailedChecks(cd, cd.Status.FailedChecks+1); err != nil {
if err := canaryController.SetStatusFailedChecks(cd, cd.Status.FailedChecks+1); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
return
}
} else {
if ok := c.analyseCanary(cd); !ok {
if err := c.deployer.SetStatusFailedChecks(cd, cd.Status.FailedChecks+1); err != nil {
if ok := c.runAnalysis(cd); !ok {
if err := canaryController.SetStatusFailedChecks(cd, cd.Status.FailedChecks+1); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
@@ -341,255 +351,267 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh

// strategy: A/B testing
if len(cd.Spec.CanaryAnalysis.Match) > 0 && cd.Spec.CanaryAnalysis.Iterations > 0 {
// route traffic to canary and increment iterations
if cd.Spec.CanaryAnalysis.Iterations > cd.Status.Iterations {
if err := meshRouter.SetRoutes(cd, 0, 100, false); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
c.recorder.SetWeight(cd, 0, 100)

if err := c.deployer.SetStatusIterations(cd, cd.Status.Iterations+1); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
c.recordEventInfof(cd, "Advance %s.%s canary iteration %v/%v",
cd.Name, cd.Namespace, cd.Status.Iterations+1, cd.Spec.CanaryAnalysis.Iterations)
return
}

// check promotion gate
if promote := c.runConfirmPromotionHooks(cd); !promote {
return
}

// promote canary - max iterations reached
if cd.Spec.CanaryAnalysis.Iterations == cd.Status.Iterations {
c.recordEventInfof(cd, "Copying %s.%s template spec to %s.%s",
cd.Spec.TargetRef.Name, cd.Namespace, primaryName, cd.Namespace)
if err := c.deployer.Promote(cd); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}

// update status phase
if err := c.deployer.SetStatusPhase(cd, flaggerv1.CanaryPhasePromoting); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
return
}

c.runAB(cd, canaryController, meshRouter, provider)
return
}

// strategy: Blue/Green
if cd.Spec.CanaryAnalysis.Iterations > 0 {
// increment iterations
if cd.Spec.CanaryAnalysis.Iterations > cd.Status.Iterations {
// If in "mirror" mode, mirror requests during the entire B/G canary test
if provider != "kubernetes" &&
cd.Spec.CanaryAnalysis.Mirror == true && mirrored == false {
if err := meshRouter.SetRoutes(cd, 100, 0, true); err != nil {
c.recordEventWarningf(cd, "%v", err)
}
c.logger.With("canary", fmt.Sprintf("%s.%s", name, namespace)).
Infof("Start traffic mirroring")
}
if err := c.deployer.SetStatusIterations(cd, cd.Status.Iterations+1); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
c.recordEventInfof(cd, "Advance %s.%s canary iteration %v/%v",
cd.Name, cd.Namespace, cd.Status.Iterations+1, cd.Spec.CanaryAnalysis.Iterations)
return
}

// check promotion gate
if promote := c.runConfirmPromotionHooks(cd); !promote {
return
}

// route all traffic to canary - max iterations reached
if cd.Spec.CanaryAnalysis.Iterations == cd.Status.Iterations {
if provider != "kubernetes" {
if cd.Spec.CanaryAnalysis.Mirror {
c.recordEventInfof(cd, "Stop traffic mirroring and route all traffic to canary")
} else {
c.recordEventInfof(cd, "Routing all traffic to canary")
}
if err := meshRouter.SetRoutes(cd, 0, 100, false); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
c.recorder.SetWeight(cd, 0, 100)
}

// increment iterations
if err := c.deployer.SetStatusIterations(cd, cd.Status.Iterations+1); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
return
}

// promote canary - max iterations reached
if cd.Spec.CanaryAnalysis.Iterations < cd.Status.Iterations {
c.recordEventInfof(cd, "Copying %s.%s template spec to %s.%s",
cd.Spec.TargetRef.Name, cd.Namespace, primaryName, cd.Namespace)
if err := c.deployer.Promote(cd); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}

// update status phase
if err := c.deployer.SetStatusPhase(cd, flaggerv1.CanaryPhasePromoting); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
return
}

c.runBlueGreen(cd, canaryController, meshRouter, provider, mirrored)
return
}

// strategy: Canary progressive traffic increase
if cd.Spec.CanaryAnalysis.StepWeight > 0 {
// increase traffic weight
if canaryWeight < maxWeight {
// If in "mirror" mode, do one step of mirroring before shifting traffic to canary.
// When mirroring, all requests go to primary and canary, but only responses from
// primary go back to the user.
if cd.Spec.CanaryAnalysis.Mirror && canaryWeight == 0 {
if mirrored == false {
mirrored = true
primaryWeight = 100
canaryWeight = 0
} else {
mirrored = false
primaryWeight = 100 - cd.Spec.CanaryAnalysis.StepWeight
canaryWeight = cd.Spec.CanaryAnalysis.StepWeight
}
c.logger.With("canary", fmt.Sprintf("%s.%s", name, namespace)).
Infof("Running mirror step %d/%d/%t", primaryWeight, canaryWeight, mirrored)
} else {

primaryWeight -= cd.Spec.CanaryAnalysis.StepWeight
if primaryWeight < 0 {
primaryWeight = 0
}
canaryWeight += cd.Spec.CanaryAnalysis.StepWeight
if canaryWeight > 100 {
canaryWeight = 100
}
}

if err := meshRouter.SetRoutes(cd, primaryWeight, canaryWeight, mirrored); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}

if err := c.deployer.SetStatusWeight(cd, canaryWeight); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}

c.recorder.SetWeight(cd, primaryWeight, canaryWeight)
c.recordEventInfof(cd, "Advance %s.%s canary weight %v", cd.Name, cd.Namespace, canaryWeight)
return
}

// promote canary - max weight reached
if canaryWeight >= maxWeight {
// check promotion gate
if promote := c.runConfirmPromotionHooks(cd); !promote {
return
}

// update primary spec
c.recordEventInfof(cd, "Copying %s.%s template spec to %s.%s",
cd.Spec.TargetRef.Name, cd.Namespace, primaryName, cd.Namespace)
if err := c.deployer.Promote(cd); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}

// update status phase
if err := c.deployer.SetStatusPhase(cd, flaggerv1.CanaryPhasePromoting); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}

return
}

c.runCanary(cd, canaryController, meshRouter, provider, mirrored, canaryWeight, primaryWeight, maxWeight)
}

}

func (c *Controller) shouldSkipAnalysis(cd *flaggerv1.Canary, meshRouter router.Interface, primaryWeight int, canaryWeight int) bool {
if !cd.Spec.SkipAnalysis {
func (c *Controller) runCanary(canary *flaggerv1.Canary, canaryController canary.Controller, meshRouter router.Interface, provider string, mirrored bool, canaryWeight int, primaryWeight int, maxWeight int) {
primaryName := fmt.Sprintf("%s-primary", canary.Spec.TargetRef.Name)

// increase traffic weight
if canaryWeight < maxWeight {
// If in "mirror" mode, do one step of mirroring before shifting traffic to canary.
// When mirroring, all requests go to primary and canary, but only responses from
// primary go back to the user.
if canary.Spec.CanaryAnalysis.Mirror && canaryWeight == 0 {
if mirrored == false {
mirrored = true
primaryWeight = 100
canaryWeight = 0
} else {
mirrored = false
primaryWeight = 100 - canary.Spec.CanaryAnalysis.StepWeight
canaryWeight = canary.Spec.CanaryAnalysis.StepWeight
}
c.logger.With("canary", fmt.Sprintf("%s.%s", canary.Name, canary.Namespace)).
Infof("Running mirror step %d/%d/%t", primaryWeight, canaryWeight, mirrored)
} else {

primaryWeight -= canary.Spec.CanaryAnalysis.StepWeight
if primaryWeight < 0 {
primaryWeight = 0
}
canaryWeight += canary.Spec.CanaryAnalysis.StepWeight
if canaryWeight > 100 {
canaryWeight = 100
}
}

if err := meshRouter.SetRoutes(canary, primaryWeight, canaryWeight, mirrored); err != nil {
c.recordEventWarningf(canary, "%v", err)
return
}

if err := canaryController.SetStatusWeight(canary, canaryWeight); err != nil {
c.recordEventWarningf(canary, "%v", err)
return
}

c.recorder.SetWeight(canary, primaryWeight, canaryWeight)
c.recordEventInfof(canary, "Advance %s.%s canary weight %v", canary.Name, canary.Namespace, canaryWeight)
return
}

// promote canary - max weight reached
if canaryWeight >= maxWeight {
// check promotion gate
if promote := c.runConfirmPromotionHooks(canary); !promote {
return
}

// update primary spec
c.recordEventInfof(canary, "Copying %s.%s template spec to %s.%s",
canary.Spec.TargetRef.Name, canary.Namespace, primaryName, canary.Namespace)
if err := canaryController.Promote(canary); err != nil {
c.recordEventWarningf(canary, "%v", err)
return
}

// update status phase
if err := canaryController.SetStatusPhase(canary, flaggerv1.CanaryPhasePromoting); err != nil {
c.recordEventWarningf(canary, "%v", err)
return
}
}
}
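
The mirror step in `runCanary` is easy to misread in diff form. Below is a minimal, stand-alone restatement of the weight progression, assuming StepWeight=10 and mirroring enabled; `nextStep` is a hypothetical helper written for illustration, not part of Flagger's API:

```go
package main

import "fmt"

// nextStep restates runCanary's weight logic: one pass of pure mirroring,
// then mirroring stops and the weight ladder begins.
func nextStep(primary, canary, step int, mirror, mirrored bool) (int, int, bool) {
	if mirror && canary == 0 {
		if !mirrored {
			// first pass: mirror traffic, keep all live traffic on primary
			return 100, 0, true
		}
		// second pass: stop mirroring and take the first weight step
		return 100 - step, step, false
	}
	p, c := primary-step, canary+step
	if p < 0 {
		p = 0
	}
	if c > 100 {
		c = 100
	}
	return p, c, false
}

func main() {
	p, c, m := 100, 0, false
	for i := 0; i < 4; i++ {
		p, c, m = nextStep(p, c, 10, true, m)
		fmt.Printf("step %d: primary=%d canary=%d mirrored=%t\n", i+1, p, c, m)
	}
	// step 1: primary=100 canary=0 mirrored=true
	// step 2: primary=90  canary=10 mirrored=false
	// step 3: primary=80  canary=20 mirrored=false
	// step 4: primary=70  canary=30 mirrored=false
}
```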

func (c *Controller) runAB(canary *flaggerv1.Canary, canaryController canary.Controller, meshRouter router.Interface, provider string) {
primaryName := fmt.Sprintf("%s-primary", canary.Spec.TargetRef.Name)

// route traffic to canary and increment iterations
if canary.Spec.CanaryAnalysis.Iterations > canary.Status.Iterations {
if err := meshRouter.SetRoutes(canary, 0, 100, false); err != nil {
c.recordEventWarningf(canary, "%v", err)
return
}
c.recorder.SetWeight(canary, 0, 100)

if err := canaryController.SetStatusIterations(canary, canary.Status.Iterations+1); err != nil {
c.recordEventWarningf(canary, "%v", err)
return
}
c.recordEventInfof(canary, "Advance %s.%s canary iteration %v/%v",
canary.Name, canary.Namespace, canary.Status.Iterations+1, canary.Spec.CanaryAnalysis.Iterations)
return
}

// check promotion gate
if promote := c.runConfirmPromotionHooks(canary); !promote {
return
}

// promote canary - max iterations reached
if canary.Spec.CanaryAnalysis.Iterations == canary.Status.Iterations {
c.recordEventInfof(canary, "Copying %s.%s template spec to %s.%s",
canary.Spec.TargetRef.Name, canary.Namespace, primaryName, canary.Namespace)
if err := canaryController.Promote(canary); err != nil {
c.recordEventWarningf(canary, "%v", err)
return
}

// update status phase
if err := canaryController.SetStatusPhase(canary, flaggerv1.CanaryPhasePromoting); err != nil {
c.recordEventWarningf(canary, "%v", err)
return
}
}
}

func (c *Controller) runBlueGreen(canary *flaggerv1.Canary, canaryController canary.Controller, meshRouter router.Interface, provider string, mirrored bool) {
primaryName := fmt.Sprintf("%s-primary", canary.Spec.TargetRef.Name)

// increment iterations
if canary.Spec.CanaryAnalysis.Iterations > canary.Status.Iterations {
// If in "mirror" mode, mirror requests during the entire B/G canary test
if provider != "kubernetes" &&
canary.Spec.CanaryAnalysis.Mirror == true && mirrored == false {
if err := meshRouter.SetRoutes(canary, 100, 0, true); err != nil {
c.recordEventWarningf(canary, "%v", err)
}
c.logger.With("canary", fmt.Sprintf("%s.%s", canary.Name, canary.Namespace)).
Infof("Start traffic mirroring")
}
if err := canaryController.SetStatusIterations(canary, canary.Status.Iterations+1); err != nil {
c.recordEventWarningf(canary, "%v", err)
return
}
c.recordEventInfof(canary, "Advance %s.%s canary iteration %v/%v",
canary.Name, canary.Namespace, canary.Status.Iterations+1, canary.Spec.CanaryAnalysis.Iterations)
return
}

// check promotion gate
if promote := c.runConfirmPromotionHooks(canary); !promote {
return
}

// route all traffic to canary - max iterations reached
if canary.Spec.CanaryAnalysis.Iterations == canary.Status.Iterations {
if provider != "kubernetes" {
if canary.Spec.CanaryAnalysis.Mirror {
c.recordEventInfof(canary, "Stop traffic mirroring and route all traffic to canary")
} else {
c.recordEventInfof(canary, "Routing all traffic to canary")
}
if err := meshRouter.SetRoutes(canary, 0, 100, false); err != nil {
c.recordEventWarningf(canary, "%v", err)
return
}
c.recorder.SetWeight(canary, 0, 100)
}

// increment iterations
if err := canaryController.SetStatusIterations(canary, canary.Status.Iterations+1); err != nil {
c.recordEventWarningf(canary, "%v", err)
return
}
return
}

// promote canary - max iterations reached
if canary.Spec.CanaryAnalysis.Iterations < canary.Status.Iterations {
c.recordEventInfof(canary, "Copying %s.%s template spec to %s.%s",
canary.Spec.TargetRef.Name, canary.Namespace, primaryName, canary.Namespace)
if err := canaryController.Promote(canary); err != nil {
c.recordEventWarningf(canary, "%v", err)
return
}

// update status phase
if err := canaryController.SetStatusPhase(canary, flaggerv1.CanaryPhasePromoting); err != nil {
c.recordEventWarningf(canary, "%v", err)
return
}
}

}
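
Read together, the Blue/Green ladder in `runBlueGreen` has three stages keyed off the iteration counter. A compact illustration follows; `stage` is a hypothetical restatement for clarity, not Flagger code, and it assumes `Iterations: 5`:

```go
package main

import "fmt"

// stage restates runBlueGreen's branching on the iteration counter.
func stage(specIterations, statusIterations int) string {
	switch {
	case specIterations > statusIterations:
		return "testing: traffic stays on primary (optionally mirrored), iterations++"
	case specIterations == statusIterations:
		return "switch: route 100% of traffic to the canary, iterations++"
	default:
		return "promote: copy canary spec to primary, phase -> Promoting"
	}
}

func main() {
	for status := 0; status <= 6; status++ {
		fmt.Printf("iterations %d/5: %s\n", status, stage(5, status))
	}
}
```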

func (c *Controller) shouldSkipAnalysis(canary *flaggerv1.Canary, canaryController canary.Controller, meshRouter router.Interface, primaryWeight int, canaryWeight int) bool {
if !canary.Spec.SkipAnalysis {
return false
}

// route all traffic to primary
primaryWeight = 100
canaryWeight = 0
if err := meshRouter.SetRoutes(cd, primaryWeight, canaryWeight, false); err != nil {
c.recordEventWarningf(cd, "%v", err)
if err := meshRouter.SetRoutes(canary, primaryWeight, canaryWeight, false); err != nil {
c.recordEventWarningf(canary, "%v", err)
return false
}
c.recorder.SetWeight(cd, primaryWeight, canaryWeight)
c.recorder.SetWeight(canary, primaryWeight, canaryWeight)

// copy spec and configs from canary to primary
c.recordEventInfof(cd, "Copying %s.%s template spec to %s-primary.%s",
cd.Spec.TargetRef.Name, cd.Namespace, cd.Spec.TargetRef.Name, cd.Namespace)
if err := c.deployer.Promote(cd); err != nil {
c.recordEventWarningf(cd, "%v", err)
c.recordEventInfof(canary, "Copying %s.%s template spec to %s-primary.%s",
canary.Spec.TargetRef.Name, canary.Namespace, canary.Spec.TargetRef.Name, canary.Namespace)
if err := canaryController.Promote(canary); err != nil {
c.recordEventWarningf(canary, "%v", err)
return false
}

// shutdown canary
if err := c.deployer.Scale(cd, 0); err != nil {
c.recordEventWarningf(cd, "%v", err)
if err := canaryController.Scale(canary, 0); err != nil {
c.recordEventWarningf(canary, "%v", err)
return false
}

// update status phase
if err := c.deployer.SetStatusPhase(cd, flaggerv1.CanaryPhaseSucceeded); err != nil {
c.recordEventWarningf(cd, "%v", err)
if err := canaryController.SetStatusPhase(canary, flaggerv1.CanaryPhaseSucceeded); err != nil {
c.recordEventWarningf(canary, "%v", err)
return false
}

// notify
c.recorder.SetStatus(cd, flaggerv1.CanaryPhaseSucceeded)
c.recordEventInfof(cd, "Promotion completed! Canary analysis was skipped for %s.%s",
cd.Spec.TargetRef.Name, cd.Namespace)
c.sendNotification(cd, "Canary analysis was skipped, promotion finished.",
c.recorder.SetStatus(canary, flaggerv1.CanaryPhaseSucceeded)
c.recordEventInfof(canary, "Promotion completed! Canary analysis was skipped for %s.%s",
canary.Spec.TargetRef.Name, canary.Namespace)
c.sendNotification(canary, "Canary analysis was skipped, promotion finished.",
false, false)

return true
}

func (c *Controller) shouldAdvance(cd *flaggerv1.Canary) (bool, error) {
if cd.Status.LastAppliedSpec == "" ||
cd.Status.Phase == flaggerv1.CanaryPhaseInitializing ||
cd.Status.Phase == flaggerv1.CanaryPhaseProgressing ||
cd.Status.Phase == flaggerv1.CanaryPhaseWaiting ||
cd.Status.Phase == flaggerv1.CanaryPhasePromoting ||
cd.Status.Phase == flaggerv1.CanaryPhaseFinalising {
func (c *Controller) shouldAdvance(canary *flaggerv1.Canary, canaryController canary.Controller) (bool, error) {
if canary.Status.LastAppliedSpec == "" ||
canary.Status.Phase == flaggerv1.CanaryPhaseInitializing ||
canary.Status.Phase == flaggerv1.CanaryPhaseProgressing ||
canary.Status.Phase == flaggerv1.CanaryPhaseWaiting ||
canary.Status.Phase == flaggerv1.CanaryPhasePromoting ||
canary.Status.Phase == flaggerv1.CanaryPhaseFinalising {
return true, nil
}

newDep, err := c.deployer.HasDeploymentChanged(cd)
newTarget, err := canaryController.HasTargetChanged(canary)
if err != nil {
return false, err
}
if newDep {
return newDep, nil
if newTarget {
return newTarget, nil
}

newCfg, err := c.deployer.ConfigTracker.HasConfigChanged(cd)
newCfg, err := canaryController.HaveDependenciesChanged(canary)
if err != nil {
return false, err
}
@@ -598,63 +620,63 @@ func (c *Controller) shouldAdvance(cd *flaggerv1.Canary) (bool, error) {

}

func (c *Controller) checkCanaryStatus(cd *flaggerv1.Canary, shouldAdvance bool) bool {
c.recorder.SetStatus(cd, cd.Status.Phase)
if cd.Status.Phase == flaggerv1.CanaryPhaseProgressing ||
cd.Status.Phase == flaggerv1.CanaryPhasePromoting ||
cd.Status.Phase == flaggerv1.CanaryPhaseFinalising {
func (c *Controller) checkCanaryStatus(canary *flaggerv1.Canary, canaryController canary.Controller, shouldAdvance bool) bool {
c.recorder.SetStatus(canary, canary.Status.Phase)
if canary.Status.Phase == flaggerv1.CanaryPhaseProgressing ||
canary.Status.Phase == flaggerv1.CanaryPhasePromoting ||
canary.Status.Phase == flaggerv1.CanaryPhaseFinalising {
return true
}

if cd.Status.Phase == "" || cd.Status.Phase == flaggerv1.CanaryPhaseInitializing {
if err := c.deployer.SyncStatus(cd, flaggerv1.CanaryStatus{Phase: flaggerv1.CanaryPhaseInitialized}); err != nil {
c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Errorf("%v", err)
if canary.Status.Phase == "" || canary.Status.Phase == flaggerv1.CanaryPhaseInitializing {
if err := canaryController.SyncStatus(canary, flaggerv1.CanaryStatus{Phase: flaggerv1.CanaryPhaseInitialized}); err != nil {
c.logger.With("canary", fmt.Sprintf("%s.%s", canary.Name, canary.Namespace)).Errorf("%v", err)
return false
}
c.recorder.SetStatus(cd, flaggerv1.CanaryPhaseInitialized)
c.recordEventInfof(cd, "Initialization done! %s.%s", cd.Name, cd.Namespace)
c.sendNotification(cd, "New deployment detected, initialization completed.",
c.recorder.SetStatus(canary, flaggerv1.CanaryPhaseInitialized)
c.recordEventInfof(canary, "Initialization done! %s.%s", canary.Name, canary.Namespace)
c.sendNotification(canary, "New deployment detected, initialization completed.",
true, false)
return false
}

if shouldAdvance {
c.recordEventInfof(cd, "New revision detected! Scaling up %s.%s", cd.Spec.TargetRef.Name, cd.Namespace)
c.sendNotification(cd, "New revision detected, starting canary analysis.",
c.recordEventInfof(canary, "New revision detected! Scaling up %s.%s", canary.Spec.TargetRef.Name, canary.Namespace)
c.sendNotification(canary, "New revision detected, starting canary analysis.",
true, false)
if err := c.deployer.ScaleUp(cd); err != nil {
c.recordEventErrorf(cd, "%v", err)
if err := canaryController.ScaleFromZero(canary); err != nil {
c.recordEventErrorf(canary, "%v", err)
return false
}
if err := c.deployer.SyncStatus(cd, flaggerv1.CanaryStatus{Phase: flaggerv1.CanaryPhaseProgressing}); err != nil {
c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Errorf("%v", err)
if err := canaryController.SyncStatus(canary, flaggerv1.CanaryStatus{Phase: flaggerv1.CanaryPhaseProgressing}); err != nil {
c.logger.With("canary", fmt.Sprintf("%s.%s", canary.Name, canary.Namespace)).Errorf("%v", err)
return false
}
c.recorder.SetStatus(cd, flaggerv1.CanaryPhaseProgressing)
c.recorder.SetStatus(canary, flaggerv1.CanaryPhaseProgressing)
return false
}
return false
}

func (c *Controller) hasCanaryRevisionChanged(cd *flaggerv1.Canary) bool {
if cd.Status.Phase == flaggerv1.CanaryPhaseProgressing {
if diff, _ := c.deployer.HasDeploymentChanged(cd); diff {
func (c *Controller) hasCanaryRevisionChanged(canary *flaggerv1.Canary, canaryController canary.Controller) bool {
if canary.Status.Phase == flaggerv1.CanaryPhaseProgressing {
if diff, _ := canaryController.HasTargetChanged(canary); diff {
return true
}
if diff, _ := c.deployer.ConfigTracker.HasConfigChanged(cd); diff {
if diff, _ := canaryController.HaveDependenciesChanged(canary); diff {
return true
}
}
return false
}

func (c *Controller) runConfirmRolloutHooks(canary *flaggerv1.Canary) bool {
func (c *Controller) runConfirmRolloutHooks(canary *flaggerv1.Canary, canaryController canary.Controller) bool {
for _, webhook := range canary.Spec.CanaryAnalysis.Webhooks {
if webhook.Type == flaggerv1.ConfirmRolloutHook {
err := CallWebhook(canary.Name, canary.Namespace, flaggerv1.CanaryPhaseProgressing, webhook)
if err != nil {
if canary.Status.Phase != flaggerv1.CanaryPhaseWaiting {
if err := c.deployer.SetStatusPhase(canary, flaggerv1.CanaryPhaseWaiting); err != nil {
if err := canaryController.SetStatusPhase(canary, flaggerv1.CanaryPhaseWaiting); err != nil {
c.logger.With("canary", fmt.Sprintf("%s.%s", canary.Name, canary.Namespace)).Errorf("%v", err)
}
c.recordEventWarningf(canary, "Halt %s.%s advancement waiting for approval %s",
@@ -664,7 +686,7 @@ func (c *Controller) runConfirmRolloutHooks(canary *flaggerv1.Canary) bool {
return false
} else {
if canary.Status.Phase == flaggerv1.CanaryPhaseWaiting {
if err := c.deployer.SetStatusPhase(canary, flaggerv1.CanaryPhaseProgressing); err != nil {
if err := canaryController.SetStatusPhase(canary, flaggerv1.CanaryPhaseProgressing); err != nil {
c.logger.With("canary", fmt.Sprintf("%s.%s", canary.Name, canary.Namespace)).Errorf("%v", err)
return false
}
@@ -725,7 +747,7 @@ func (c *Controller) runPostRolloutHooks(canary *flaggerv1.Canary, phase flagger
return true
}

func (c *Controller) analyseCanary(r *flaggerv1.Canary) bool {
func (c *Controller) runAnalysis(r *flaggerv1.Canary) bool {
// run external checks
for _, webhook := range r.Spec.CanaryAnalysis.Webhooks {
if webhook.Type == "" || webhook.Type == flaggerv1.RolloutHook {
pkg/controller/scheduler_svc_test.go (new file, 166 lines)
@@ -0,0 +1,166 @@
package controller

import (
"testing"

hpav1 "k8s.io/api/autoscaling/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
)

func TestScheduler_ServicePromotion(t *testing.T) {
mocks := SetupMocks(newTestServiceCanary())

// init
mocks.ctrl.advanceCanary("podinfo", "default", true)

// check initialized status
c, err := mocks.flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}

if c.Status.Phase != flaggerv1.CanaryPhaseInitialized {
t.Errorf("Got canary state %v wanted %v", c.Status.Phase, flaggerv1.CanaryPhaseInitialized)
}

// update
svc2 := newTestServiceV2()
_, err = mocks.kubeClient.CoreV1().Services("default").Update(svc2)
if err != nil {
t.Fatal(err.Error())
}

// detect service spec changes
mocks.ctrl.advanceCanary("podinfo", "default", true)

primaryWeight, canaryWeight, mirrored, err := mocks.router.GetRoutes(mocks.canary)
if err != nil {
t.Fatal(err.Error())
}

primaryWeight = 60
canaryWeight = 40
err = mocks.router.SetRoutes(mocks.canary, primaryWeight, canaryWeight, mirrored)
if err != nil {
t.Fatal(err.Error())
}

// advance
mocks.ctrl.advanceCanary("podinfo", "default", true)

// check progressing status
c, err = mocks.flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}

if c.Status.Phase != flaggerv1.CanaryPhaseProgressing {
t.Errorf("Got canary state %v wanted %v", c.Status.Phase, flaggerv1.CanaryPhaseProgressing)
}

// promote
mocks.ctrl.advanceCanary("podinfo", "default", true)

// check promoting status
c, err = mocks.flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}

if c.Status.Phase != flaggerv1.CanaryPhasePromoting {
t.Errorf("Got canary state %v wanted %v", c.Status.Phase, flaggerv1.CanaryPhasePromoting)
}

// finalise
mocks.ctrl.advanceCanary("podinfo", "default", true)

primaryWeight, canaryWeight, mirrored, err = mocks.router.GetRoutes(mocks.canary)
if err != nil {
t.Fatal(err.Error())
}

if primaryWeight != 100 {
t.Errorf("Got primary route %v wanted %v", primaryWeight, 100)
}

if canaryWeight != 0 {
t.Errorf("Got canary route %v wanted %v", canaryWeight, 0)
}

if mirrored != false {
t.Errorf("Got mirrored %v wanted %v", mirrored, false)
}

primarySvc, err := mocks.kubeClient.CoreV1().Services("default").Get("podinfo-primary", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}

primaryLabelValue := primarySvc.Spec.Selector["app"]
canaryLabelValue := svc2.Spec.Selector["app"]
if primaryLabelValue != canaryLabelValue {
t.Errorf("Got primary selector label value %v wanted %v", primaryLabelValue, canaryLabelValue)
}

// check finalising status
c, err = mocks.flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}

if c.Status.Phase != flaggerv1.CanaryPhaseFinalising {
t.Errorf("Got canary state %v wanted %v", c.Status.Phase, flaggerv1.CanaryPhaseFinalising)
}

// scale canary to zero
mocks.ctrl.advanceCanary("podinfo", "default", true)

c, err = mocks.flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}

if c.Status.Phase != flaggerv1.CanaryPhaseSucceeded {
t.Errorf("Got canary state %v wanted %v", c.Status.Phase, flaggerv1.CanaryPhaseSucceeded)
}
}

func newTestServiceCanary() *flaggerv1.Canary {
cd := &flaggerv1.Canary{
TypeMeta: metav1.TypeMeta{APIVersion: flaggerv1.SchemeGroupVersion.String()},
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "podinfo",
},
Spec: flaggerv1.CanarySpec{
TargetRef: hpav1.CrossVersionObjectReference{
Name: "podinfo",
APIVersion: "core/v1",
Kind: "Service",
},
Service: flaggerv1.CanaryService{
Port: 9898,
},
CanaryAnalysis: flaggerv1.CanaryAnalysis{
Threshold: 10,
StepWeight: 10,
MaxWeight: 50,
Metrics: []flaggerv1.CanaryMetric{
{
Name: "istio_requests_total",
Threshold: 99,
Interval: "1m",
},
{
Name: "istio_request_duration_seconds_bucket",
Threshold: 500,
Interval: "1m",
},
},
},
},
}
return cd
}
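
Taken together, the assertions in this test define what promotion means for the Service target kind: the rollout is complete once `podinfo-primary` carries the canary Service's selector (here `app: podinfo-v2`). That selector copy is, as far as the test shows, what lets Flagger take over an already running Service without recreating it, matching the release note about takeover without disruption.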

@@ -34,9 +34,9 @@ func NewFactory(kubeConfig *restclient.Config, kubeClient kubernetes.Interface,
}
}

// KubernetesRouter returns a ClusterIP service router
func (factory *Factory) KubernetesRouter(labelSelector string, annotations map[string]string, ports map[string]int32) *KubernetesRouter {
return &KubernetesRouter{
// KubernetesDeploymentRouter returns a ClusterIP service router
func (factory *Factory) KubernetesRouter(kind string, labelSelector string, annotations map[string]string, ports map[string]int32) KubernetesRouter {
deploymentRouter := &KubernetesDeploymentRouter{
logger: factory.logger,
flaggerClient: factory.flaggerClient,
kubeClient: factory.kubeClient,
@@ -44,6 +44,16 @@ func (factory *Factory) KubernetesRouter(labelSelector string, annotations map[s
annotations: annotations,
ports: ports,
}
noopRouter := &KubernetesNoopRouter{}

switch {
case kind == "Deployment":
return deploymentRouter
case kind == "Service":
return noopRouter
default:
return deploymentRouter
}
}
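
A note on the dispatch above: the factory now keys the Kubernetes router off the canary's target kind, so Service targets get a no-op router while Deployment (and any unrecognised kind) falls back to the ClusterIP-managing router. A minimal stand-alone restatement, with illustrative string results in place of Flagger's router types:

```go
package main

import "fmt"

// kubernetesRouterFor restates the factory switch with descriptive strings.
func kubernetesRouterFor(kind string) string {
	switch kind {
	case "Service":
		return "noop router (the target Service is managed by the user)"
	default: // "Deployment" and anything unrecognised
		return "deployment router (creates/updates ClusterIP services)"
	}
}

func main() {
	for _, kind := range []string{"Deployment", "Service", "DaemonSet"} {
		fmt.Printf("%s -> %s\n", kind, kubernetesRouterFor(kind))
	}
}
```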

// MeshRouter returns a service mesh router

@@ -1,161 +1,13 @@
package router

import (
"fmt"

"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/kubernetes"

flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
)

// KubernetesRouter is managing ClusterIP services
type KubernetesRouter struct {
kubeClient kubernetes.Interface
flaggerClient clientset.Interface
logger *zap.SugaredLogger
labelSelector string
annotations map[string]string
ports map[string]int32
}

// Reconcile creates or updates the primary and canary services
func (c *KubernetesRouter) Reconcile(canary *flaggerv1.Canary) error {
targetName := canary.Spec.TargetRef.Name
primaryName := fmt.Sprintf("%s-primary", targetName)
canaryName := fmt.Sprintf("%s-canary", targetName)

// main svc
err := c.reconcileService(canary, targetName, primaryName)
if err != nil {
return err
}

// canary svc
err = c.reconcileService(canary, canaryName, targetName)
if err != nil {
return err
}

// primary svc
err = c.reconcileService(canary, primaryName, primaryName)
if err != nil {
return err
}

return nil
}

func (c *KubernetesRouter) SetRoutes(canary *flaggerv1.Canary, primaryRoute int, canaryRoute int) error {
return nil
}

func (c *KubernetesRouter) GetRoutes(canary *flaggerv1.Canary) (primaryRoute int, canaryRoute int, err error) {
return 0, 0, nil
}

func (c *KubernetesRouter) reconcileService(canary *flaggerv1.Canary, name string, target string) error {
portName := canary.Spec.Service.PortName
if portName == "" {
portName = "http"
}

targetPort := intstr.IntOrString{
Type: intstr.Int,
IntVal: canary.Spec.Service.Port,
}

if canary.Spec.Service.TargetPort.String() != "0" {
targetPort = canary.Spec.Service.TargetPort
}

svcSpec := corev1.ServiceSpec{
Type: corev1.ServiceTypeClusterIP,
Selector: map[string]string{c.labelSelector: target},
Ports: []corev1.ServicePort{
{
Name: portName,
Protocol: corev1.ProtocolTCP,
Port: canary.Spec.Service.Port,
TargetPort: targetPort,
},
},
}

for n, p := range c.ports {
cp := corev1.ServicePort{
Name: n,
Protocol: corev1.ProtocolTCP,
Port: p,
TargetPort: intstr.IntOrString{
Type: intstr.Int,
IntVal: p,
},
}

svcSpec.Ports = append(svcSpec.Ports, cp)
}

svc, err := c.kubeClient.CoreV1().Services(canary.Namespace).Get(name, metav1.GetOptions{})
if errors.IsNotFound(err) {
svc = &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: canary.Namespace,
Labels: map[string]string{c.labelSelector: name},
Annotations: c.annotations,
OwnerReferences: []metav1.OwnerReference{
*metav1.NewControllerRef(canary, schema.GroupVersionKind{
Group: flaggerv1.SchemeGroupVersion.Group,
Version: flaggerv1.SchemeGroupVersion.Version,
Kind: flaggerv1.CanaryKind,
}),
},
},
Spec: svcSpec,
}

_, err = c.kubeClient.CoreV1().Services(canary.Namespace).Create(svc)
if err != nil {
return err
}

c.logger.With("canary", fmt.Sprintf("%s.%s", canary.Name, canary.Namespace)).
Infof("Service %s.%s created", svc.GetName(), canary.Namespace)
return nil
}

if err != nil {
return fmt.Errorf("service %s query error %v", name, err)
}

if svc != nil {
sortPorts := func(a, b interface{}) bool {
return a.(corev1.ServicePort).Port < b.(corev1.ServicePort).Port
}
portsDiff := cmp.Diff(svcSpec.Ports, svc.Spec.Ports, cmpopts.SortSlices(sortPorts))
selectorsDiff := cmp.Diff(svcSpec.Selector, svc.Spec.Selector)

if portsDiff != "" || selectorsDiff != "" {
svcClone := svc.DeepCopy()
svcClone.Spec.Ports = svcSpec.Ports
svcClone.Spec.Selector = svcSpec.Selector
_, err = c.kubeClient.CoreV1().Services(canary.Namespace).Update(svcClone)
if err != nil {
return fmt.Errorf("service %s update error %v", name, err)
}
c.logger.With("canary", fmt.Sprintf("%s.%s", canary.Name, canary.Namespace)).
Infof("Service %s updated", svc.GetName())
}
}

return nil
// KubernetesRouter manages Kubernetes services
type KubernetesRouter interface {
// Initialize creates or updates the primary and canary services
Initialize(canary *flaggerv1.Canary) error
// Reconcile creates or updates the main service
Reconcile(canary *flaggerv1.Canary) error
}
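
For anyone adding support for a new target kind, anything satisfying this two-method interface can be returned by the factory. A minimal sketch follows; `LoggingRouter` is a hypothetical type written for illustration, assuming only the interface above:

```go
package router

import (
	"go.uber.org/zap"

	flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
)

// LoggingRouter is a hypothetical KubernetesRouter implementation that only
// records the calls; it shows the minimal surface a new target kind needs.
type LoggingRouter struct {
	logger *zap.SugaredLogger
}

// Initialize would normally create the primary and canary services.
func (r *LoggingRouter) Initialize(canary *flaggerv1.Canary) error {
	r.logger.Infof("would create primary/canary services for %s.%s", canary.Name, canary.Namespace)
	return nil
}

// Reconcile would normally create or update the main service.
func (r *LoggingRouter) Reconcile(canary *flaggerv1.Canary) error {
	r.logger.Infof("would reconcile the main service for %s.%s", canary.Name, canary.Namespace)
	return nil
}
```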

pkg/router/kubernetes_deployment.go (new file, 169 lines)
@@ -0,0 +1,169 @@
|
||||
package router
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/google/go-cmp/cmp/cmpopts"
|
||||
"go.uber.org/zap"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
|
||||
flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
|
||||
clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
|
||||
)
|
||||
|
||||
// KubernetesDeploymentRouter is managing ClusterIP services
|
||||
type KubernetesDeploymentRouter struct {
|
||||
kubeClient kubernetes.Interface
|
||||
flaggerClient clientset.Interface
|
||||
logger *zap.SugaredLogger
|
||||
labelSelector string
|
||||
annotations map[string]string
|
||||
ports map[string]int32
|
||||
}
|
||||
|
||||
// Initialize creates the primary and canary services
|
||||
func (c *KubernetesDeploymentRouter) Initialize(canary *flaggerv1.Canary) error {
|
||||
targetName := canary.Spec.TargetRef.Name
|
||||
primaryName := fmt.Sprintf("%s-primary", targetName)
|
||||
canaryName := fmt.Sprintf("%s-canary", targetName)
|
||||
|
||||
// canary svc
|
||||
err := c.reconcileService(canary, canaryName, targetName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// primary svc
|
||||
err = c.reconcileService(canary, primaryName, primaryName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Reconcile creates or updates the main service
|
||||
func (c *KubernetesDeploymentRouter) Reconcile(canary *flaggerv1.Canary) error {
|
||||
targetName := canary.Spec.TargetRef.Name
|
||||
primaryName := fmt.Sprintf("%s-primary", targetName)
|
||||
|
||||
	// main svc
	err := c.reconcileService(canary, targetName, primaryName)
	if err != nil {
		return err
	}

	return nil
}

func (c *KubernetesDeploymentRouter) SetRoutes(canary *flaggerv1.Canary, primaryRoute int, canaryRoute int) error {
	return nil
}

func (c *KubernetesDeploymentRouter) GetRoutes(canary *flaggerv1.Canary) (primaryRoute int, canaryRoute int, err error) {
	return 0, 0, nil
}

func (c *KubernetesDeploymentRouter) reconcileService(canary *flaggerv1.Canary, name string, target string) error {
	portName := canary.Spec.Service.PortName
	if portName == "" {
		portName = "http"
	}

	targetPort := intstr.IntOrString{
		Type:   intstr.Int,
		IntVal: canary.Spec.Service.Port,
	}

	if canary.Spec.Service.TargetPort.String() != "0" {
		targetPort = canary.Spec.Service.TargetPort
	}

	svcSpec := corev1.ServiceSpec{
		Type:     corev1.ServiceTypeClusterIP,
		Selector: map[string]string{c.labelSelector: target},
		Ports: []corev1.ServicePort{
			{
				Name:       portName,
				Protocol:   corev1.ProtocolTCP,
				Port:       canary.Spec.Service.Port,
				TargetPort: targetPort,
			},
		},
	}

	for n, p := range c.ports {
		cp := corev1.ServicePort{
			Name:     n,
			Protocol: corev1.ProtocolTCP,
			Port:     p,
			TargetPort: intstr.IntOrString{
				Type:   intstr.Int,
				IntVal: p,
			},
		}

		svcSpec.Ports = append(svcSpec.Ports, cp)
	}

	svc, err := c.kubeClient.CoreV1().Services(canary.Namespace).Get(name, metav1.GetOptions{})
	if errors.IsNotFound(err) {
		svc = &corev1.Service{
			ObjectMeta: metav1.ObjectMeta{
				Name:        name,
				Namespace:   canary.Namespace,
				Labels:      map[string]string{c.labelSelector: name},
				Annotations: c.annotations,
				OwnerReferences: []metav1.OwnerReference{
					*metav1.NewControllerRef(canary, schema.GroupVersionKind{
						Group:   flaggerv1.SchemeGroupVersion.Group,
						Version: flaggerv1.SchemeGroupVersion.Version,
						Kind:    flaggerv1.CanaryKind,
					}),
				},
			},
			Spec: svcSpec,
		}

		_, err = c.kubeClient.CoreV1().Services(canary.Namespace).Create(svc)
		if err != nil {
			return err
		}

		c.logger.With("canary", fmt.Sprintf("%s.%s", canary.Name, canary.Namespace)).
			Infof("Service %s.%s created", svc.GetName(), canary.Namespace)
		return nil
	}

	if err != nil {
		return fmt.Errorf("service %s query error %v", name, err)
	}

	if svc != nil {
		sortPorts := func(a, b interface{}) bool {
			return a.(corev1.ServicePort).Port < b.(corev1.ServicePort).Port
		}
		portsDiff := cmp.Diff(svcSpec.Ports, svc.Spec.Ports, cmpopts.SortSlices(sortPorts))
		selectorsDiff := cmp.Diff(svcSpec.Selector, svc.Spec.Selector)

		if portsDiff != "" || selectorsDiff != "" {
			svcClone := svc.DeepCopy()
			svcClone.Spec.Ports = svcSpec.Ports
			svcClone.Spec.Selector = svcSpec.Selector
			_, err = c.kubeClient.CoreV1().Services(canary.Namespace).Update(svcClone)
			if err != nil {
				return fmt.Errorf("service %s update error %v", name, err)
			}
			c.logger.With("canary", fmt.Sprintf("%s.%s", canary.Name, canary.Namespace)).
				Infof("Service %s updated", svc.GetName())
		}
	}

	return nil
}
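Note: the update path above only writes to the Service when go-cmp reports drift, and it uses an order-insensitive slice comparison so that ports listed in a different order do not trigger a rewrite on every reconcile. A minimal sketch of that technique, using a stand-in struct rather than corev1.ServicePort (everything outside the go-cmp calls is illustrative):

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

// servicePort stands in for corev1.ServicePort to keep the sketch dependency-light.
type servicePort struct {
	Name string
	Port int32
}

func main() {
	desired := []servicePort{{"http", 9898}, {"grpc", 9999}}
	current := []servicePort{{"grpc", 9999}, {"http", 9898}}

	// SortSlices makes the diff ignore element order, so a Service whose
	// ports merely appear shuffled is considered in sync.
	less := func(a, b servicePort) bool { return a.Port < b.Port }

	if diff := cmp.Diff(desired, current, cmpopts.SortSlices(less)); diff != "" {
		fmt.Printf("ports drifted, update needed:\n%s", diff)
	} else {
		fmt.Println("ports in sync, no update issued")
	}
}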
@@ -8,13 +8,18 @@ import (
 
 func TestServiceRouter_Create(t *testing.T) {
 	mocks := setupfakeClients()
-	router := &KubernetesRouter{
+	router := &KubernetesDeploymentRouter{
 		kubeClient:    mocks.kubeClient,
 		flaggerClient: mocks.flaggerClient,
 		logger:        mocks.logger,
 	}
 
-	err := router.Reconcile(mocks.canary)
+	err := router.Initialize(mocks.canary)
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+
+	err = router.Reconcile(mocks.canary)
 	if err != nil {
 		t.Fatal(err.Error())
 	}
@@ -48,13 +53,18 @@ func TestServiceRouter_Create(t *testing.T) {
 
 func TestServiceRouter_Update(t *testing.T) {
 	mocks := setupfakeClients()
-	router := &KubernetesRouter{
+	router := &KubernetesDeploymentRouter{
 		kubeClient:    mocks.kubeClient,
 		flaggerClient: mocks.flaggerClient,
 		logger:        mocks.logger,
 	}
 
-	err := router.Reconcile(mocks.canary)
+	err := router.Initialize(mocks.canary)
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+
+	err = router.Reconcile(mocks.canary)
 	if err != nil {
 		t.Fatal(err.Error())
 	}
@@ -73,6 +83,10 @@ func TestServiceRouter_Update(t *testing.T) {
 	}
 
 	// apply changes
+	err = router.Initialize(c)
+	if err != nil {
+		t.Fatal(err.Error())
+	}
 	err = router.Reconcile(c)
 	if err != nil {
 		t.Fatal(err.Error())
@@ -90,13 +104,18 @@ func TestServiceRouter_Update(t *testing.T) {
 
 func TestServiceRouter_Undo(t *testing.T) {
 	mocks := setupfakeClients()
-	router := &KubernetesRouter{
+	router := &KubernetesDeploymentRouter{
 		kubeClient:    mocks.kubeClient,
 		flaggerClient: mocks.flaggerClient,
 		logger:        mocks.logger,
 	}
 
-	err := router.Reconcile(mocks.canary)
+	err := router.Initialize(mocks.canary)
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+
+	err = router.Reconcile(mocks.canary)
 	if err != nil {
 		t.Fatal(err.Error())
 	}
@@ -116,6 +135,10 @@ func TestServiceRouter_Undo(t *testing.T) {
 	}
 
 	// undo changes
+	err = router.Initialize(mocks.canary)
+	if err != nil {
+		t.Fatal(err.Error())
+	}
 	err = router.Reconcile(mocks.canary)
 	if err != nil {
 		t.Fatal(err.Error())
pkg/router/kubernetes_noop.go (new file, 18 lines)
@@ -0,0 +1,18 @@
package router

import (
	flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
)

// KubernetesNoopRouter manages nothing. This is useful when one uses Flagger for progressive delivery of
// services that are not load-balanced by a Kubernetes service
type KubernetesNoopRouter struct {
}

func (c *KubernetesNoopRouter) Initialize(canary *flaggerv1.Canary) error {
	return nil
}

func (c *KubernetesNoopRouter) Reconcile(canary *flaggerv1.Canary) error {
	return nil
}
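For context: both routers expose the same Initialize/Reconcile surface, so the controller can pick one per canary. A hypothetical selection helper to illustrate the idea — the TargetRef field comes from the Canary spec, but this function itself is a sketch, not Flagger's actual wiring:

// kubeRouter captures the surface shared by the routers above (illustrative).
type kubeRouter interface {
	Initialize(canary *flaggerv1.Canary) error
	Reconcile(canary *flaggerv1.Canary) error
}

// kubeRouterFor is a hypothetical selector: a canary that targets an existing
// Service brings its own load balancing, so the no-op router suffices, while
// a Deployment target needs the generated ClusterIP services.
func kubeRouterFor(canary *flaggerv1.Canary, dr *KubernetesDeploymentRouter) kubeRouter {
	if canary.Spec.TargetRef.Kind == "Service" {
		return &KubernetesNoopRouter{}
	}
	return dr
}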
@@ -1,4 +1,4 @@
 package version
 
-var VERSION = "0.20.3"
+var VERSION = "0.20.4"
 var REVISION = "unknown"
test/e2e-kubernetes-tests-svc.sh (new executable file, 145 lines)
@@ -0,0 +1,145 @@
#!/usr/bin/env bash

# This script runs e2e tests for Blue/Green initialization, analysis and promotion
# Prerequisites: Kubernetes Kind, Kustomize

set -o errexit

REPO_ROOT=$(git rev-parse --show-toplevel)
export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"

echo '>>> Creating test namespace'
kubectl create namespace test

echo '>>> Installing the load tester'
kubectl apply -k ${REPO_ROOT}/kustomize/tester
kubectl -n test rollout status deployment/flagger-loadtester

echo '>>> Initialising canary'
kubectl apply -f ${REPO_ROOT}/test/e2e-workload.yaml

kubectl apply -n test -f - <<EOS
apiVersion: v1
kind: Service
metadata:
  name: podinfo
spec:
  ports:
  - name: http
    port: 9898
    protocol: TCP
    targetPort: http
  selector:
    app: podinfo
  type: ClusterIP
EOS

cat <<EOF | kubectl apply -f -
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  provider: kubernetes
  targetRef:
    apiVersion: core/v1
    kind: Service
    name: podinfo
  progressDeadlineSeconds: 60
  service:
    port: 9898
  canaryAnalysis:
    interval: 15s
    threshold: 10
    iterations: 5
    metrics:
    - name: request-success-rate
      threshold: 99
      interval: 1m
    - name: request-duration
      threshold: 500
      interval: 30s
    webhooks:
      - name: "gate"
        type: confirm-rollout
        url: http://flagger-loadtester.test/gate/approve
      - name: acceptance-test
        type: pre-rollout
        url: http://flagger-loadtester.test/
        timeout: 10s
        metadata:
          type: bash
          cmd: "curl -sd 'test' http://podinfo-canary/token | grep token"
      - name: load-test
        url: http://flagger-loadtester.test/
        timeout: 5s
        metadata:
          type: cmd
          cmd: "hey -z 10m -q 10 -c 2 http://podinfo-canary.test/"
          logCmdOutput: "true"
EOF

echo '>>> Waiting for primary to be ready'
kubectl -n test rollout status deploy/podinfo

retries=50
count=0
ok=false
until ${ok}; do
    kubectl -n test get canary/podinfo | grep 'Initialized' && ok=true || ok=false
    sleep 5
    count=$(($count + 1))
    if [[ ${count} -eq ${retries} ]]; then
        kubectl -n flagger-system logs deployment/flagger
        echo "No more retries left"
        exit 1
    fi
done

echo '✔ Canary initialization test passed'

echo '>>> Initialising secondary'
kubectl apply -f ${REPO_ROOT}/test/e2e-workload-v2.yaml

echo '>>> Waiting for secondary to be ready'
kubectl -n test rollout status deploy/podinfo-v2

echo '>>> Triggering canary deployment'
kubectl apply -n test -f - <<EOS
apiVersion: v1
kind: Service
metadata:
  name: podinfo
spec:
  ports:
  - name: http
    port: 9898
    protocol: TCP
    targetPort: http
  selector:
    app: podinfo-v2
  type: ClusterIP
EOS

echo '>>> Waiting for canary promotion'
retries=50
count=0
ok=false
until ${ok}; do
    kubectl -n test describe service/podinfo-primary | grep 'podinfo-v2' && ok=true || ok=false
    sleep 10
    kubectl -n flagger-system logs deployment/flagger --tail 1
    count=$(($count + 1))
    if [[ ${count} -eq ${retries} ]]; then
        kubectl -n test describe deployment/podinfo
        kubectl -n test describe deployment/podinfo-primary
        kubectl -n flagger-system logs deployment/flagger
        echo "No more retries left"
        exit 1
    fi
done

echo '✔ Canary promotion test passed'

kubectl -n flagger-system logs deployment/flagger
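The script's until-loops poll kubectl output until the expected condition appears or the retries run out. The same wait can be written in Go with apimachinery's wait helpers — a sketch of an equivalent harness step, assuming kubectl is on the PATH:

package main

import (
	"fmt"
	"os/exec"
	"strings"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	// Poll every 5s for up to 250s, mirroring the script's 50 retries.
	err := wait.Poll(5*time.Second, 250*time.Second, func() (bool, error) {
		out, _ := exec.Command("kubectl", "-n", "test", "get", "canary/podinfo").CombinedOutput()
		return strings.Contains(string(out), "Initialized"), nil
	})
	if err != nil {
		fmt.Println("no more retries left:", err)
	}
}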
@@ -4,7 +4,7 @@ set -o errexit
 
 REPO_ROOT=$(git rev-parse --show-toplevel)
 export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"
-NGINX_VERSION=1.24.4
+NGINX_VERSION=1.26.0
 
 echo '>>> Installing NGINX Ingress'
 helm upgrade -i nginx-ingress stable/nginx-ingress --version=${NGINX_VERSION} \
test/e2e-workload-v2.yaml (new file, 68 lines)
@@ -0,0 +1,68 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: podinfo-v2
  namespace: test
  labels:
    app: podinfo-v2
spec:
  minReadySeconds: 5
  revisionHistoryLimit: 5
  progressDeadlineSeconds: 60
  strategy:
    rollingUpdate:
      maxUnavailable: 0
    type: RollingUpdate
  selector:
    matchLabels:
      app: podinfo-v2
  template:
    metadata:
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "9797"
      labels:
        app: podinfo-v2
    spec:
      containers:
      - name: podinfod
        image: stefanprodan/podinfo:3.1.1
        imagePullPolicy: IfNotPresent
        ports:
        - name: http
          containerPort: 9898
          protocol: TCP
        - name: http-metrics
          containerPort: 9797
          protocol: TCP
        - name: grpc
          containerPort: 9999
          protocol: TCP
        command:
        - ./podinfo
        - --port=9898
        - --port-metrics=9797
        - --grpc-port=9999
        - --grpc-service-name=podinfo
        - --level=info
        - --random-delay=false
        - --random-error=false
        livenessProbe:
          httpGet:
            port: 9898
            path: /healthz
          initialDelaySeconds: 5
          timeoutSeconds: 5
        readinessProbe:
          httpGet:
            port: 9898
            path: /readyz
          initialDelaySeconds: 5
          timeoutSeconds: 5
        resources:
          limits:
            cpu: 1000m
            memory: 128Mi
          requests:
            cpu: 1m
            memory: 16Mi