mirror of
https://github.com/fluxcd/flagger.git
synced 2026-03-02 17:51:00 +00:00
Resolves #371 --- This adds support for `corev1.Service` as the `targetRef.kind`, so that we can use Flagger just for canary analysis and traffic-shifting on existing and pre-created services. Flagger doesn't touch deployments and HPAs in this mode. This is useful for keeping full control over the resources backing the service to be canary-released, including pods (behind a ClusterIP service) and external services (behind an ExternalName service). Major use-cases in my mind are: - Canary-release a K8s cluster. You create two clusters and a master cluster. In the master cluster, you create two `ExternalName` services pointing to (the hostname of the loadbalancer of the targeted app instance in) each cluster. Flagger runs on the master cluster and helps safely roll out a new K8s cluster by doing a canary release on the `ExternalName` service. - You want annotations and labels added to the service for integrating with things like external LBs (without extending Flagger to support customizing any aspect of the K8s service it manages). **Design**: A canary release on a K8s service is almost the same as one on a K8s deployment. The only fundamental difference is that it operates only on a set of K8s services. For example, one may start by creating two Helm releases for `podinfo-blue` and `podinfo-green`, and a K8s service `podinfo`. The `podinfo` service should initially have the same `Spec` as that of `podinfo-blue`. On a new release, you update `podinfo-green`, then trigger Flagger by updating the K8s service `podinfo` so that it points to pods or `externalName` as declared in `podinfo-green`. Flagger does the rest. The end result is the traffic to `podinfo` is gradually and safely shifted from `podinfo-blue` to `podinfo-green`. **How it works**: Under the hood, Flagger maintains two K8s services, `podinfo-primary` and `podinfo-canary`. Compared to canaries on K8s deployments, it doesn't create the service named `podinfo`, as it is already provided by YOU. 
Once Flagger detects the change in the `podinfo` service, it updates the `podinfo-canary` service and the routes, then analyzes the canary. On successful analysis, it promotes the canary service to the `podinfo-primary` service. You expose the `podinfo` service via any L7 ingress solution or a service mesh so that the traffic is managed by Flagger for safe deployments. **Giving it a try**: To give it a try, create a `Canary` as usual, but with its `targetRef` pointing to a K8s service: ``` apiVersion: flagger.app/v1alpha3 kind: Canary metadata: name: podinfo spec: provider: kubernetes targetRef: apiVersion: core/v1 kind: Service name: podinfo service: port: 9898 canaryAnalysis: # schedule interval (default 60s) interval: 10s # max number of failed checks before rollback threshold: 2 # number of checks to run before rollback iterations: 2 # Prometheus checks based on # http_request_duration_seconds histogram metrics: [] ``` Create a K8s service named `podinfo`, and update it. Now watch for the services `podinfo`, `podinfo-primary`, `podinfo-canary`. Flagger tracks the `podinfo` service for changes. Upon any change, it reconciles the `podinfo-primary` and `podinfo-canary` services. `podinfo-canary` always replicates the latest `podinfo`. In contrast, `podinfo-primary` replicates the latest successful `podinfo-canary`. **Notes**: - For the canary cluster use-case, we would need to write a K8s operator to, e.g. for App Mesh, sync `ExternalName` services to AppMesh `VirtualNode`s. But that's another story!
167 lines
4.3 KiB
Go
167 lines
4.3 KiB
Go
package controller
|
|
|
|
import (
|
|
"testing"
|
|
|
|
hpav1 "k8s.io/api/autoscaling/v1"
|
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
|
|
flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
|
|
)
|
|
|
|
func TestScheduler_ServicePromotion(t *testing.T) {
|
|
mocks := SetupMocks(newTestServiceCanary())
|
|
|
|
// init
|
|
mocks.ctrl.advanceCanary("podinfo", "default", true)
|
|
|
|
// check initialized status
|
|
c, err := mocks.flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
|
|
if err != nil {
|
|
t.Fatal(err.Error())
|
|
}
|
|
|
|
if c.Status.Phase != flaggerv1.CanaryPhaseInitialized {
|
|
t.Errorf("Got canary state %v wanted %v", c.Status.Phase, flaggerv1.CanaryPhaseInitialized)
|
|
}
|
|
|
|
// update
|
|
svc2 := newTestServiceV2()
|
|
_, err = mocks.kubeClient.CoreV1().Services("default").Update(svc2)
|
|
if err != nil {
|
|
t.Fatal(err.Error())
|
|
}
|
|
|
|
// detect service spec changes
|
|
mocks.ctrl.advanceCanary("podinfo", "default", true)
|
|
|
|
primaryWeight, canaryWeight, mirrored, err := mocks.router.GetRoutes(mocks.canary)
|
|
if err != nil {
|
|
t.Fatal(err.Error())
|
|
}
|
|
|
|
primaryWeight = 60
|
|
canaryWeight = 40
|
|
err = mocks.router.SetRoutes(mocks.canary, primaryWeight, canaryWeight, mirrored)
|
|
if err != nil {
|
|
t.Fatal(err.Error())
|
|
}
|
|
|
|
// advance
|
|
mocks.ctrl.advanceCanary("podinfo", "default", true)
|
|
|
|
// check progressing status
|
|
c, err = mocks.flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
|
|
if err != nil {
|
|
t.Fatal(err.Error())
|
|
}
|
|
|
|
if c.Status.Phase != flaggerv1.CanaryPhaseProgressing {
|
|
t.Errorf("Got canary state %v wanted %v", c.Status.Phase, flaggerv1.CanaryPhaseProgressing)
|
|
}
|
|
|
|
// promote
|
|
mocks.ctrl.advanceCanary("podinfo", "default", true)
|
|
|
|
// check promoting status
|
|
c, err = mocks.flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
|
|
if err != nil {
|
|
t.Fatal(err.Error())
|
|
}
|
|
|
|
if c.Status.Phase != flaggerv1.CanaryPhasePromoting {
|
|
t.Errorf("Got canary state %v wanted %v", c.Status.Phase, flaggerv1.CanaryPhasePromoting)
|
|
}
|
|
|
|
// finalise
|
|
mocks.ctrl.advanceCanary("podinfo", "default", true)
|
|
|
|
primaryWeight, canaryWeight, mirrored, err = mocks.router.GetRoutes(mocks.canary)
|
|
if err != nil {
|
|
t.Fatal(err.Error())
|
|
}
|
|
|
|
if primaryWeight != 100 {
|
|
t.Errorf("Got primary route %v wanted %v", primaryWeight, 100)
|
|
}
|
|
|
|
if canaryWeight != 0 {
|
|
t.Errorf("Got canary route %v wanted %v", canaryWeight, 0)
|
|
}
|
|
|
|
if mirrored != false {
|
|
t.Errorf("Got mirrored %v wanted %v", mirrored, false)
|
|
}
|
|
|
|
primarySvc, err := mocks.kubeClient.CoreV1().Services("default").Get("podinfo-primary", metav1.GetOptions{})
|
|
if err != nil {
|
|
t.Fatal(err.Error())
|
|
}
|
|
|
|
primaryLabelValue := primarySvc.Spec.Selector["app"]
|
|
canaryLabelValue := svc2.Spec.Selector["app"]
|
|
if primaryLabelValue != canaryLabelValue {
|
|
t.Errorf("Got primary selector label value %v wanted %v", primaryLabelValue, canaryLabelValue)
|
|
}
|
|
|
|
// check finalising status
|
|
c, err = mocks.flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
|
|
if err != nil {
|
|
t.Fatal(err.Error())
|
|
}
|
|
|
|
if c.Status.Phase != flaggerv1.CanaryPhaseFinalising {
|
|
t.Errorf("Got canary state %v wanted %v", c.Status.Phase, flaggerv1.CanaryPhaseFinalising)
|
|
}
|
|
|
|
// scale canary to zero
|
|
mocks.ctrl.advanceCanary("podinfo", "default", true)
|
|
|
|
c, err = mocks.flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
|
|
if err != nil {
|
|
t.Fatal(err.Error())
|
|
}
|
|
|
|
if c.Status.Phase != flaggerv1.CanaryPhaseSucceeded {
|
|
t.Errorf("Got canary state %v wanted %v", c.Status.Phase, flaggerv1.CanaryPhaseSucceeded)
|
|
}
|
|
}
|
|
|
|
func newTestServiceCanary() *flaggerv1.Canary {
|
|
cd := &flaggerv1.Canary{
|
|
TypeMeta: metav1.TypeMeta{APIVersion: flaggerv1.SchemeGroupVersion.String()},
|
|
ObjectMeta: metav1.ObjectMeta{
|
|
Namespace: "default",
|
|
Name: "podinfo",
|
|
},
|
|
Spec: flaggerv1.CanarySpec{
|
|
TargetRef: hpav1.CrossVersionObjectReference{
|
|
Name: "podinfo",
|
|
APIVersion: "core/v1",
|
|
Kind: "Service",
|
|
},
|
|
Service: flaggerv1.CanaryService{
|
|
Port: 9898,
|
|
},
|
|
CanaryAnalysis: flaggerv1.CanaryAnalysis{
|
|
Threshold: 10,
|
|
StepWeight: 10,
|
|
MaxWeight: 50,
|
|
Metrics: []flaggerv1.CanaryMetric{
|
|
{
|
|
Name: "istio_requests_total",
|
|
Threshold: 99,
|
|
Interval: "1m",
|
|
},
|
|
{
|
|
Name: "istio_request_duration_seconds_bucket",
|
|
Threshold: 500,
|
|
Interval: "1m",
|
|
},
|
|
},
|
|
},
|
|
},
|
|
}
|
|
return cd
|
|
}
|