Mirror of https://github.com/fluxcd/flagger.git
Synced 2026-02-15 18:40:12 +00:00

Compare commits

15 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 456d914c35 | |
| | 737507b0fe | |
| | 4bcf82d295 | |
| | e9cd7afc8a | |
| | 0830abd51d | |
| | 5b296e01b3 | |
| | 3fd039afd1 | |
| | 5904348ba5 | |
| | 1a98e93723 | |
| | c9685fbd13 | |
| | dc347e273d | |
| | 8170916897 | |
| | 71cd4e0cb7 | |
| | 0109788ccc | |
| | 1649dea468 | |
Makefile (1 line changed)

@@ -46,6 +46,7 @@ version-set:
	sed -i '' "s/flagger:$$current/flagger:$$next/g" artifacts/flagger/deployment.yaml && \
	sed -i '' "s/tag: $$current/tag: $$next/g" charts/flagger/values.yaml && \
	sed -i '' "s/appVersion: $$current/appVersion: $$next/g" charts/flagger/Chart.yaml && \
	sed -i '' "s/version: $$current/version: $$next/g" charts/flagger/Chart.yaml && \
	echo "Version $$next set in code, deployment and charts"

version-up:
README.md (16 lines changed)

@@ -38,6 +38,9 @@ ClusterIP [services](https://kubernetes.io/docs/concepts/services-networking/ser
Istio [virtual services](https://istio.io/docs/reference/config/istio.networking.v1alpha3/#VirtualService))
to drive the canary analysis and promotion.

+Flagger keeps track of ConfigMaps and Secrets referenced by a Kubernetes Deployment and triggers a canary analysis if any of those objects change.
+When promoting a workload in production, both code (container images) and configuration (config maps and secrets) are synchronised.



Gated canary promotion stages:

@@ -48,28 +51,28 @@ Gated canary promotion stages:
* halt advancement if a rolling update is underway
* halt advancement if pods are unhealthy
* increase canary traffic weight percentage from 0% to 5% (step weight)
* call webhooks and check results
* check canary HTTP request success rate and latency
* halt advancement if any metric is under the specified threshold
* increment the failed checks counter
* check if the number of failed checks reached the threshold
* route all traffic to primary
* scale to zero the canary deployment and mark it as failed
-* wait for the canary deployment to be updated (revision bump) and start over
+* wait for the canary deployment to be updated and start over
* increase canary traffic weight by 5% (step weight) till it reaches 50% (max weight)
* halt advancement while canary request success rate is under the threshold
* halt advancement while canary request duration P99 is over the threshold
* halt advancement if the primary or canary deployment becomes unhealthy
* halt advancement while canary deployment is being scaled up/down by HPA
* promote canary to primary
+* copy ConfigMaps and Secrets from canary to primary
* copy canary deployment spec template over primary
* wait for primary rolling update to finish
* halt advancement if pods are unhealthy
* route all traffic to primary
* scale to zero the canary deployment
* mark rollout as finished
-* wait for the canary deployment to be updated (revision bump) and start over
+* wait for the canary deployment to be updated and start over

You can change the canary analysis _max weight_ and the _step weight_ percentage in Flagger's custom resource.

For a deployment named _podinfo_, a canary promotion can be defined using Flagger's custom resource:

@@ -248,6 +251,9 @@ kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.4.0
```

+**Note** that Flagger tracks changes not only in the deployment `PodSpec` but also in `ConfigMaps` and `Secrets`
+that are referenced in the pod's volumes and container environment variables.

Flagger detects that the deployment revision changed and starts a new canary analysis:

```

@@ -336,6 +342,8 @@ Events:
Warning Synced 1m flagger Canary failed! Scaling down podinfo.test
```

+**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.

### Monitoring

Flagger comes with a Grafana dashboard made for canary analysis.
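The ConfigMap/Secret tracking added in this change is easiest to picture as checksumming: the controller remembers a hash of every referenced object's data and re-runs the analysis when a recomputed hash differs. Here is a minimal, self-contained sketch of that idea (an illustration under that assumption, not Flagger's actual ConfigTracker code):

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"sort"
)

// checksum returns a stable hash of a ConfigMap-style data map. A controller
// can store this value in the canary status and trigger a new analysis when
// the recomputed hash for the live object differs.
func checksum(data map[string]string) string {
	keys := make([]string, 0, len(data))
	for k := range data {
		keys = append(keys, k)
	}
	sort.Strings(keys) // stable key order so the hash is deterministic
	h := sha256.New()
	for _, k := range keys {
		h.Write([]byte(k))
		h.Write([]byte(data[k]))
	}
	return fmt.Sprintf("%x", h.Sum(nil))
}

func main() {
	v1 := map[string]string{"color": "blue"}
	v2 := map[string]string{"color": "green"}
	fmt.Println(checksum(v1) != checksum(v2)) // true: the config edit is detected
}
```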
artifacts/configs/canary.yaml (new file, 58 lines)

@@ -0,0 +1,58 @@
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  # deployment reference
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  # the maximum time in seconds for the canary deployment
  # to make progress before it is rolled back (default 600s)
  progressDeadlineSeconds: 60
  # HPA reference (optional)
  autoscalerRef:
    apiVersion: autoscaling/v2beta1
    kind: HorizontalPodAutoscaler
    name: podinfo
  service:
    # container port
    port: 9898
    # Istio gateways (optional)
    gateways:
    - public-gateway.istio-system.svc.cluster.local
    # Istio virtual service host names (optional)
    hosts:
    - app.iowa.weavedx.com
  canaryAnalysis:
    # schedule interval (default 60s)
    interval: 10s
    # max number of failed metric checks before rollback
    threshold: 10
    # max traffic percentage routed to canary
    # percentage (0-100)
    maxWeight: 50
    # canary increment step
    # percentage (0-100)
    stepWeight: 5
    # Istio Prometheus checks
    metrics:
    - name: istio_requests_total
      # minimum req success rate (non 5xx responses)
      # percentage (0-100)
      threshold: 99
      interval: 1m
    - name: istio_request_duration_seconds_bucket
      # maximum req duration P99
      # milliseconds
      threshold: 500
      interval: 30s
    # external checks (optional)
    webhooks:
    - name: load-test
      url: http://flagger-loadtester.test/
      timeout: 5s
      metadata:
        cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
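A quick sanity check of the analysis settings above: with `stepWeight: 5`, `maxWeight: 50` and `interval: 10s`, a healthy canary needs ten weight increments, so roughly 100 seconds of analysis before promotion (metric query and webhook latency excluded):

```go
package main

import "fmt"

func main() {
	stepWeight, maxWeight := 5, 50 // percent, from canaryAnalysis above
	intervalSec := 10              // canaryAnalysis.interval

	steps := maxWeight / stepWeight // 10 increments: 5% -> 50%
	fmt.Printf("~%d iterations, ~%d seconds of analysis before promotion\n",
		steps, steps*intervalSec)
}
```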
artifacts/configs/configs.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: podinfo-config-env
  namespace: test
data:
  color: blue
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: podinfo-config-vol
  namespace: test
data:
  output: console
  textmode: "true"
artifacts/configs/deployment.yaml (new file, 89 lines)

@@ -0,0 +1,89 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: podinfo
  namespace: test
  labels:
    app: podinfo
spec:
  minReadySeconds: 5
  revisionHistoryLimit: 5
  progressDeadlineSeconds: 60
  strategy:
    rollingUpdate:
      maxUnavailable: 0
    type: RollingUpdate
  selector:
    matchLabels:
      app: podinfo
  template:
    metadata:
      annotations:
        prometheus.io/scrape: "true"
      labels:
        app: podinfo
    spec:
      containers:
      - name: podinfod
        image: quay.io/stefanprodan/podinfo:1.3.0
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 9898
          name: http
          protocol: TCP
        command:
        - ./podinfo
        - --port=9898
        - --level=info
        - --random-delay=false
        - --random-error=false
        env:
        - name: PODINFO_UI_COLOR
          valueFrom:
            configMapKeyRef:
              name: podinfo-config-env
              key: color
        - name: SECRET_USER
          valueFrom:
            secretKeyRef:
              name: podinfo-secret-env
              key: user
        livenessProbe:
          exec:
            command:
            - podcli
            - check
            - http
            - localhost:9898/healthz
          initialDelaySeconds: 5
          timeoutSeconds: 5
        readinessProbe:
          exec:
            command:
            - podcli
            - check
            - http
            - localhost:9898/readyz
          initialDelaySeconds: 5
          timeoutSeconds: 5
        resources:
          limits:
            cpu: 2000m
            memory: 512Mi
          requests:
            cpu: 100m
            memory: 64Mi
        volumeMounts:
        - name: configs
          mountPath: /etc/podinfo/configs
          readOnly: true
        - name: secrets
          mountPath: /etc/podinfo/secrets
          readOnly: true
      volumes:
      - name: configs
        configMap:
          name: podinfo-config-vol
      - name: secrets
        secret:
          secretName: podinfo-secret-vol
artifacts/configs/hpa.yaml (new file, 19 lines)

@@ -0,0 +1,19 @@
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
  name: podinfo
  namespace: test
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  minReplicas: 1
  maxReplicas: 4
  metrics:
  - type: Resource
    resource:
      name: cpu
      # scale up if usage is above
      # 99% of the requested CPU (100m)
      targetAverageUtilization: 99
artifacts/configs/secrets.yaml (new file, 16 lines)

@@ -0,0 +1,16 @@
apiVersion: v1
kind: Secret
metadata:
  name: podinfo-secret-env
  namespace: test
data:
  password: cGFzc3dvcmQ=
  user: YWRtaW4=
---
apiVersion: v1
kind: Secret
metadata:
  name: podinfo-secret-vol
  namespace: test
data:
  key: cGFzc3dvcmQ=
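The Secret values above are base64-encoded, not encrypted (`YWRtaW4=` is `admin`, `cGFzc3dvcmQ=` is `password`); a quick check:

```go
package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// Kubernetes Secret data is only base64-encoded.
	user, _ := base64.StdEncoding.DecodeString("YWRtaW4=")
	pass, _ := base64.StdEncoding.DecodeString("cGFzc3dvcmQ=")
	fmt.Println(string(user), string(pass)) // admin password
}
```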
artifacts/flagger/deployment.yaml

@@ -22,7 +22,7 @@ spec:
      serviceAccountName: flagger
      containers:
        - name: flagger
-         image: quay.io/stefanprodan/flagger:0.4.1
+         image: quay.io/stefanprodan/flagger:0.5.0
          imagePullPolicy: Always
          ports:
            - name: http
charts/flagger/Chart.yaml

@@ -1,7 +1,7 @@
apiVersion: v1
name: flagger
-version: 0.4.1
-appVersion: 0.4.1
+version: 0.5.0
+appVersion: 0.5.0
kubeVersion: ">=1.11.0-0"
engine: gotpl
description: Flagger is a Kubernetes operator that automates the promotion of canary deployments using Istio routing for traffic shifting and Prometheus metrics for canary analysis.
charts/flagger/values.yaml

@@ -2,7 +2,7 @@
image:
  repository: quay.io/stefanprodan/flagger
-  tag: 0.4.1
+  tag: 0.5.0
  pullPolicy: IfNotPresent

metricsServer: "http://prometheus.istio-system.svc.cluster.local:9090"
@@ -97,38 +97,42 @@ the Istio Virtual Service. The container port from the target deployment should



+A canary deployment is triggered by changes in any of the following objects:
+
+* Deployment PodSpec (container image, command, ports, env, resources, etc)
+* ConfigMaps mounted as volumes or mapped to environment variables
+* Secrets mounted as volumes or mapped to environment variables

Gated canary promotion stages:

* scan for canary deployments
* creates the primary deployment if needed
* check Istio virtual service routes are mapped to primary and canary ClusterIP services
* check primary and canary deployments status
* halt advancement if a rolling update is underway
* halt advancement if pods are unhealthy
-* increase canary traffic weight percentage from 0% to 5% \(step weight\)
+* increase canary traffic weight percentage from 0% to 5% (step weight)
* call webhooks and check results
* check canary HTTP request success rate and latency
* halt advancement if any metric is under the specified threshold
* increment the failed checks counter
* check if the number of failed checks reached the threshold
* route all traffic to primary
* scale to zero the canary deployment and mark it as failed
-* wait for the canary deployment to be updated \(revision bump\) and start over
+* wait for the canary deployment to be updated and start over
-* increase canary traffic weight by 5% \(step weight\) till it reaches 50% \(max weight\)
+* increase canary traffic weight by 5% (step weight) till it reaches 50% (max weight)
+* halt advancement if any of the webhook calls are failing
* halt advancement while canary request success rate is under the threshold
* halt advancement while canary request duration P99 is over the threshold
* halt advancement if the primary or canary deployment becomes unhealthy
* halt advancement while canary deployment is being scaled up/down by HPA
* promote canary to primary
+* copy ConfigMaps and Secrets from canary to primary
* copy canary deployment spec template over primary
* wait for primary rolling update to finish
* halt advancement if pods are unhealthy
* route all traffic to primary
* scale to zero the canary deployment
-* mark the canary deployment as finished
+* mark rollout as finished
-* wait for the canary deployment to be updated \(revision bump\) and start over
+* wait for the canary deployment to be updated and start over

-You can change the canary analysis _max weight_ and the _step weight_ percentage in Flagger's custom resource.

### Canary Analysis
@@ -24,7 +24,7 @@ kubectl -n test apply -f ${REPO}/artifacts/loadtester/deployment.yaml
kubectl -n test apply -f ${REPO}/artifacts/loadtester/service.yaml
```

-Create a canary custom resource \(replace example.com with your own domain\):
+Create a canary custom resource (replace example.com with your own domain):

```yaml
apiVersion: flagger.app/v1alpha3

@@ -146,6 +146,8 @@ Events:
Normal Synced 5s flagger Promotion completed! Scaling down podinfo.test
```

+**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.

You can monitor all canaries with:

```bash

@@ -181,7 +183,8 @@ Generate latency:
watch curl http://podinfo-canary:9898/delay/1
```

-When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary, the canary is scaled to zero and the rollout is marked as failed.
+When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
+the canary is scaled to zero and the rollout is marked as failed.

```text
kubectl -n test describe canary/podinfo
@@ -93,6 +93,8 @@ type CanaryStatus struct {
	FailedChecks int `json:"failedChecks"`
	CanaryWeight int `json:"canaryWeight"`
+	// +optional
+	TrackedConfigs *map[string]string `json:"trackedConfigs,omitempty"`
	// +optional
	LastAppliedSpec string `json:"lastAppliedSpec,omitempty"`
	// +optional
	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
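The new `TrackedConfigs` field records which ConfigMaps and Secrets the canary references; judging by the tests later in this diff, the keys use a `kind/name` format. A hypothetical value (the entries below are illustrative placeholders, not Flagger's exact output):

```go
package main

import "fmt"

func main() {
	// Hypothetical contents of CanaryStatus.TrackedConfigs, based on the
	// "kind/name" key format asserted in the tests later in this diff.
	trackedConfigs := map[string]string{
		"configmap/podinfo-config-env": "59bf4b2a",
		"secret/podinfo-secret-env":    "8d0ac5bf",
	}
	for k, v := range trackedConfigs {
		fmt.Println(k, v)
	}
}
```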
@@ -188,6 +188,17 @@ func (in *CanarySpec) DeepCopy() *CanarySpec {

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CanaryStatus) DeepCopyInto(out *CanaryStatus) {
	*out = *in
	if in.TrackedConfigs != nil {
		in, out := &in.TrackedConfigs, &out.TrackedConfigs
		*out = new(map[string]string)
		if **in != nil {
			in, out := *in, *out
			*out = make(map[string]string, len(*in))
			for key, val := range *in {
				(*out)[key] = val
			}
		}
	}
	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
	return
}
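The generated deepcopy code clones the map entry by entry because assigning a Go map only copies a reference; a minimal demonstration of why that matters for status objects:

```go
package main

import "fmt"

func main() {
	// Assigning a map copies only the reference, so mutations through one
	// status object would leak into its "copy".
	orig := map[string]string{"configmap/podinfo-config-env": "v1"}
	shallow := orig // aliased
	deep := make(map[string]string, len(orig))
	for k, v := range orig {
		deep[k] = v
	}
	orig["configmap/podinfo-config-env"] = "v2"
	fmt.Println(shallow["configmap/podinfo-config-env"]) // v2 (aliased)
	fmt.Println(deep["configmap/podinfo-config-env"])    // v1 (independent)
}
```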
@@ -74,6 +74,11 @@ func NewController(
		kubeClient:    kubeClient,
		istioClient:   istioClient,
		flaggerClient: flaggerClient,
+		configTracker: ConfigTracker{
+			logger:        logger,
+			kubeClient:    kubeClient,
+			flaggerClient: flaggerClient,
+		},
	}

	router := CanaryRouter{
pkg/controller/controller_test.go (new file, 523 lines)

@@ -0,0 +1,523 @@
package controller

import (
	istioclientset "github.com/knative/pkg/client/clientset/versioned"
	fakeIstio "github.com/knative/pkg/client/clientset/versioned/fake"
	"github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
	clientset "github.com/stefanprodan/flagger/pkg/client/clientset/versioned"
	fakeFlagger "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/fake"
	informers "github.com/stefanprodan/flagger/pkg/client/informers/externalversions"
	"github.com/stefanprodan/flagger/pkg/logging"
	"go.uber.org/zap"
	appsv1 "k8s.io/api/apps/v1"
	hpav1 "k8s.io/api/autoscaling/v1"
	hpav2 "k8s.io/api/autoscaling/v2beta1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/workqueue"
	"sync"
	"time"
)

var (
	alwaysReady        = func() bool { return true }
	noResyncPeriodFunc = func() time.Duration { return 0 }
)

type Mocks struct {
	canary        *v1alpha3.Canary
	kubeClient    kubernetes.Interface
	istioClient   istioclientset.Interface
	flaggerClient clientset.Interface
	deployer      CanaryDeployer
	router        CanaryRouter
	observer      CanaryObserver
	ctrl          *Controller
	logger        *zap.SugaredLogger
}

func SetupMocks() Mocks {
	// init canary
	canary := newTestCanary()
	flaggerClient := fakeFlagger.NewSimpleClientset(canary)

	// init kube clientset and register mock objects
	kubeClient := fake.NewSimpleClientset(
		newTestDeployment(),
		newTestHPA(),
		NewTestConfigMap(),
		NewTestConfigMapEnv(),
		NewTestConfigMapVol(),
		NewTestSecret(),
		NewTestSecretEnv(),
		NewTestSecretVol(),
	)

	istioClient := fakeIstio.NewSimpleClientset()
	logger, _ := logging.NewLogger("debug")

	// init controller helpers
	deployer := CanaryDeployer{
		flaggerClient: flaggerClient,
		kubeClient:    kubeClient,
		logger:        logger,
		configTracker: ConfigTracker{
			logger:        logger,
			kubeClient:    kubeClient,
			flaggerClient: flaggerClient,
		},
	}
	router := CanaryRouter{
		flaggerClient: flaggerClient,
		kubeClient:    kubeClient,
		istioClient:   istioClient,
		logger:        logger,
	}
	observer := CanaryObserver{
		metricsServer: "fake",
	}

	// init controller
	flaggerInformerFactory := informers.NewSharedInformerFactory(flaggerClient, noResyncPeriodFunc())
	flaggerInformer := flaggerInformerFactory.Flagger().V1alpha3().Canaries()

	ctrl := &Controller{
		kubeClient:    kubeClient,
		istioClient:   istioClient,
		flaggerClient: flaggerClient,
		flaggerLister: flaggerInformer.Lister(),
		flaggerSynced: flaggerInformer.Informer().HasSynced,
		workqueue:     workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), controllerAgentName),
		eventRecorder: &record.FakeRecorder{},
		logger:        logger,
		canaries:      new(sync.Map),
		flaggerWindow: time.Second,
		deployer:      deployer,
		router:        router,
		observer:      observer,
		recorder:      NewCanaryRecorder(false),
	}
	ctrl.flaggerSynced = alwaysReady

	return Mocks{
		canary:        canary,
		observer:      observer,
		router:        router,
		deployer:      deployer,
		logger:        logger,
		flaggerClient: flaggerClient,
		istioClient:   istioClient,
		kubeClient:    kubeClient,
		ctrl:          ctrl,
	}
}

func NewTestConfigMap() *corev1.ConfigMap {
	return &corev1.ConfigMap{
		TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()},
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "default",
			Name:      "podinfo-config-env",
		},
		Data: map[string]string{
			"color": "red",
		},
	}
}

func NewTestConfigMapV2() *corev1.ConfigMap {
	return &corev1.ConfigMap{
		TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()},
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "default",
			Name:      "podinfo-config-env",
		},
		Data: map[string]string{
			"color":  "blue",
			"output": "console",
		},
	}
}

func NewTestConfigMapEnv() *corev1.ConfigMap {
	return &corev1.ConfigMap{
		TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()},
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "default",
			Name:      "podinfo-config-all-env",
		},
		Data: map[string]string{
			"color": "red",
		},
	}
}

func NewTestConfigMapVol() *corev1.ConfigMap {
	return &corev1.ConfigMap{
		TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()},
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "default",
			Name:      "podinfo-config-vol",
		},
		Data: map[string]string{
			"color": "red",
		},
	}
}

func NewTestSecret() *corev1.Secret {
	return &corev1.Secret{
		TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()},
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "default",
			Name:      "podinfo-secret-env",
		},
		Type: corev1.SecretTypeOpaque,
		Data: map[string][]byte{
			"apiKey": []byte("test"),
		},
	}
}

func NewTestSecretV2() *corev1.Secret {
	return &corev1.Secret{
		TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()},
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "default",
			Name:      "podinfo-secret-env",
		},
		Type: corev1.SecretTypeOpaque,
		Data: map[string][]byte{
			"apiKey": []byte("test2"),
		},
	}
}

func NewTestSecretEnv() *corev1.Secret {
	return &corev1.Secret{
		TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()},
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "default",
			Name:      "podinfo-secret-all-env",
		},
		Type: corev1.SecretTypeOpaque,
		Data: map[string][]byte{
			"apiKey": []byte("test"),
		},
	}
}

func NewTestSecretVol() *corev1.Secret {
	return &corev1.Secret{
		TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()},
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "default",
			Name:      "podinfo-secret-vol",
		},
		Type: corev1.SecretTypeOpaque,
		Data: map[string][]byte{
			"apiKey": []byte("test"),
		},
	}
}

func newTestCanary() *v1alpha3.Canary {
	cd := &v1alpha3.Canary{
		TypeMeta: metav1.TypeMeta{APIVersion: v1alpha3.SchemeGroupVersion.String()},
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "default",
			Name:      "podinfo",
		},
		Spec: v1alpha3.CanarySpec{
			TargetRef: hpav1.CrossVersionObjectReference{
				Name:       "podinfo",
				APIVersion: "apps/v1",
				Kind:       "Deployment",
			},
			AutoscalerRef: &hpav1.CrossVersionObjectReference{
				Name:       "podinfo",
				APIVersion: "autoscaling/v2beta1",
				Kind:       "HorizontalPodAutoscaler",
			},
			Service: v1alpha3.CanaryService{
				Port: 9898,
			},
			CanaryAnalysis: v1alpha3.CanaryAnalysis{
				Threshold:  10,
				StepWeight: 10,
				MaxWeight:  50,
				Metrics: []v1alpha3.CanaryMetric{
					{
						Name:      "istio_requests_total",
						Threshold: 99,
						Interval:  "1m",
					},
					{
						Name:      "istio_request_duration_seconds_bucket",
						Threshold: 500,
						Interval:  "1m",
					},
				},
			},
		},
	}
	return cd
}

func newTestDeployment() *appsv1.Deployment {
	d := &appsv1.Deployment{
		TypeMeta: metav1.TypeMeta{APIVersion: appsv1.SchemeGroupVersion.String()},
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "default",
			Name:      "podinfo",
		},
		Spec: appsv1.DeploymentSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{
					"app": "podinfo",
				},
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						"app": "podinfo",
					},
				},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{
						{
							Name:  "podinfo",
							Image: "quay.io/stefanprodan/podinfo:1.2.0",
							Command: []string{
								"./podinfo",
								"--port=9898",
							},
							Args:       nil,
							WorkingDir: "",
							Ports: []corev1.ContainerPort{
								{
									Name:          "http",
									ContainerPort: 9898,
									Protocol:      corev1.ProtocolTCP,
								},
							},
							Env: []corev1.EnvVar{
								{
									Name: "PODINFO_UI_COLOR",
									ValueFrom: &corev1.EnvVarSource{
										ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
											LocalObjectReference: corev1.LocalObjectReference{
												Name: "podinfo-config-env",
											},
											Key: "color",
										},
									},
								},
								{
									Name: "API_KEY",
									ValueFrom: &corev1.EnvVarSource{
										SecretKeyRef: &corev1.SecretKeySelector{
											LocalObjectReference: corev1.LocalObjectReference{
												Name: "podinfo-secret-env",
											},
											Key: "apiKey",
										},
									},
								},
							},
							EnvFrom: []corev1.EnvFromSource{
								{
									ConfigMapRef: &corev1.ConfigMapEnvSource{
										LocalObjectReference: corev1.LocalObjectReference{
											Name: "podinfo-config-all-env",
										},
									},
								},
								{
									SecretRef: &corev1.SecretEnvSource{
										LocalObjectReference: corev1.LocalObjectReference{
											Name: "podinfo-secret-all-env",
										},
									},
								},
							},
							VolumeMounts: []corev1.VolumeMount{
								{
									Name:      "config",
									MountPath: "/etc/podinfo/config",
									ReadOnly:  true,
								},
								{
									Name:      "secret",
									MountPath: "/etc/podinfo/secret",
									ReadOnly:  true,
								},
							},
						},
					},
					Volumes: []corev1.Volume{
						{
							Name: "config",
							VolumeSource: corev1.VolumeSource{
								ConfigMap: &corev1.ConfigMapVolumeSource{
									LocalObjectReference: corev1.LocalObjectReference{
										Name: "podinfo-config-vol",
									},
								},
							},
						},
						{
							Name: "secret",
							VolumeSource: corev1.VolumeSource{
								Secret: &corev1.SecretVolumeSource{
									SecretName: "podinfo-secret-vol",
								},
							},
						},
					},
				},
			},
		},
	}

	return d
}

func newTestDeploymentV2() *appsv1.Deployment {
	d := &appsv1.Deployment{
		TypeMeta: metav1.TypeMeta{APIVersion: appsv1.SchemeGroupVersion.String()},
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "default",
			Name:      "podinfo",
		},
		Spec: appsv1.DeploymentSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{
					"app": "podinfo",
				},
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						"app": "podinfo",
					},
				},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{
						{
							Name:  "podinfo",
							Image: "quay.io/stefanprodan/podinfo:1.2.1",
							Ports: []corev1.ContainerPort{
								{
									Name:          "http",
									ContainerPort: 9898,
									Protocol:      corev1.ProtocolTCP,
								},
							},
							Command: []string{
								"./podinfo",
								"--port=9898",
							},
							Env: []corev1.EnvVar{
								{
									Name: "PODINFO_UI_COLOR",
									ValueFrom: &corev1.EnvVarSource{
										ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
											LocalObjectReference: corev1.LocalObjectReference{
												Name: "podinfo-config-env",
											},
											Key: "color",
										},
									},
								},
								{
									Name: "API_KEY",
									ValueFrom: &corev1.EnvVarSource{
										SecretKeyRef: &corev1.SecretKeySelector{
											LocalObjectReference: corev1.LocalObjectReference{
												Name: "podinfo-secret-env",
											},
											Key: "apiKey",
										},
									},
								},
							},
							EnvFrom: []corev1.EnvFromSource{
								{
									ConfigMapRef: &corev1.ConfigMapEnvSource{
										LocalObjectReference: corev1.LocalObjectReference{
											Name: "podinfo-config-all-env",
										},
									},
								},
							},
							VolumeMounts: []corev1.VolumeMount{
								{
									Name:      "config",
									MountPath: "/etc/podinfo/config",
									ReadOnly:  true,
								},
								{
									Name:      "secret",
									MountPath: "/etc/podinfo/secret",
									ReadOnly:  true,
								},
							},
						},
					},
					Volumes: []corev1.Volume{
						{
							Name: "config",
							VolumeSource: corev1.VolumeSource{
								ConfigMap: &corev1.ConfigMapVolumeSource{
									LocalObjectReference: corev1.LocalObjectReference{
										Name: "podinfo-config-vol",
									},
								},
							},
						},
						{
							Name: "secret",
							VolumeSource: corev1.VolumeSource{
								Secret: &corev1.SecretVolumeSource{
									SecretName: "podinfo-secret-vol",
								},
							},
						},
					},
				},
			},
		},
	}

	return d
}

func newTestHPA() *hpav2.HorizontalPodAutoscaler {
	h := &hpav2.HorizontalPodAutoscaler{
		TypeMeta: metav1.TypeMeta{APIVersion: hpav2.SchemeGroupVersion.String()},
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "default",
			Name:      "podinfo",
		},
		Spec: hpav2.HorizontalPodAutoscalerSpec{
			ScaleTargetRef: hpav2.CrossVersionObjectReference{
				Name:       "podinfo",
				APIVersion: "apps/v1",
				Kind:       "Deployment",
			},
			Metrics: []hpav2.MetricSpec{
				{
					Type: "Resource",
					Resource: &hpav2.ResourceMetricSource{
						Name:                     "cpu",
						TargetAverageUtilization: int32p(99),
					},
				},
			},
		},
	}

	return h
}
@@ -1,9 +1,11 @@
package controller

import (
+	"crypto/rand"
	"encoding/base64"
	"encoding/json"
	"fmt"
+	"io"
	"time"

	"github.com/google/go-cmp/cmp"
@@ -28,9 +30,10 @@ type CanaryDeployer struct {
	istioClient   istioclientset.Interface
	flaggerClient clientset.Interface
	logger        *zap.SugaredLogger
+	configTracker ConfigTracker
}

-// Promote copies the pod spec from canary to primary
+// Promote copies the pod spec, secrets and config maps from canary to primary
func (c *CanaryDeployer) Promote(cd *flaggerv1.Canary) error {
	targetName := cd.Spec.TargetRef.Name
	primaryName := fmt.Sprintf("%s-primary", targetName)
@@ -51,12 +54,30 @@ func (c *CanaryDeployer) Promote(cd *flaggerv1.Canary) error {
		return fmt.Errorf("deployment %s.%s query error %v", primaryName, cd.Namespace, err)
	}

+	// promote secrets and config maps
+	configRefs, err := c.configTracker.GetTargetConfigs(cd)
+	if err != nil {
+		return err
+	}
+	if err := c.configTracker.CreatePrimaryConfigs(cd, configRefs); err != nil {
+		return err
+	}

	primaryCopy := primary.DeepCopy()
	primaryCopy.Spec.ProgressDeadlineSeconds = canary.Spec.ProgressDeadlineSeconds
	primaryCopy.Spec.MinReadySeconds = canary.Spec.MinReadySeconds
	primaryCopy.Spec.RevisionHistoryLimit = canary.Spec.RevisionHistoryLimit
	primaryCopy.Spec.Strategy = canary.Spec.Strategy
-	primaryCopy.Spec.Template.Spec = canary.Spec.Template.Spec

+	// update spec with primary secrets and config maps
+	primaryCopy.Spec.Template.Spec = c.configTracker.ApplyPrimaryConfigs(canary.Spec.Template.Spec, configRefs)
+
+	// update pod annotations to ensure a rolling update
+	annotations, err := c.makeAnnotations(canary.Spec.Template.Annotations)
+	if err != nil {
+		return err
+	}
+	primaryCopy.Spec.Template.Annotations = annotations

	_, err = c.kubeClient.AppsV1().Deployments(cd.Namespace).Update(primaryCopy)
	if err != nil {
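Note the ordering in `Promote` above: the primary copies of the ConfigMaps and Secrets are created before the primary pod spec is rewritten to reference them, presumably so the rolling update that follows never points at objects that do not exist yet.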
@@ -157,7 +178,22 @@ func (c *CanaryDeployer) ShouldAdvance(cd *flaggerv1.Canary) (bool, error) {
	if cd.Status.LastAppliedSpec == "" || cd.Status.Phase == flaggerv1.CanaryProgressing {
		return true, nil
	}
-	return c.IsNewSpec(cd)
+
+	newDep, err := c.IsNewSpec(cd)
+	if err != nil {
+		return false, err
+	}
+	if newDep {
+		return newDep, nil
+	}
+
+	newCfg, err := c.configTracker.HasConfigChanged(cd)
+	if err != nil {
+		return false, err
+	}
+
+	return newCfg, nil
}

// SetStatusFailedChecks updates the canary failed checks counter
@@ -218,12 +254,18 @@ func (c *CanaryDeployer) SyncStatus(cd *flaggerv1.Canary, status flaggerv1.Canar
		return fmt.Errorf("deployment %s.%s marshal error %v", cd.Spec.TargetRef.Name, cd.Namespace, err)
	}

+	configs, err := c.configTracker.GetConfigRefs(cd)
+	if err != nil {
+		return fmt.Errorf("configs query error %v", err)
+	}

	cdCopy := cd.DeepCopy()
	cdCopy.Status.Phase = status.Phase
	cdCopy.Status.CanaryWeight = status.CanaryWeight
	cdCopy.Status.FailedChecks = status.FailedChecks
	cdCopy.Status.LastAppliedSpec = base64.StdEncoding.EncodeToString(specJson)
	cdCopy.Status.LastTransitionTime = metav1.Now()
+	cdCopy.Status.TrackedConfigs = configs

	cd, err = c.flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
	if err != nil {
@@ -290,11 +332,25 @@ func (c *CanaryDeployer) createPrimaryDeployment(cd *flaggerv1.Canary) error {

	primaryDep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(primaryName, metav1.GetOptions{})
	if errors.IsNotFound(err) {
+		// create primary secrets and config maps
+		configRefs, err := c.configTracker.GetTargetConfigs(cd)
+		if err != nil {
+			return err
+		}
+		if err := c.configTracker.CreatePrimaryConfigs(cd, configRefs); err != nil {
+			return err
+		}
+		annotations, err := c.makeAnnotations(canaryDep.Spec.Template.Annotations)
+		if err != nil {
+			return err
+		}

		// create primary deployment
		primaryDep = &appsv1.Deployment{
			ObjectMeta: metav1.ObjectMeta{
-				Name:        primaryName,
-				Annotations: canaryDep.Annotations,
-				Namespace:   cd.Namespace,
+				Name:      primaryName,
+				Labels:    canaryDep.Labels,
+				Namespace: cd.Namespace,
				OwnerReferences: []metav1.OwnerReference{
					*metav1.NewControllerRef(cd, schema.GroupVersionKind{
						Group: flaggerv1.SchemeGroupVersion.Group,
@@ -317,9 +373,10 @@ func (c *CanaryDeployer) createPrimaryDeployment(cd *flaggerv1.Canary) error {
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels:      map[string]string{"app": primaryName},
-					Annotations: canaryDep.Spec.Template.Annotations,
+					Annotations: annotations,
				},
-				Spec: canaryDep.Spec.Template.Spec,
+				// update spec with the primary secrets and config maps
+				Spec: c.configTracker.ApplyPrimaryConfigs(canaryDep.Spec.Template.Spec, configRefs),
			},
		},
	}
@@ -353,6 +410,7 @@ func (c *CanaryDeployer) createPrimaryHpa(cd *flaggerv1.Canary) error {
		ObjectMeta: metav1.ObjectMeta{
			Name:      primaryHpaName,
			Namespace: cd.Namespace,
+			Labels:    hpa.Labels,
			OwnerReferences: []metav1.OwnerReference{
				*metav1.NewControllerRef(cd, schema.GroupVersionKind{
					Group: flaggerv1.SchemeGroupVersion.Group,
@@ -432,3 +490,26 @@ func (c *CanaryDeployer) getDeploymentCondition(
	}
	return nil
}

+// makeAnnotations appends a unique ID to the annotations map
+func (c *CanaryDeployer) makeAnnotations(annotations map[string]string) (map[string]string, error) {
+	idKey := "flagger-id"
+	res := make(map[string]string)
+	uuid := make([]byte, 16)
+	n, err := io.ReadFull(rand.Reader, uuid)
+	if n != len(uuid) || err != nil {
+		return res, err
+	}
+	uuid[8] = uuid[8]&^0xc0 | 0x80
+	uuid[6] = uuid[6]&^0xf0 | 0x40
+	id := fmt.Sprintf("%x-%x-%x-%x-%x", uuid[0:4], uuid[4:6], uuid[6:8], uuid[8:10], uuid[10:])
+
+	for k, v := range annotations {
+		if k != idKey {
+			res[k] = v
+		}
+	}
+	res[idKey] = id
+
+	return res, nil
+}
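The two bitmask lines in `makeAnnotations` turn 16 random bytes into an RFC 4122 version-4 UUID: byte 6's high nibble becomes `4` (the version) and byte 8's two high bits become `10` (the variant). A standalone sketch of the same trick:

```go
package main

import (
	"crypto/rand"
	"fmt"
	"io"
)

func main() {
	uuid := make([]byte, 16)
	if _, err := io.ReadFull(rand.Reader, uuid); err != nil {
		panic(err)
	}
	// Same masks as makeAnnotations above:
	uuid[8] = uuid[8]&^0xc0 | 0x80 // variant: top two bits of byte 8 -> 10 (RFC 4122)
	uuid[6] = uuid[6]&^0xf0 | 0x40 // version: high nibble of byte 6 -> 4 (random UUID)
	fmt.Printf("%x-%x-%x-%x-%x\n", uuid[0:4], uuid[4:6], uuid[6:8], uuid[8:10], uuid[10:])
}
```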
@@ -4,200 +4,24 @@ import (
	"testing"

	"github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
-	fakeFlagger "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/fake"
-	"github.com/stefanprodan/flagger/pkg/logging"
-	appsv1 "k8s.io/api/apps/v1"
-	hpav1 "k8s.io/api/autoscaling/v1"
-	hpav2 "k8s.io/api/autoscaling/v2beta1"
-	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/client-go/kubernetes/fake"
)
func newTestCanary() *v1alpha3.Canary {
	cd := &v1alpha3.Canary{
		TypeMeta: metav1.TypeMeta{APIVersion: v1alpha3.SchemeGroupVersion.String()},
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "default",
			Name:      "podinfo",
		},
		Spec: v1alpha3.CanarySpec{
			TargetRef: hpav1.CrossVersionObjectReference{
				Name:       "podinfo",
				APIVersion: "apps/v1",
				Kind:       "Deployment",
			},
			AutoscalerRef: &hpav1.CrossVersionObjectReference{
				Name:       "podinfo",
				APIVersion: "autoscaling/v2beta1",
				Kind:       "HorizontalPodAutoscaler",
			},
			Service: v1alpha3.CanaryService{
				Port: 9898,
			},
			CanaryAnalysis: v1alpha3.CanaryAnalysis{
				Threshold:  10,
				StepWeight: 10,
				MaxWeight:  50,
				Metrics: []v1alpha3.CanaryMetric{
					{
						Name:      "istio_requests_total",
						Threshold: 99,
						Interval:  "1m",
					},
					{
						Name:      "istio_request_duration_seconds_bucket",
						Threshold: 500,
						Interval:  "1m",
					},
				},
			},
		},
	}
	return cd
}

func newTestDeployment() *appsv1.Deployment {
	d := &appsv1.Deployment{
		TypeMeta: metav1.TypeMeta{APIVersion: appsv1.SchemeGroupVersion.String()},
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "default",
			Name:      "podinfo",
		},
		Spec: appsv1.DeploymentSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{
					"app": "podinfo",
				},
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						"app": "podinfo",
					},
				},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{
						{
							Name:  "podinfo",
							Image: "quay.io/stefanprodan/podinfo:1.2.0",
							Ports: []corev1.ContainerPort{
								{
									Name:          "http",
									ContainerPort: 9898,
									Protocol:      corev1.ProtocolTCP,
								},
							},
							Command: []string{
								"./podinfo",
								"--port=9898",
							},
						},
					},
				},
			},
		},
	}

	return d
}

func newTestDeploymentUpdated() *appsv1.Deployment {
	d := &appsv1.Deployment{
		TypeMeta: metav1.TypeMeta{APIVersion: appsv1.SchemeGroupVersion.String()},
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "default",
			Name:      "podinfo",
		},
		Spec: appsv1.DeploymentSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{
					"app": "podinfo",
				},
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						"app": "podinfo",
					},
				},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{
						{
							Name:  "podinfo",
							Image: "quay.io/stefanprodan/podinfo:1.2.1",
							Ports: []corev1.ContainerPort{
								{
									Name:          "http",
									ContainerPort: 9898,
									Protocol:      corev1.ProtocolTCP,
								},
							},
							Command: []string{
								"./podinfo",
								"--port=9898",
							},
						},
					},
				},
			},
		},
	}

	return d
}

func newTestHPA() *hpav2.HorizontalPodAutoscaler {
	h := &hpav2.HorizontalPodAutoscaler{
		TypeMeta: metav1.TypeMeta{APIVersion: hpav2.SchemeGroupVersion.String()},
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "default",
			Name:      "podinfo",
		},
		Spec: hpav2.HorizontalPodAutoscalerSpec{
			ScaleTargetRef: hpav2.CrossVersionObjectReference{
				Name:       "podinfo",
				APIVersion: "apps/v1",
				Kind:       "Deployment",
			},
			Metrics: []hpav2.MetricSpec{
				{
					Type: "Resource",
					Resource: &hpav2.ResourceMetricSource{
						Name:                     "cpu",
						TargetAverageUtilization: int32p(99),
					},
				},
			},
		},
	}

	return h
}
func TestCanaryDeployer_Sync(t *testing.T) {
-	canary := newTestCanary()
-	dep := newTestDeployment()
-	hpa := newTestHPA()
-
-	flaggerClient := fakeFlagger.NewSimpleClientset(canary)
-
-	kubeClient := fake.NewSimpleClientset(dep, hpa)
-
-	logger, _ := logging.NewLogger("debug")
-	deployer := &CanaryDeployer{
-		flaggerClient: flaggerClient,
-		kubeClient:    kubeClient,
-		logger:        logger,
-	}
-
-	err := deployer.Sync(canary)
+	mocks := SetupMocks()
+	err := mocks.deployer.Sync(mocks.canary)
	if err != nil {
		t.Fatal(err.Error())
	}

-	depPrimary, err := kubeClient.AppsV1().Deployments("default").Get("podinfo-primary", metav1.GetOptions{})
+	depPrimary, err := mocks.kubeClient.AppsV1().Deployments("default").Get("podinfo-primary", metav1.GetOptions{})
	if err != nil {
		t.Fatal(err.Error())
	}

+	dep := newTestDeployment()
+	configMap := NewTestConfigMap()
+	secret := NewTestSecret()

	primaryImage := depPrimary.Spec.Template.Spec.Containers[0].Image
	sourceImage := dep.Spec.Template.Spec.Containers[0].Image
@@ -205,7 +29,7 @@ func TestCanaryDeployer_Sync(t *testing.T) {
		t.Errorf("Got image %s wanted %s", primaryImage, sourceImage)
	}

-	hpaPrimary, err := kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers("default").Get("podinfo-primary", metav1.GetOptions{})
+	hpaPrimary, err := mocks.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers("default").Get("podinfo-primary", metav1.GetOptions{})
	if err != nil {
		t.Fatal(err.Error())
	}
@@ -213,37 +37,76 @@ func TestCanaryDeployer_Sync(t *testing.T) {
	if hpaPrimary.Spec.ScaleTargetRef.Name != depPrimary.Name {
		t.Errorf("Got HPA target %s wanted %s", hpaPrimary.Spec.ScaleTargetRef.Name, depPrimary.Name)
	}

+	configPrimary, err := mocks.kubeClient.CoreV1().ConfigMaps("default").Get("podinfo-config-env-primary", metav1.GetOptions{})
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+
+	if configPrimary.Data["color"] != configMap.Data["color"] {
+		t.Errorf("Got ConfigMap color %s wanted %s", configPrimary.Data["color"], configMap.Data["color"])
+	}
+
+	configPrimaryEnv, err := mocks.kubeClient.CoreV1().ConfigMaps("default").Get("podinfo-config-all-env-primary", metav1.GetOptions{})
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+
+	if configPrimaryEnv.Data["color"] != configMap.Data["color"] {
+		t.Errorf("Got ConfigMap %s wanted %s", configPrimaryEnv.Data["color"], configMap.Data["color"])
+	}
+
+	configPrimaryVol, err := mocks.kubeClient.CoreV1().ConfigMaps("default").Get("podinfo-config-vol-primary", metav1.GetOptions{})
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+
+	if configPrimaryVol.Data["color"] != configMap.Data["color"] {
+		t.Errorf("Got ConfigMap color %s wanted %s", configPrimaryVol.Data["color"], configMap.Data["color"])
+	}
+
+	secretPrimary, err := mocks.kubeClient.CoreV1().Secrets("default").Get("podinfo-secret-env-primary", metav1.GetOptions{})
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+
+	if string(secretPrimary.Data["apiKey"]) != string(secret.Data["apiKey"]) {
+		t.Errorf("Got primary secret %s wanted %s", secretPrimary.Data["apiKey"], secret.Data["apiKey"])
+	}
+
+	secretPrimaryEnv, err := mocks.kubeClient.CoreV1().Secrets("default").Get("podinfo-secret-all-env-primary", metav1.GetOptions{})
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+
+	if string(secretPrimaryEnv.Data["apiKey"]) != string(secret.Data["apiKey"]) {
+		t.Errorf("Got primary secret %s wanted %s", secretPrimaryEnv.Data["apiKey"], secret.Data["apiKey"])
+	}
+
+	secretPrimaryVol, err := mocks.kubeClient.CoreV1().Secrets("default").Get("podinfo-secret-vol-primary", metav1.GetOptions{})
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+
+	if string(secretPrimaryVol.Data["apiKey"]) != string(secret.Data["apiKey"]) {
+		t.Errorf("Got primary secret %s wanted %s", secretPrimaryVol.Data["apiKey"], secret.Data["apiKey"])
+	}
}

func TestCanaryDeployer_IsNewSpec(t *testing.T) {
-	canary := newTestCanary()
-	dep := newTestDeployment()
-	dep2 := newTestDeploymentUpdated()
-
-	hpa := newTestHPA()
-
-	flaggerClient := fakeFlagger.NewSimpleClientset(canary)
-
-	kubeClient := fake.NewSimpleClientset(dep, hpa)
-
-	logger, _ := logging.NewLogger("debug")
-	deployer := &CanaryDeployer{
-		flaggerClient: flaggerClient,
-		kubeClient:    kubeClient,
-		logger:        logger,
-	}
-
-	err := deployer.Sync(canary)
+	mocks := SetupMocks()
+	err := mocks.deployer.Sync(mocks.canary)
	if err != nil {
		t.Fatal(err.Error())
	}

-	_, err = kubeClient.AppsV1().Deployments("default").Update(dep2)
+	dep2 := newTestDeploymentV2()
+	_, err = mocks.kubeClient.AppsV1().Deployments("default").Update(dep2)
	if err != nil {
		t.Fatal(err.Error())
	}

-	isNew, err := deployer.IsNewSpec(canary)
+	isNew, err := mocks.deployer.IsNewSpec(mocks.canary)
	if err != nil {
		t.Fatal(err.Error())
	}
@@ -254,39 +117,30 @@ func TestCanaryDeployer_IsNewSpec(t *testing.T) {
}

func TestCanaryDeployer_Promote(t *testing.T) {
-	canary := newTestCanary()
-	dep := newTestDeployment()
-	dep2 := newTestDeploymentUpdated()
-
-	hpa := newTestHPA()
-
-	flaggerClient := fakeFlagger.NewSimpleClientset(canary)
-
-	kubeClient := fake.NewSimpleClientset(dep, hpa)
-
-	logger, _ := logging.NewLogger("debug")
-	deployer := &CanaryDeployer{
-		flaggerClient: flaggerClient,
-		kubeClient:    kubeClient,
-		logger:        logger,
-	}
-
-	err := deployer.Sync(canary)
+	mocks := SetupMocks()
+	err := mocks.deployer.Sync(mocks.canary)
	if err != nil {
		t.Fatal(err.Error())
	}

-	_, err = kubeClient.AppsV1().Deployments("default").Update(dep2)
+	dep2 := newTestDeploymentV2()
+	_, err = mocks.kubeClient.AppsV1().Deployments("default").Update(dep2)
	if err != nil {
		t.Fatal(err.Error())
	}

-	err = deployer.Promote(canary)
+	config2 := NewTestConfigMapV2()
+	_, err = mocks.kubeClient.CoreV1().ConfigMaps("default").Update(config2)
	if err != nil {
		t.Fatal(err.Error())
	}

-	depPrimary, err := kubeClient.AppsV1().Deployments("default").Get("podinfo-primary", metav1.GetOptions{})
+	err = mocks.deployer.Promote(mocks.canary)
	if err != nil {
		t.Fatal(err.Error())
	}

+	depPrimary, err := mocks.kubeClient.AppsV1().Deployments("default").Get("podinfo-primary", metav1.GetOptions{})
	if err != nil {
		t.Fatal(err.Error())
	}
@@ -296,67 +150,48 @@ func TestCanaryDeployer_Promote(t *testing.T) {
	if primaryImage != sourceImage {
		t.Errorf("Got image %s wanted %s", primaryImage, sourceImage)
	}

+	configPrimary, err := mocks.kubeClient.CoreV1().ConfigMaps("default").Get("podinfo-config-env-primary", metav1.GetOptions{})
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+
+	if configPrimary.Data["color"] != config2.Data["color"] {
+		t.Errorf("Got primary ConfigMap color %s wanted %s", configPrimary.Data["color"], config2.Data["color"])
+	}
}

func TestCanaryDeployer_IsReady(t *testing.T) {
-	canary := newTestCanary()
-	dep := newTestDeployment()
-	hpa := newTestHPA()
-
-	flaggerClient := fakeFlagger.NewSimpleClientset(canary)
-
-	kubeClient := fake.NewSimpleClientset(dep, hpa)
-
-	logger, _ := logging.NewLogger("debug")
-	deployer := &CanaryDeployer{
-		flaggerClient: flaggerClient,
-		kubeClient:    kubeClient,
-		logger:        logger,
-	}
-
-	err := deployer.Sync(canary)
+	mocks := SetupMocks()
+	err := mocks.deployer.Sync(mocks.canary)
	if err != nil {
		t.Fatal(err.Error())
	}

-	_, err = deployer.IsPrimaryReady(canary)
+	_, err = mocks.deployer.IsPrimaryReady(mocks.canary)
	if err != nil {
		t.Fatal(err.Error())
	}

-	_, err = deployer.IsCanaryReady(canary)
+	_, err = mocks.deployer.IsCanaryReady(mocks.canary)
	if err != nil {
		t.Fatal(err.Error())
	}
}

func TestCanaryDeployer_SetFailedChecks(t *testing.T) {
-	canary := newTestCanary()
-	dep := newTestDeployment()
-	hpa := newTestHPA()
-
-	flaggerClient := fakeFlagger.NewSimpleClientset(canary)
-
-	kubeClient := fake.NewSimpleClientset(dep, hpa)
-
-	logger, _ := logging.NewLogger("debug")
-	deployer := &CanaryDeployer{
-		flaggerClient: flaggerClient,
-		kubeClient:    kubeClient,
-		logger:        logger,
-	}
-
-	err := deployer.Sync(canary)
+	mocks := SetupMocks()
+	err := mocks.deployer.Sync(mocks.canary)
	if err != nil {
		t.Fatal(err.Error())
	}

-	err = deployer.SetStatusFailedChecks(canary, 1)
+	err = mocks.deployer.SetStatusFailedChecks(mocks.canary, 1)
	if err != nil {
		t.Fatal(err.Error())
	}

-	res, err := flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
+	res, err := mocks.flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
	if err != nil {
		t.Fatal(err.Error())
	}
@@ -367,32 +202,18 @@ func TestCanaryDeployer_SetFailedChecks(t *testing.T) {
}

func TestCanaryDeployer_SetState(t *testing.T) {
-	canary := newTestCanary()
-	dep := newTestDeployment()
-	hpa := newTestHPA()
-
-	flaggerClient := fakeFlagger.NewSimpleClientset(canary)
-
-	kubeClient := fake.NewSimpleClientset(dep, hpa)
-
-	logger, _ := logging.NewLogger("debug")
-	deployer := &CanaryDeployer{
-		flaggerClient: flaggerClient,
-		kubeClient:    kubeClient,
-		logger:        logger,
-	}
-
-	err := deployer.Sync(canary)
+	mocks := SetupMocks()
+	err := mocks.deployer.Sync(mocks.canary)
	if err != nil {
		t.Fatal(err.Error())
	}

-	err = deployer.SetStatusPhase(canary, v1alpha3.CanaryProgressing)
+	err = mocks.deployer.SetStatusPhase(mocks.canary, v1alpha3.CanaryProgressing)
	if err != nil {
		t.Fatal(err.Error())
	}

-	res, err := flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
+	res, err := mocks.flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
	if err != nil {
		t.Fatal(err.Error())
	}
@@ -403,22 +224,8 @@ func TestCanaryDeployer_SetState(t *testing.T) {
}

func TestCanaryDeployer_SyncStatus(t *testing.T) {
-	canary := newTestCanary()
-	dep := newTestDeployment()
-	hpa := newTestHPA()
-
-	flaggerClient := fakeFlagger.NewSimpleClientset(canary)
-
-	kubeClient := fake.NewSimpleClientset(dep, hpa)
-
-	logger, _ := logging.NewLogger("debug")
-	deployer := &CanaryDeployer{
-		flaggerClient: flaggerClient,
-		kubeClient:    kubeClient,
-		logger:        logger,
-	}
-
-	err := deployer.Sync(canary)
+	mocks := SetupMocks()
+	err := mocks.deployer.Sync(mocks.canary)
	if err != nil {
		t.Fatal(err.Error())
	}
@@ -427,12 +234,12 @@ func TestCanaryDeployer_SyncStatus(t *testing.T) {
		Phase:        v1alpha3.CanaryProgressing,
		FailedChecks: 2,
	}
-	err = deployer.SyncStatus(canary, status)
+	err = mocks.deployer.SyncStatus(mocks.canary, status)
	if err != nil {
		t.Fatal(err.Error())
	}

-	res, err := flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
+	res, err := mocks.flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
	if err != nil {
		t.Fatal(err.Error())
	}
@@ -444,32 +251,27 @@ func TestCanaryDeployer_SyncStatus(t *testing.T) {
|
||||
if res.Status.FailedChecks != status.FailedChecks {
|
||||
t.Errorf("Got failed checks %v wanted %v", res.Status.FailedChecks, status.FailedChecks)
|
||||
}
|
||||
|
||||
if res.Status.TrackedConfigs == nil {
|
||||
t.Fatalf("Status tracking configs are empty")
|
||||
}
|
||||
configs := *res.Status.TrackedConfigs
|
||||
secret := NewTestSecret()
|
||||
if _, exists := configs["secret/"+secret.GetName()]; !exists {
|
||||
t.Errorf("Secret %s not found in status", secret.GetName())
|
||||
}
|
||||
}
|
||||
|
||||
func TestCanaryDeployer_Scale(t *testing.T) {
|
||||
canary := newTestCanary()
|
||||
dep := newTestDeployment()
|
||||
hpa := newTestHPA()
|
||||
|
||||
flaggerClient := fakeFlagger.NewSimpleClientset(canary)
|
||||
|
||||
kubeClient := fake.NewSimpleClientset(dep, hpa)
|
||||
|
||||
logger, _ := logging.NewLogger("debug")
|
||||
deployer := &CanaryDeployer{
|
||||
flaggerClient: flaggerClient,
|
||||
kubeClient: kubeClient,
|
||||
logger: logger,
|
||||
}
|
||||
|
||||
err := deployer.Sync(canary)
|
||||
mocks := SetupMocks()
|
||||
err := mocks.deployer.Sync(mocks.canary)
|
||||
if err != nil {
|
||||
t.Fatal(err.Error())
|
||||
}
|
||||
|
||||
err = deployer.Scale(canary, 2)
|
||||
err = mocks.deployer.Scale(mocks.canary, 2)
|
||||
|
||||
c, err := kubeClient.AppsV1().Deployments("default").Get("podinfo", metav1.GetOptions{})
|
||||
c, err := mocks.kubeClient.AppsV1().Deployments("default").Get("podinfo", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
t.Fatal(err.Error())
|
||||
}
|
||||
|
||||
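The SetupMocks helper that these refactored tests call is not part of this compare view. Judging from the call sites (mocks.canary, mocks.deployer, mocks.router, mocks.ctrl and the fake clients reached through it), it presumably bundles the per-test boilerplate deleted above into a single shared fixture. A minimal sketch of what such a helper could look like; the struct shape, the seeded ConfigMap/Secret and the NewTestConfigMap name are inferred from the tests, not copied from the real helper:

// Hypothetical reconstruction of the shared fixture; the real helper lives
// in a test file outside this excerpt, presumably next to the
// newTestController func removed from the scheduler tests below.
type Mocks struct {
    canary        *v1alpha3.Canary
    kubeClient    kubernetes.Interface
    flaggerClient clientset.Interface
    istioClient   istioclientset.Interface
    deployer      CanaryDeployer
    router        CanaryRouter
    ctrl          *Controller
}

func SetupMocks() Mocks {
    canary := newTestCanary()
    flaggerClient := fakeFlagger.NewSimpleClientset(canary)
    // the config tracking assertions suggest the fake cluster is also
    // seeded with a ConfigMap and a Secret (NewTestConfigMap is assumed)
    kubeClient := fake.NewSimpleClientset(newTestDeployment(), newTestHPA(),
        NewTestConfigMap(), NewTestSecret())
    istioClient := fakeIstio.NewSimpleClientset()
    logger, _ := logging.NewLogger("debug")

    deployer := CanaryDeployer{
        flaggerClient: flaggerClient,
        kubeClient:    kubeClient,
        logger:        logger,
        configTracker: ConfigTracker{ // field assumed from deployer.configTracker in scheduler.go
            kubeClient:    kubeClient,
            flaggerClient: flaggerClient,
            logger:        logger,
        },
    }
    router := CanaryRouter{
        flaggerClient: flaggerClient,
        kubeClient:    kubeClient,
        istioClient:   istioClient,
        logger:        logger,
    }
    observer := CanaryObserver{metricsServer: "fake"}
    ctrl := newTestController(kubeClient, istioClient, flaggerClient, logger, deployer, router, observer)

    return Mocks{
        canary:        canary,
        kubeClient:    kubeClient,
        flaggerClient: flaggerClient,
        istioClient:   istioClient,
        deployer:      deployer,
        router:        router,
        ctrl:          ctrl,
    }
}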
@@ -5,37 +5,17 @@ import (
 	"testing"
 
 	istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
-	fakeIstio "github.com/knative/pkg/client/clientset/versioned/fake"
-	fakeFlagger "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/fake"
-	"github.com/stefanprodan/flagger/pkg/logging"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/client-go/kubernetes/fake"
 )
 
 func TestCanaryRouter_Sync(t *testing.T) {
-	canary := newTestCanary()
-	dep := newTestDeployment()
-	hpa := newTestHPA()
-
-	flaggerClient := fakeFlagger.NewSimpleClientset(canary)
-	kubeClient := fake.NewSimpleClientset(dep, hpa)
-	istioClient := fakeIstio.NewSimpleClientset()
-
-	logger, _ := logging.NewLogger("debug")
-
-	router := &CanaryRouter{
-		flaggerClient: flaggerClient,
-		kubeClient:    kubeClient,
-		istioClient:   istioClient,
-		logger:        logger,
-	}
-
-	err := router.Sync(canary)
+	mocks := SetupMocks()
+	err := mocks.router.Sync(mocks.canary)
 	if err != nil {
 		t.Fatal(err.Error())
 	}
 
-	canarySvc, err := kubeClient.CoreV1().Services("default").Get("podinfo", metav1.GetOptions{})
+	canarySvc, err := mocks.kubeClient.CoreV1().Services("default").Get("podinfo", metav1.GetOptions{})
 	if err != nil {
 		t.Fatal(err.Error())
 	}
@@ -48,7 +28,7 @@ func TestCanaryRouter_Sync(t *testing.T) {
 		t.Errorf("Got svc port %v wanted %v", canarySvc.Spec.Ports[0].Port, 9898)
 	}
 
-	primarySvc, err := kubeClient.CoreV1().Services("default").Get("podinfo-primary", metav1.GetOptions{})
+	primarySvc, err := mocks.kubeClient.CoreV1().Services("default").Get("podinfo-primary", metav1.GetOptions{})
 	if err != nil {
 		t.Fatal(err.Error())
 	}
@@ -61,7 +41,7 @@ func TestCanaryRouter_Sync(t *testing.T) {
 		t.Errorf("Got primary svc port %v wanted %v", primarySvc.Spec.Ports[0].Port, 9898)
 	}
 
-	vs, err := istioClient.NetworkingV1alpha3().VirtualServices("default").Get("podinfo", metav1.GetOptions{})
+	vs, err := mocks.istioClient.NetworkingV1alpha3().VirtualServices("default").Get("podinfo", metav1.GetOptions{})
 	if err != nil {
 		t.Fatal(err.Error())
 	}
@@ -76,29 +56,13 @@ func TestCanaryRouter_Sync(t *testing.T) {
 }
 
 func TestCanaryRouter_GetRoutes(t *testing.T) {
-	canary := newTestCanary()
-	dep := newTestDeployment()
-	hpa := newTestHPA()
-
-	flaggerClient := fakeFlagger.NewSimpleClientset(canary)
-	kubeClient := fake.NewSimpleClientset(dep, hpa)
-	istioClient := fakeIstio.NewSimpleClientset()
-
-	logger, _ := logging.NewLogger("debug")
-
-	router := &CanaryRouter{
-		flaggerClient: flaggerClient,
-		kubeClient:    kubeClient,
-		istioClient:   istioClient,
-		logger:        logger,
-	}
-
-	err := router.Sync(canary)
+	mocks := SetupMocks()
+	err := mocks.router.Sync(mocks.canary)
 	if err != nil {
 		t.Fatal(err.Error())
 	}
 
-	p, c, err := router.GetRoutes(canary)
+	p, c, err := mocks.router.GetRoutes(mocks.canary)
 	if err != nil {
 		t.Fatal(err.Error())
 	}
@@ -113,29 +77,13 @@ func TestCanaryRouter_GetRoutes(t *testing.T) {
 }
 
 func TestCanaryRouter_SetRoutes(t *testing.T) {
-	canary := newTestCanary()
-	dep := newTestDeployment()
-	hpa := newTestHPA()
-
-	flaggerClient := fakeFlagger.NewSimpleClientset(canary)
-	kubeClient := fake.NewSimpleClientset(dep, hpa)
-	istioClient := fakeIstio.NewSimpleClientset()
-
-	logger, _ := logging.NewLogger("debug")
-
-	router := &CanaryRouter{
-		flaggerClient: flaggerClient,
-		kubeClient:    kubeClient,
-		istioClient:   istioClient,
-		logger:        logger,
-	}
-
-	err := router.Sync(canary)
+	mocks := SetupMocks()
+	err := mocks.router.Sync(mocks.canary)
 	if err != nil {
 		t.Fatal(err.Error())
 	}
 
-	p, c, err := router.GetRoutes(canary)
+	p, c, err := mocks.router.GetRoutes(mocks.canary)
 	if err != nil {
 		t.Fatal(err.Error())
 	}
@@ -143,12 +91,12 @@ func TestCanaryRouter_SetRoutes(t *testing.T) {
 	p.Weight = 50
 	c.Weight = 50
 
-	err = router.SetRoutes(canary, p, c)
+	err = mocks.router.SetRoutes(mocks.canary, p, c)
 	if err != nil {
 		t.Fatal(err.Error())
 	}
 
-	vs, err := istioClient.NetworkingV1alpha3().VirtualServices("default").Get("podinfo", metav1.GetOptions{})
+	vs, err := mocks.istioClient.NetworkingV1alpha3().VirtualServices("default").Get("podinfo", metav1.GetOptions{})
 	if err != nil {
 		t.Fatal(err.Error())
 	}
@@ -158,10 +106,10 @@ func TestCanaryRouter_SetRoutes(t *testing.T) {
 
 	for _, http := range vs.Spec.Http {
 		for _, route := range http.Route {
-			if route.Destination.Host == fmt.Sprintf("%s-primary", canary.Spec.TargetRef.Name) {
+			if route.Destination.Host == fmt.Sprintf("%s-primary", mocks.canary.Spec.TargetRef.Name) {
 				pRoute = route
 			}
-			if route.Destination.Host == canary.Spec.TargetRef.Name {
+			if route.Destination.Host == mocks.canary.Spec.TargetRef.Name {
 				cRoute = route
 			}
 		}
 
@@ -91,10 +91,13 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
 		return
 	}
 
-	if ok, err := c.deployer.ShouldAdvance(cd); !ok {
-		if err != nil {
-			c.recordEventWarningf(cd, "%v", err)
-		}
+	shouldAdvance, err := c.deployer.ShouldAdvance(cd)
+	if err != nil {
+		c.recordEventWarningf(cd, "%v", err)
+		return
+	}
+
+	if !shouldAdvance {
 		return
 	}
 
@@ -123,7 +126,7 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
 	c.recorder.SetWeight(cd, primaryRoute.Weight, canaryRoute.Weight)
 
 	// check if canary analysis should start (canary revision has changes) or continue
-	if ok := c.checkCanaryStatus(cd); !ok {
+	if ok := c.checkCanaryStatus(cd, shouldAdvance); !ok {
 		return
 	}
 
@@ -291,7 +294,7 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
 	}
 }
 
-func (c *Controller) checkCanaryStatus(cd *flaggerv1.Canary) bool {
+func (c *Controller) checkCanaryStatus(cd *flaggerv1.Canary, shouldAdvance bool) bool {
 	c.recorder.SetStatus(cd)
 	if cd.Status.Phase == flaggerv1.CanaryProgressing {
 		return true
@@ -309,11 +312,11 @@ func (c *Controller) checkCanaryStatus(cd *flaggerv1.Canary) bool {
 		return false
 	}
 
-	if diff, err := c.deployer.IsNewSpec(cd); diff {
+	if shouldAdvance {
 		c.recordEventInfof(cd, "New revision detected! Scaling up %s.%s", cd.Spec.TargetRef.Name, cd.Namespace)
 		c.sendNotification(cd, "New revision detected, starting canary analysis.",
 			true, false)
-		if err = c.deployer.Scale(cd, 1); err != nil {
+		if err := c.deployer.Scale(cd, 1); err != nil {
 			c.recordEventErrorf(cd, "%v", err)
 			return false
 		}
@@ -332,6 +335,9 @@ func (c *Controller) hasCanaryRevisionChanged(cd *flaggerv1.Canary) bool {
 		if diff, _ := c.deployer.IsNewSpec(cd); diff {
 			return true
 		}
+		if diff, _ := c.deployer.configTracker.HasConfigChanged(cd); diff {
+			return true
+		}
 	}
 	return false
 }
 
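A behavioral detail worth calling out in this refactor: the old gate surfaced the ShouldAdvance error only when ok was false, so an ok == true result paired with a non-nil error would advance the canary while silently dropping the error, whereas the new gate returns on any error before consulting shouldAdvance. A self-contained sketch contrasting the two (the error-returning stub is made up for illustration):

package main

import "fmt"

// stand-in for deployer.ShouldAdvance: hypothetically returns
// ok == true together with a non-nil error
func shouldAdvance() (bool, error) {
    return true, fmt.Errorf("transient status query error")
}

func oldGate() string {
    if ok, err := shouldAdvance(); !ok {
        if err != nil {
            return "warned and stopped"
        }
        return "stopped"
    }
    return "advanced, error silently dropped"
}

func newGate() string {
    ok, err := shouldAdvance()
    if err != nil {
        return "warned and stopped" // errors now always end the reconcile early
    }
    if !ok {
        return "stopped"
    }
    return "advanced"
}

func main() {
    fmt.Println("old gate:", oldGate()) // old gate: advanced, error silently dropped
    fmt.Println("new gate:", newGate()) // new gate: warned and stopped
}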
@@ -1,137 +1,36 @@
 package controller
 
 import (
-	"go.uber.org/zap"
-	"k8s.io/client-go/kubernetes"
-	"sync"
-	"testing"
-	"time"
-
-	istioclientset "github.com/knative/pkg/client/clientset/versioned"
-	fakeIstio "github.com/knative/pkg/client/clientset/versioned/fake"
 	"github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
-	clientset "github.com/stefanprodan/flagger/pkg/client/clientset/versioned"
-	fakeFlagger "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/fake"
-	informers "github.com/stefanprodan/flagger/pkg/client/informers/externalversions"
-	"github.com/stefanprodan/flagger/pkg/logging"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/client-go/kubernetes/fake"
-	"k8s.io/client-go/tools/record"
-	"k8s.io/client-go/util/workqueue"
+	"testing"
 )
 
-var (
-	alwaysReady        = func() bool { return true }
-	noResyncPeriodFunc = func() time.Duration { return 0 }
-)
-
-func newTestController(
-	kubeClient kubernetes.Interface,
-	istioClient istioclientset.Interface,
-	flaggerClient clientset.Interface,
-	logger *zap.SugaredLogger,
-	deployer CanaryDeployer,
-	router CanaryRouter,
-	observer CanaryObserver,
-) *Controller {
-	flaggerInformerFactory := informers.NewSharedInformerFactory(flaggerClient, noResyncPeriodFunc())
-	flaggerInformer := flaggerInformerFactory.Flagger().V1alpha3().Canaries()
-
-	ctrl := &Controller{
-		kubeClient:    kubeClient,
-		istioClient:   istioClient,
-		flaggerClient: flaggerClient,
-		flaggerLister: flaggerInformer.Lister(),
-		flaggerSynced: flaggerInformer.Informer().HasSynced,
-		workqueue:     workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), controllerAgentName),
-		eventRecorder: &record.FakeRecorder{},
-		logger:        logger,
-		canaries:      new(sync.Map),
-		flaggerWindow: time.Second,
-		deployer:      deployer,
-		router:        router,
-		observer:      observer,
-		recorder:      NewCanaryRecorder(false),
-	}
-	ctrl.flaggerSynced = alwaysReady
-
-	return ctrl
-}
-
 func TestScheduler_Init(t *testing.T) {
-	canary := newTestCanary()
-	dep := newTestDeployment()
-	hpa := newTestHPA()
+	mocks := SetupMocks()
+	mocks.ctrl.advanceCanary("podinfo", "default", false)
 
-	flaggerClient := fakeFlagger.NewSimpleClientset(canary)
-	kubeClient := fake.NewSimpleClientset(dep, hpa)
-	istioClient := fakeIstio.NewSimpleClientset()
-
-	logger, _ := logging.NewLogger("debug")
-	deployer := CanaryDeployer{
-		flaggerClient: flaggerClient,
-		kubeClient:    kubeClient,
-		logger:        logger,
-	}
-	router := CanaryRouter{
-		flaggerClient: flaggerClient,
-		kubeClient:    kubeClient,
-		istioClient:   istioClient,
-		logger:        logger,
-	}
-	observer := CanaryObserver{
-		metricsServer: "fake",
-	}
-	ctrl := newTestController(kubeClient, istioClient, flaggerClient, logger, deployer, router, observer)
-
-	ctrl.advanceCanary("podinfo", "default", false)
-
-	_, err := kubeClient.AppsV1().Deployments("default").Get("podinfo-primary", metav1.GetOptions{})
+	_, err := mocks.kubeClient.AppsV1().Deployments("default").Get("podinfo-primary", metav1.GetOptions{})
 	if err != nil {
 		t.Fatal(err.Error())
 	}
 }
 
 func TestScheduler_NewRevision(t *testing.T) {
-	canary := newTestCanary()
-	dep := newTestDeployment()
-	hpa := newTestHPA()
-
-	flaggerClient := fakeFlagger.NewSimpleClientset(canary)
-	kubeClient := fake.NewSimpleClientset(dep, hpa)
-	istioClient := fakeIstio.NewSimpleClientset()
-
-	logger, _ := logging.NewLogger("debug")
-	deployer := CanaryDeployer{
-		flaggerClient: flaggerClient,
-		kubeClient:    kubeClient,
-		logger:        logger,
-	}
-	router := CanaryRouter{
-		flaggerClient: flaggerClient,
-		kubeClient:    kubeClient,
-		istioClient:   istioClient,
-		logger:        logger,
-	}
-	observer := CanaryObserver{
-		metricsServer: "fake",
-	}
-	ctrl := newTestController(kubeClient, istioClient, flaggerClient, logger, deployer, router, observer)
-
 	// init
-	ctrl.advanceCanary("podinfo", "default", false)
+	mocks := SetupMocks()
+	mocks.ctrl.advanceCanary("podinfo", "default", false)
 
 	// update
-	dep2 := newTestDeploymentUpdated()
-	_, err := kubeClient.AppsV1().Deployments("default").Update(dep2)
+	dep2 := newTestDeploymentV2()
+	_, err := mocks.kubeClient.AppsV1().Deployments("default").Update(dep2)
 	if err != nil {
 		t.Fatal(err.Error())
 	}
 
 	// detect changes
-	ctrl.advanceCanary("podinfo", "default", false)
+	mocks.ctrl.advanceCanary("podinfo", "default", false)
 
-	c, err := kubeClient.AppsV1().Deployments("default").Get("podinfo", metav1.GetOptions{})
+	c, err := mocks.kubeClient.AppsV1().Deployments("default").Get("podinfo", metav1.GetOptions{})
 	if err != nil {
 		t.Fatal(err.Error())
 	}
@@ -142,44 +41,20 @@ func TestScheduler_NewRevision(t *testing.T) {
 }
 
 func TestScheduler_Rollback(t *testing.T) {
-	canary := newTestCanary()
-	dep := newTestDeployment()
-	hpa := newTestHPA()
-
-	flaggerClient := fakeFlagger.NewSimpleClientset(canary)
-	kubeClient := fake.NewSimpleClientset(dep, hpa)
-	istioClient := fakeIstio.NewSimpleClientset()
-
-	logger, _ := logging.NewLogger("debug")
-	deployer := CanaryDeployer{
-		flaggerClient: flaggerClient,
-		kubeClient:    kubeClient,
-		logger:        logger,
-	}
-	router := CanaryRouter{
-		flaggerClient: flaggerClient,
-		kubeClient:    kubeClient,
-		istioClient:   istioClient,
-		logger:        logger,
-	}
-	observer := CanaryObserver{
-		metricsServer: "fake",
-	}
-	ctrl := newTestController(kubeClient, istioClient, flaggerClient, logger, deployer, router, observer)
-
+	mocks := SetupMocks()
 	// init
-	ctrl.advanceCanary("podinfo", "default", true)
+	mocks.ctrl.advanceCanary("podinfo", "default", true)
 
 	// update failed checks to max
-	err := deployer.SyncStatus(canary, v1alpha3.CanaryStatus{Phase: v1alpha3.CanaryProgressing, FailedChecks: 11})
+	err := mocks.deployer.SyncStatus(mocks.canary, v1alpha3.CanaryStatus{Phase: v1alpha3.CanaryProgressing, FailedChecks: 11})
 	if err != nil {
 		t.Fatal(err.Error())
 	}
 
 	// detect changes
-	ctrl.advanceCanary("podinfo", "default", true)
+	mocks.ctrl.advanceCanary("podinfo", "default", true)
 
-	c, err := flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
+	c, err := mocks.flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
 	if err != nil {
 		t.Fatal(err.Error())
 	}
@@ -190,47 +65,23 @@ func TestScheduler_Rollback(t *testing.T) {
 }
 
 func TestScheduler_NewRevisionReset(t *testing.T) {
-	canary := newTestCanary()
-	dep := newTestDeployment()
-	hpa := newTestHPA()
-
-	flaggerClient := fakeFlagger.NewSimpleClientset(canary)
-	kubeClient := fake.NewSimpleClientset(dep, hpa)
-	istioClient := fakeIstio.NewSimpleClientset()
-
-	logger, _ := logging.NewLogger("debug")
-	deployer := CanaryDeployer{
-		flaggerClient: flaggerClient,
-		kubeClient:    kubeClient,
-		logger:        logger,
-	}
-	router := CanaryRouter{
-		flaggerClient: flaggerClient,
-		kubeClient:    kubeClient,
-		istioClient:   istioClient,
-		logger:        logger,
-	}
-	observer := CanaryObserver{
-		metricsServer: "fake",
-	}
-	ctrl := newTestController(kubeClient, istioClient, flaggerClient, logger, deployer, router, observer)
-
+	mocks := SetupMocks()
 	// init
-	ctrl.advanceCanary("podinfo", "default", false)
+	mocks.ctrl.advanceCanary("podinfo", "default", false)
 
 	// first update
-	dep2 := newTestDeploymentUpdated()
-	_, err := kubeClient.AppsV1().Deployments("default").Update(dep2)
+	dep2 := newTestDeploymentV2()
+	_, err := mocks.kubeClient.AppsV1().Deployments("default").Update(dep2)
 	if err != nil {
 		t.Fatal(err.Error())
 	}
 
 	// detect changes
-	ctrl.advanceCanary("podinfo", "default", true)
+	mocks.ctrl.advanceCanary("podinfo", "default", true)
 	// advance
-	ctrl.advanceCanary("podinfo", "default", true)
+	mocks.ctrl.advanceCanary("podinfo", "default", true)
 
-	primaryRoute, canaryRoute, err := router.GetRoutes(canary)
+	primaryRoute, canaryRoute, err := mocks.router.GetRoutes(mocks.canary)
 	if err != nil {
 		t.Fatal(err.Error())
 	}
@@ -245,15 +96,15 @@ func TestScheduler_NewRevisionReset(t *testing.T) {
 
 	// second update
 	dep2.Spec.Template.Spec.ServiceAccountName = "test"
-	_, err = kubeClient.AppsV1().Deployments("default").Update(dep2)
+	_, err = mocks.kubeClient.AppsV1().Deployments("default").Update(dep2)
 	if err != nil {
 		t.Fatal(err.Error())
 	}
 
 	// detect changes
-	ctrl.advanceCanary("podinfo", "default", true)
+	mocks.ctrl.advanceCanary("podinfo", "default", true)
 
-	primaryRoute, canaryRoute, err = router.GetRoutes(canary)
+	primaryRoute, canaryRoute, err = mocks.router.GetRoutes(mocks.canary)
 	if err != nil {
 		t.Fatal(err.Error())
 	}
@@ -268,63 +119,54 @@ func TestScheduler_NewRevisionReset(t *testing.T) {
 }
 
 func TestScheduler_Promotion(t *testing.T) {
-	canary := newTestCanary()
-	dep := newTestDeployment()
-	hpa := newTestHPA()
-
-	flaggerClient := fakeFlagger.NewSimpleClientset(canary)
-	kubeClient := fake.NewSimpleClientset(dep, hpa)
-	istioClient := fakeIstio.NewSimpleClientset()
-
-	logger, _ := logging.NewLogger("debug")
-	deployer := CanaryDeployer{
-		flaggerClient: flaggerClient,
-		kubeClient:    kubeClient,
-		logger:        logger,
-	}
-	router := CanaryRouter{
-		flaggerClient: flaggerClient,
-		kubeClient:    kubeClient,
-		istioClient:   istioClient,
-		logger:        logger,
-	}
-	observer := CanaryObserver{
-		metricsServer: "fake",
-	}
-	ctrl := newTestController(kubeClient, istioClient, flaggerClient, logger, deployer, router, observer)
-
+	mocks := SetupMocks()
 	// init
-	ctrl.advanceCanary("podinfo", "default", false)
+	mocks.ctrl.advanceCanary("podinfo", "default", false)
 
 	// update
-	dep2 := newTestDeploymentUpdated()
-	_, err := kubeClient.AppsV1().Deployments("default").Update(dep2)
+	dep2 := newTestDeploymentV2()
+	_, err := mocks.kubeClient.AppsV1().Deployments("default").Update(dep2)
 	if err != nil {
 		t.Fatal(err.Error())
 	}
 
-	// detect changes
-	ctrl.advanceCanary("podinfo", "default", true)
+	// detect pod spec changes
+	mocks.ctrl.advanceCanary("podinfo", "default", true)
 
-	primaryRoute, canaryRoute, err := router.GetRoutes(canary)
+	config2 := NewTestConfigMapV2()
+	_, err = mocks.kubeClient.CoreV1().ConfigMaps("default").Update(config2)
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+
+	secret2 := NewTestSecretV2()
+	_, err = mocks.kubeClient.CoreV1().Secrets("default").Update(secret2)
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+
+	// detect configs changes
+	mocks.ctrl.advanceCanary("podinfo", "default", true)
+
+	primaryRoute, canaryRoute, err := mocks.router.GetRoutes(mocks.canary)
 	if err != nil {
 		t.Fatal(err.Error())
 	}
 
 	primaryRoute.Weight = 60
 	canaryRoute.Weight = 40
-	err = ctrl.router.SetRoutes(canary, primaryRoute, canaryRoute)
+	err = mocks.ctrl.router.SetRoutes(mocks.canary, primaryRoute, canaryRoute)
 	if err != nil {
 		t.Fatal(err.Error())
 	}
 
 	// advance
-	ctrl.advanceCanary("podinfo", "default", true)
+	mocks.ctrl.advanceCanary("podinfo", "default", true)
 
 	// promote
-	ctrl.advanceCanary("podinfo", "default", true)
+	mocks.ctrl.advanceCanary("podinfo", "default", true)
 
-	primaryRoute, canaryRoute, err = router.GetRoutes(canary)
+	primaryRoute, canaryRoute, err = mocks.router.GetRoutes(mocks.canary)
 	if err != nil {
 		t.Fatal(err.Error())
 	}
@@ -337,7 +179,7 @@ func TestScheduler_Promotion(t *testing.T) {
 		t.Errorf("Got canary route %v wanted %v", canaryRoute.Weight, 0)
 	}
 
-	primaryDep, err := kubeClient.AppsV1().Deployments("default").Get("podinfo-primary", metav1.GetOptions{})
+	primaryDep, err := mocks.kubeClient.AppsV1().Deployments("default").Get("podinfo-primary", metav1.GetOptions{})
 	if err != nil {
 		t.Fatal(err.Error())
 	}
@@ -348,7 +190,25 @@ func TestScheduler_Promotion(t *testing.T) {
 		t.Errorf("Got primary image %v wanted %v", primaryImage, canaryImage)
 	}
 
-	c, err := flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
+	configPrimary, err := mocks.kubeClient.CoreV1().ConfigMaps("default").Get("podinfo-config-env-primary", metav1.GetOptions{})
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+
+	if configPrimary.Data["color"] != config2.Data["color"] {
+		t.Errorf("Got primary ConfigMap color %s wanted %s", configPrimary.Data["color"], config2.Data["color"])
+	}
+
+	secretPrimary, err := mocks.kubeClient.CoreV1().Secrets("default").Get("podinfo-secret-env-primary", metav1.GetOptions{})
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+
+	if string(secretPrimary.Data["apiKey"]) != string(secret2.Data["apiKey"]) {
+		t.Errorf("Got primary secret %s wanted %s", secretPrimary.Data["apiKey"], secret2.Data["apiKey"])
+	}
+
+	c, err := mocks.flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
 	if err != nil {
 		t.Fatal(err.Error())
 	}
 
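These new assertions close the loop on config promotion: once the canary is promoted, the -primary copies of the ConfigMap and Secret must carry the canary's updated data. The object names being fetched follow the tracker's convention of suffixing the source name with -primary (see CreatePrimaryConfigs in the new file below); a toy check of that naming:

package main

import "fmt"

func main() {
    // the primary copy of a tracked config is "<name>-primary"
    for _, name := range []string{"podinfo-config-env", "podinfo-secret-env"} {
        fmt.Println(fmt.Sprintf("%s-primary", name))
    }
    // Output:
    // podinfo-config-env-primary
    // podinfo-secret-env-primary
}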
pkg/controller/tracker.go (new file, 374 lines)
@@ -0,0 +1,374 @@
package controller

import (
	"crypto/sha256"
	"encoding/json"
	"fmt"
	flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
	clientset "github.com/stefanprodan/flagger/pkg/client/clientset/versioned"
	"go.uber.org/zap"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/kubernetes"
)

// ConfigTracker manages the operations on Kubernetes ConfigMaps and Secrets
type ConfigTracker struct {
	kubeClient    kubernetes.Interface
	flaggerClient clientset.Interface
	logger        *zap.SugaredLogger
}

type ConfigRefType string

const (
	ConfigRefMap    ConfigRefType = "configmap"
	ConfigRefSecret ConfigRefType = "secret"
)

// ConfigRef holds the reference to a tracked Kubernetes ConfigMap or Secret
type ConfigRef struct {
	Name     string
	Type     ConfigRefType
	Checksum string
}

// GetName returns the config ref type and name
func (c *ConfigRef) GetName() string {
	return fmt.Sprintf("%s/%s", c.Type, c.Name)
}

func checksum(data interface{}) string {
	jsonBytes, _ := json.Marshal(data)
	hashBytes := sha256.Sum256(jsonBytes)

	return fmt.Sprintf("%x", hashBytes[:8])
}

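The checksum helper above is what the whole config tracking mechanism hangs on: it hex-encodes the first 8 bytes of a SHA-256 hash over the JSON encoding of the object data, which is deterministic for maps because encoding/json sorts map keys. A standalone illustration:

package main

import (
    "crypto/sha256"
    "encoding/json"
    "fmt"
)

// same logic as the tracker's checksum helper above
func checksum(data interface{}) string {
    jsonBytes, _ := json.Marshal(data)
    hashBytes := sha256.Sum256(jsonBytes)
    return fmt.Sprintf("%x", hashBytes[:8])
}

func main() {
    v1 := map[string]string{"color": "blue"}
    v2 := map[string]string{"color": "green"}
    fmt.Println(checksum(v1) == checksum(v1)) // true: stable for identical data
    fmt.Println(checksum(v1) == checksum(v2)) // false: any edit changes the sum
}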
// getRefFromConfigMap transforms a Kubernetes ConfigMap into a ConfigRef
// and computes the checksum of the ConfigMap data
func (ct *ConfigTracker) getRefFromConfigMap(name string, namespace string) (*ConfigRef, error) {
	config, err := ct.kubeClient.CoreV1().ConfigMaps(namespace).Get(name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}

	return &ConfigRef{
		Name:     config.Name,
		Type:     ConfigRefMap,
		Checksum: checksum(config.Data),
	}, nil
}

// getRefFromSecret transforms a Kubernetes Secret into a ConfigRef
// and computes the checksum of the Secret data
func (ct *ConfigTracker) getRefFromSecret(name string, namespace string) (*ConfigRef, error) {
	secret, err := ct.kubeClient.CoreV1().Secrets(namespace).Get(name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}

	// ignore registry secrets (those should be set via service account)
	if secret.Type != corev1.SecretTypeOpaque &&
		secret.Type != corev1.SecretTypeBasicAuth &&
		secret.Type != corev1.SecretTypeSSHAuth &&
		secret.Type != corev1.SecretTypeTLS {
		ct.logger.Debugf("ignoring secret %s.%s type not supported %v", name, namespace, secret.Type)
		return nil, nil
	}

	return &ConfigRef{
		Name:     secret.Name,
		Type:     ConfigRefSecret,
		Checksum: checksum(secret.Data),
	}, nil
}

// GetTargetConfigs scans the target deployment for Kubernetes ConfigMaps and Secrets
// and returns a list of config references
func (ct *ConfigTracker) GetTargetConfigs(cd *flaggerv1.Canary) (map[string]ConfigRef, error) {
	res := make(map[string]ConfigRef)
	targetName := cd.Spec.TargetRef.Name
	targetDep, err := ct.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
	if err != nil {
		if errors.IsNotFound(err) {
			return res, fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace)
		}
		return res, fmt.Errorf("deployment %s.%s query error %v", targetName, cd.Namespace, err)
	}

	// scan volumes
	for _, volume := range targetDep.Spec.Template.Spec.Volumes {
		if cmv := volume.ConfigMap; cmv != nil {
			config, err := ct.getRefFromConfigMap(cmv.Name, cd.Namespace)
			if err != nil {
				ct.logger.Errorf("configMap %s.%s query error %v", cmv.Name, cd.Namespace, err)
				continue
			}
			if config != nil {
				res[config.GetName()] = *config
			}
		}

		if sv := volume.Secret; sv != nil {
			secret, err := ct.getRefFromSecret(sv.SecretName, cd.Namespace)
			if err != nil {
				ct.logger.Errorf("secret %s.%s query error %v", sv.SecretName, cd.Namespace, err)
				continue
			}
			if secret != nil {
				res[secret.GetName()] = *secret
			}
		}
	}
	// scan containers
	for _, container := range targetDep.Spec.Template.Spec.Containers {
		// scan env
		for _, env := range container.Env {
			if env.ValueFrom != nil {
				switch {
				case env.ValueFrom.ConfigMapKeyRef != nil:
					name := env.ValueFrom.ConfigMapKeyRef.LocalObjectReference.Name
					config, err := ct.getRefFromConfigMap(name, cd.Namespace)
					if err != nil {
						ct.logger.Errorf("configMap %s.%s query error %v", name, cd.Namespace, err)
						continue
					}
					if config != nil {
						res[config.GetName()] = *config
					}
				case env.ValueFrom.SecretKeyRef != nil:
					name := env.ValueFrom.SecretKeyRef.LocalObjectReference.Name
					secret, err := ct.getRefFromSecret(name, cd.Namespace)
					if err != nil {
						ct.logger.Errorf("secret %s.%s query error %v", name, cd.Namespace, err)
						continue
					}
					if secret != nil {
						res[secret.GetName()] = *secret
					}
				}
			}
		}
		// scan envFrom
		for _, envFrom := range container.EnvFrom {
			switch {
			case envFrom.ConfigMapRef != nil:
				name := envFrom.ConfigMapRef.LocalObjectReference.Name
				config, err := ct.getRefFromConfigMap(name, cd.Namespace)
				if err != nil {
					ct.logger.Errorf("configMap %s.%s query error %v", name, cd.Namespace, err)
					continue
				}
				if config != nil {
					res[config.GetName()] = *config
				}
			case envFrom.SecretRef != nil:
				name := envFrom.SecretRef.LocalObjectReference.Name
				secret, err := ct.getRefFromSecret(name, cd.Namespace)
				if err != nil {
					ct.logger.Errorf("secret %s.%s query error %v", name, cd.Namespace, err)
					continue
				}
				if secret != nil {
					res[secret.GetName()] = *secret
				}
			}
		}
	}

	return res, nil
}

// GetConfigRefs returns a map of configs and their checksum
func (ct *ConfigTracker) GetConfigRefs(cd *flaggerv1.Canary) (*map[string]string, error) {
	res := make(map[string]string)
	configs, err := ct.GetTargetConfigs(cd)
	if err != nil {
		return nil, err
	}

	for _, cfg := range configs {
		res[cfg.GetName()] = cfg.Checksum
	}

	return &res, nil
}

// HasConfigChanged checks for changes in ConfigMaps and Secrets by comparing
// the checksum for each ConfigRef stored in Canary.Status.TrackedConfigs
func (ct *ConfigTracker) HasConfigChanged(cd *flaggerv1.Canary) (bool, error) {
	configs, err := ct.GetTargetConfigs(cd)
	if err != nil {
		return false, err
	}

	if len(configs) == 0 && cd.Status.TrackedConfigs == nil {
		return false, nil
	}

	if len(configs) > 0 && cd.Status.TrackedConfigs == nil {
		return true, nil
	}

	trackedConfigs := *cd.Status.TrackedConfigs

	if len(configs) != len(trackedConfigs) {
		return true, nil
	}

	for _, cfg := range configs {
		if trackedConfigs[cfg.GetName()] != cfg.Checksum {
			ct.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).
				Infof("%s %s has changed", cfg.Type, cfg.Name)
			return true, nil
		}
	}

	return false, nil
}

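Concretely, Canary.Status.TrackedConfigs maps type/name keys (the ConfigRef.GetName format) to checksums, and a new analysis is triggered when the map sizes diverge or any live checksum differs from the recorded one. A toy version of that comparison; the checksum values are made up for illustration, while the object names come from the tests above:

package main

import "fmt"

func main() {
    // recorded in Canary.Status.TrackedConfigs at the last sync
    tracked := map[string]string{
        "configmap/podinfo-config-env": "52d1e01a7bf1c7bf",
        "secret/podinfo-secret-env":    "9c4f1b2a33d0e8aa",
    }
    // recomputed from the live objects on this reconcile
    live := map[string]string{
        "configmap/podinfo-config-env": "52d1e01a7bf1c7bf",
        "secret/podinfo-secret-env":    "0f0e0d0c0b0a0908", // the Secret was edited
    }

    changed := len(live) != len(tracked)
    for name, sum := range live {
        if tracked[name] != sum {
            changed = true
            fmt.Printf("%s has changed\n", name)
        }
    }
    fmt.Println("start new canary analysis:", changed)
}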
// CreatePrimaryConfigs syncs the primary Kubernetes ConfigMaps and Secrets
// with those found in the target deployment
func (ct *ConfigTracker) CreatePrimaryConfigs(cd *flaggerv1.Canary, refs map[string]ConfigRef) error {
	for _, ref := range refs {
		switch ref.Type {
		case ConfigRefMap:
			config, err := ct.kubeClient.CoreV1().ConfigMaps(cd.Namespace).Get(ref.Name, metav1.GetOptions{})
			if err != nil {
				return err
			}
			primaryName := fmt.Sprintf("%s-primary", config.GetName())
			primaryConfigMap := &corev1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name:      primaryName,
					Namespace: cd.Namespace,
					Labels:    config.Labels,
					OwnerReferences: []metav1.OwnerReference{
						*metav1.NewControllerRef(cd, schema.GroupVersionKind{
							Group:   flaggerv1.SchemeGroupVersion.Group,
							Version: flaggerv1.SchemeGroupVersion.Version,
							Kind:    flaggerv1.CanaryKind,
						}),
					},
				},
				Data: config.Data,
			}

			// update or insert primary ConfigMap
			_, err = ct.kubeClient.CoreV1().ConfigMaps(cd.Namespace).Update(primaryConfigMap)
			if err != nil {
				if errors.IsNotFound(err) {
					_, err = ct.kubeClient.CoreV1().ConfigMaps(cd.Namespace).Create(primaryConfigMap)
					if err != nil {
						return err
					}
				} else {
					return err
				}
			}

			ct.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).
				Infof("ConfigMap %s synced", primaryConfigMap.GetName())
		case ConfigRefSecret:
			secret, err := ct.kubeClient.CoreV1().Secrets(cd.Namespace).Get(ref.Name, metav1.GetOptions{})
			if err != nil {
				return err
			}
			primaryName := fmt.Sprintf("%s-primary", secret.GetName())
			primarySecret := &corev1.Secret{
				ObjectMeta: metav1.ObjectMeta{
					Name:      primaryName,
					Namespace: cd.Namespace,
					Labels:    secret.Labels,
					OwnerReferences: []metav1.OwnerReference{
						*metav1.NewControllerRef(cd, schema.GroupVersionKind{
							Group:   flaggerv1.SchemeGroupVersion.Group,
							Version: flaggerv1.SchemeGroupVersion.Version,
							Kind:    flaggerv1.CanaryKind,
						}),
					},
				},
				Type: secret.Type,
				Data: secret.Data,
			}

			// update or insert primary Secret
			_, err = ct.kubeClient.CoreV1().Secrets(cd.Namespace).Update(primarySecret)
			if err != nil {
				if errors.IsNotFound(err) {
					_, err = ct.kubeClient.CoreV1().Secrets(cd.Namespace).Create(primarySecret)
					if err != nil {
						return err
					}
				} else {
					return err
				}
			}

			ct.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).
				Infof("Secret %s synced", primarySecret.GetName())
		}
	}

	return nil
}

// ApplyPrimaryConfigs appends the primary suffix to all ConfigMaps and Secrets found in the PodSpec
func (ct *ConfigTracker) ApplyPrimaryConfigs(spec corev1.PodSpec, refs map[string]ConfigRef) corev1.PodSpec {
	// update volumes
	for i, volume := range spec.Volumes {
		if cmv := volume.ConfigMap; cmv != nil {
			name := fmt.Sprintf("%s/%s", ConfigRefMap, cmv.Name)
			if _, exists := refs[name]; exists {
				spec.Volumes[i].ConfigMap.Name += "-primary"
			}
		}

		if sv := volume.Secret; sv != nil {
			name := fmt.Sprintf("%s/%s", ConfigRefSecret, sv.SecretName)
			if _, exists := refs[name]; exists {
				spec.Volumes[i].Secret.SecretName += "-primary"
			}
		}
	}
	// update containers
	for _, container := range spec.Containers {
		// update env
		for i, env := range container.Env {
			if env.ValueFrom != nil {
				switch {
				case env.ValueFrom.ConfigMapKeyRef != nil:
					name := fmt.Sprintf("%s/%s", ConfigRefMap, env.ValueFrom.ConfigMapKeyRef.Name)
					if _, exists := refs[name]; exists {
						container.Env[i].ValueFrom.ConfigMapKeyRef.Name += "-primary"
					}
				case env.ValueFrom.SecretKeyRef != nil:
					name := fmt.Sprintf("%s/%s", ConfigRefSecret, env.ValueFrom.SecretKeyRef.Name)
					if _, exists := refs[name]; exists {
						container.Env[i].ValueFrom.SecretKeyRef.Name += "-primary"
					}
				}
			}
		}
		// update envFrom
		for i, envFrom := range container.EnvFrom {
			switch {
			case envFrom.ConfigMapRef != nil:
				name := fmt.Sprintf("%s/%s", ConfigRefMap, envFrom.ConfigMapRef.Name)
				if _, exists := refs[name]; exists {
					container.EnvFrom[i].ConfigMapRef.Name += "-primary"
				}
			case envFrom.SecretRef != nil:
				name := fmt.Sprintf("%s/%s", ConfigRefSecret, envFrom.SecretRef.Name)
				if _, exists := refs[name]; exists {
					container.EnvFrom[i].SecretRef.Name += "-primary"
				}
			}
		}
	}

	return spec
}
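To see ApplyPrimaryConfigs at work end to end: given a PodSpec that mounts a tracked ConfigMap, the returned spec points at the -primary copy. A sketch written against the exported identifiers above; a zero-value ConfigTracker suffices here because this method touches no API clients:

package main

import (
    "fmt"

    "github.com/stefanprodan/flagger/pkg/controller"
    corev1 "k8s.io/api/core/v1"
)

func main() {
    spec := corev1.PodSpec{
        Volumes: []corev1.Volume{{
            Name: "config",
            VolumeSource: corev1.VolumeSource{
                ConfigMap: &corev1.ConfigMapVolumeSource{
                    LocalObjectReference: corev1.LocalObjectReference{Name: "podinfo-config-env"},
                },
            },
        }},
    }
    // key format matches ConfigRef.GetName(): "<type>/<name>"
    refs := map[string]controller.ConfigRef{
        "configmap/podinfo-config-env": {Name: "podinfo-config-env", Type: controller.ConfigRefMap},
    }

    ct := &controller.ConfigTracker{}
    out := ct.ApplyPrimaryConfigs(spec, refs)
    fmt.Println(out.Volumes[0].ConfigMap.Name) // podinfo-config-env-primary
}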
@@ -1,4 +1,4 @@
 package version
 
-var VERSION = "0.4.1"
+var VERSION = "0.5.0"
 var REVISION = "unknown"
 