Compare commits

...

22 Commits

Author SHA1 Message Date
stakater-user
5662919f72 [skip-ci] Update artifacts 2022-10-10 10:56:58 +00:00
Faizan Ahmad
445d0f870e Merge pull request #345 from avestuk/podDisruptionBudget
Add pod disruption budget
2022-10-10 12:50:26 +02:00
stakater-user
81e74fe830 [skip-ci] Update artifacts 2022-10-10 09:03:48 +00:00
Faizan Ahmad
50791ad51a Merge pull request #341 from avestuk/leadership-election
Leadership election
2022-10-10 10:57:25 +02:00
Alex Vest
6a65657e27 Add pod disruption budget 2022-10-06 11:51:28 +01:00
Alex Vest
1c7190884a Merge branch 'master' into leadership-election 2022-10-06 11:17:35 +01:00
Alex Vest
488eaa9bef Run leadership election as non blocking
Liveness probe endpoint will always be blocking on the main thread
2022-10-04 16:41:34 +01:00
Alex Vest
676c3703aa Set replicas = 1 by default, override if HA is enabled 2022-10-04 16:41:34 +01:00
Alex Vest
deec4df125 Fix pod antiaffinity 2022-10-04 16:41:34 +01:00
Alex Vest
eedc8e81d0 Set enableHA and reloadOnCreate to false 2022-10-04 16:41:34 +01:00
Alex Vest
28456ffafe Add PodAntiAffinity if HA is enabled 2022-10-04 16:41:34 +01:00
Alex Vest
a7c3ae37aa Expand documentation about reloadOnCreate 2022-10-04 16:41:34 +01:00
Alex Vest
d043bcf7be Fix roles 2022-10-04 16:41:34 +01:00
Alex Vest
72a1c59cac Err check response writer 2022-10-04 16:41:34 +01:00
Alex Vest
6299b1d8e9 Update helm chart with new liveness probe 2022-10-04 16:41:34 +01:00
Alex Vest
11ae057b0a Add tests for leadership election
Pull liveness into leadership to ease testing, logically the liveness
probe is directly affected by leadership so it makes sense here.

Moved some of the components of the controller tests into the testutil
package for reuse in my own tests.
2022-10-04 16:41:34 +01:00
Alex Vest
d34c99baf4 Add liveness probe 2022-10-04 16:41:34 +01:00
Alex Vest
b7e83b74d8 Move leadership to its own package 2022-10-04 16:41:34 +01:00
Alex Vest
919f75bb62 Shutdown on leader election loss 2022-10-04 16:41:34 +01:00
Alex Vest
16079bd1d4 Update helm chart for HA in global mode 2022-10-04 16:41:34 +01:00
Alex Vest
401d4227d1 Move consts to const pkg
Should move leadership bits to own pkg?
2022-10-04 16:41:34 +01:00
Alex Vest
7f9f32ca58 Add leadership election 2022-10-04 16:41:34 +01:00
20 changed files with 524 additions and 36 deletions

View File

@@ -245,6 +245,14 @@ You can enable to scrape Reloader's Prometheus metrics by setting `serviceMonito
| isArgoRollouts | Enable Argo Rollouts. Valid values are either `true` or `false` | boolean |
| reloadOnCreate | Enable reload on create events. Valid values are either `true` or `false` | boolean |
**ReloadOnCreate**: `reloadOnCreate` controls how Reloader handles ConfigMaps/Secrets that are added to its cache for the first time. If `reloadOnCreate` is set to `true`:
* ConfigMaps/Secrets added to the cache trigger a rolling update of the associated workload.
* When applications are deployed for the first time, Reloader performs a rolling update of the associated workload.
* If Reloader is running in HA mode, all workloads get a rolling update whenever a new leader is elected.
If `reloadOnCreate` is set to `false`:
* Updates to ConfigMaps/Secrets that occur while there is no leader are not picked up by the new leader until the ConfigMap/Secret is updated again. In the worst case the window without a leader is 15 seconds, as this is the LeaseDuration.
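For illustration, a minimal `values.yaml` sketch that enables these options together might look like the following (the key names match the chart values introduced in this change; the exact settings you need may differ):

```yaml
reloader:
  enableHA: true          # run leader election so more than one replica can be deployed
  reloadOnCreate: true    # reload workloads when a ConfigMap/Secret is first seen
  deployment:
    replicas: 2           # only honoured when enableHA is true; otherwise the chart forces 1
  podDisruptionBudget:
    enabled: true
    minAvailable: 1       # keep at least one replica available during voluntary disruptions
```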
## Help
### Documentation

View File

@@ -3,8 +3,8 @@
apiVersion: v1
name: reloader
description: Reloader chart that runs on kubernetes
version: v0.0.122
appVersion: v0.0.122
version: v0.0.124
appVersion: v0.0.124
keywords:
- Reloader
- kubernetes

View File

@@ -28,6 +28,23 @@ heritage: {{ .Release.Service | quote }}
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
{{- end -}}
{{/*
Create pod anti affinity labels
*/}}
{{- define "reloader-podAntiAffinity" -}}
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app
operator: In
values:
- {{ template "reloader-fullname" . }}
topologyKey: "kubernetes.io/hostname"
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
@@ -45,4 +62,4 @@ Create the annotations to support helm3
{{- define "reloader-helm3.annotations" -}}
meta.helm.sh/release-namespace: {{ .Release.Namespace | quote }}
meta.helm.sh/release-name: {{ .Release.Name | quote }}
{{- end -}}
{{- end -}}

View File

@@ -77,6 +77,16 @@ rules:
- get
- update
- patch
{{- if .Values.reloader.enableHA }}
- apiGroups:
- "coordination.k8s.io"
resources:
- leases
verbs:
- create
- get
- update
{{- end}}
- apiGroups:
- ""
resources:

View File

@@ -17,7 +17,11 @@ metadata:
name: {{ template "reloader-fullname" . }}
namespace: {{ .Release.Namespace }}
spec:
{{- if not (.Values.reloader.enableHA) }}
replicas: 1
{{- else }}
replicas: {{ .Values.reloader.deployment.replicas }}
{{- end}}
revisionHistoryLimit: 2
selector:
matchLabels:
@@ -45,9 +49,12 @@ spec:
nodeSelector:
{{ toYaml .Values.reloader.deployment.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.reloader.deployment.affinity }}
{{- if or (.Values.reloader.deployment.affinity) (.Values.reloader.enableHA) }}
affinity:
{{- if .Values.reloader.deployment.affinity }}
{{ toYaml .Values.reloader.deployment.affinity | indent 8 }}
{{- end}}
{{ include "reloader-podAntiAffinity" . | indent 8 }}
{{- end }}
{{- if .Values.reloader.deployment.tolerations }}
tolerations:
@@ -60,7 +67,7 @@ spec:
- image: "{{ .Values.reloader.deployment.image.name }}:{{ .Values.reloader.deployment.image.tag }}"
imagePullPolicy: {{ .Values.reloader.deployment.image.pullPolicy }}
name: {{ template "reloader-fullname" . }}
{{- if or (.Values.reloader.deployment.env.open) (.Values.reloader.deployment.env.secret) (.Values.reloader.deployment.env.field) (eq .Values.reloader.watchGlobally false) }}
{{- if or (.Values.reloader.deployment.env.open) (.Values.reloader.deployment.env.secret) (.Values.reloader.deployment.env.field) (eq .Values.reloader.watchGlobally false) (.Values.reloader.enableHA)}}
env:
{{- range $name, $value := .Values.reloader.deployment.env.open }}
{{- if not (empty $value) }}
@@ -92,14 +99,26 @@ spec:
fieldRef:
fieldPath: metadata.namespace
{{- end }}
{{- if .Values.reloader.enableHA }}
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- end }}
{{- end }}
ports:
- name: http
containerPort: 9091
- name: metrics
containerPort: 9090
livenessProbe:
httpGet:
path: /metrics
path: /live
port: http
timeoutSeconds: {{ .Values.reloader.deployment.livenessProbe.timeoutSeconds | default "5" }}
failureThreshold: {{ .Values.reloader.deployment.livenessProbe.failureThreshold | default "5" }}
@@ -108,7 +127,7 @@ spec:
readinessProbe:
httpGet:
path: /metrics
port: http
port: metrics
timeoutSeconds: {{ .Values.reloader.deployment.readinessProbe.timeoutSeconds | default "5" }}
failureThreshold: {{ .Values.reloader.deployment.readinessProbe.failureThreshold | default "5" }}
periodSeconds: {{ .Values.reloader.deployment.readinessProbe.periodSeconds | default "10" }}
@@ -123,7 +142,7 @@ spec:
- mountPath: /tmp/
name: tmp-volume
{{- end }}
{{- if or (.Values.reloader.logFormat) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) (eq .Values.reloader.isArgoRollouts true) (eq .Values.reloader.reloadOnCreate true) (ne .Values.reloader.reloadStrategy "default")}}
{{- if or (.Values.reloader.logFormat) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) (eq .Values.reloader.isArgoRollouts true) (eq .Values.reloader.reloadOnCreate true) (ne .Values.reloader.reloadStrategy "default") (.Values.reloader.enableHA)}}
args:
{{- if .Values.reloader.logFormat }}
- "--log-format={{ .Values.reloader.logFormat }}"
@@ -169,6 +188,9 @@ spec:
{{- if ne .Values.reloader.reloadStrategy "default" }}
- "--reload-strategy={{ .Values.reloader.reloadStrategy }}"
{{- end }}
{{- if or (gt .Values.reloader.deployment.replicas 1.0) (.Values.reloader.enableHA) }}
- "--enable-ha=true"
{{- end}}
{{- end }}
{{- if .Values.reloader.deployment.resources }}
resources:

View File

@@ -0,0 +1,11 @@
{{- if .Values.reloader.podDisruptionBudget.enabled }}
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: {{ template "reloader-fullname" . }}
spec:
minAvailable: {{ .Values.reloader.podDisruptionBudget.minAvailable }}
selector:
matchLabels:
app: {{ template "reloader-fullname" . }}
{{- end }}

View File

@@ -77,6 +77,16 @@ rules:
- get
- update
- patch
{{- if .Values.reloader.enableHA }}
- apiGroups:
- "coordination.k8s.io"
resources:
- leases
verbs:
- create
- get
- update
{{- end}}
- apiGroups:
- ""
resources:

View File

@@ -18,12 +18,15 @@ reloader:
ignoreNamespaces: "" # Comma separated list of namespaces to ignore
logFormat: "" #json
watchGlobally: true
# Set to true to enable leadership election allowing you to run multiple replicas
enableHA: false
# Set to true if you have a pod security policy that enforces readOnlyRootFilesystem
readOnlyRootFileSystem: false
legacy:
rbac: false
matchLabels: {}
deployment:
# If you wish to run multiple replicas set reloader.enableHA = true
replicas: 1
nodeSelector:
# cloud.google.com/gke-nodepool: default-pool
@@ -62,10 +65,10 @@ reloader:
labels:
provider: stakater
group: com.stakater.platform
version: v0.0.122
version: v0.0.124
image:
name: stakater/reloader
tag: v0.0.122
tag: v0.0.124
pullPolicy: IfNotPresent
# Support for extra environment variables.
env:
@@ -153,3 +156,8 @@ reloader:
# labels:
# Set timeout for scrape
# timeout: 10s
podDisruptionBudget:
enabled: false
# Set the minimum available replicas
# minAvailable: 1

View File

@@ -9,7 +9,7 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.122"
chart: "reloader-v0.0.124"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"

View File

@@ -9,7 +9,7 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.122"
chart: "reloader-v0.0.124"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"

View File

@@ -8,13 +8,13 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.122"
chart: "reloader-v0.0.124"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
group: com.stakater.platform
provider: stakater
version: v0.0.122
version: v0.0.124
name: reloader-reloader
namespace: default
spec:
@@ -28,25 +28,27 @@ spec:
metadata:
labels:
app: reloader-reloader
chart: "reloader-v0.0.122"
chart: "reloader-v0.0.124"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
group: com.stakater.platform
provider: stakater
version: v0.0.122
version: v0.0.124
spec:
containers:
- image: "stakater/reloader:v0.0.122"
- image: "stakater/reloader:v0.0.124"
imagePullPolicy: IfNotPresent
name: reloader-reloader
ports:
- name: http
containerPort: 9091
- name: metrics
containerPort: 9090
livenessProbe:
httpGet:
path: /metrics
path: /live
port: http
timeoutSeconds: 5
failureThreshold: 5
@@ -55,7 +57,7 @@ spec:
readinessProbe:
httpGet:
path: /metrics
port: http
port: metrics
timeoutSeconds: 5
failureThreshold: 5
periodSeconds: 10

View File

@@ -8,7 +8,7 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.122"
chart: "reloader-v0.0.124"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"

View File

@@ -8,7 +8,7 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.122"
chart: "reloader-v0.0.124"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
@@ -25,7 +25,7 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.122"
chart: "reloader-v0.0.124"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
@@ -80,7 +80,7 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.122"
chart: "reloader-v0.0.124"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
@@ -104,13 +104,13 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.122"
chart: "reloader-v0.0.124"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
group: com.stakater.platform
provider: stakater
version: v0.0.122
version: v0.0.124
name: reloader-reloader
namespace: default
spec:
@@ -124,25 +124,27 @@ spec:
metadata:
labels:
app: reloader-reloader
chart: "reloader-v0.0.122"
chart: "reloader-v0.0.124"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
group: com.stakater.platform
provider: stakater
version: v0.0.122
version: v0.0.124
spec:
containers:
- image: "stakater/reloader:v0.0.122"
- image: "stakater/reloader:v0.0.124"
imagePullPolicy: IfNotPresent
name: reloader-reloader
ports:
- name: http
containerPort: 9091
- name: metrics
containerPort: 9090
livenessProbe:
httpGet:
path: /metrics
path: /live
port: http
timeoutSeconds: 5
failureThreshold: 5
@@ -151,7 +153,7 @@ spec:
readinessProbe:
httpGet:
path: /metrics
port: http
port: metrics
timeoutSeconds: 5
failureThreshold: 5
periodSeconds: 10

View File

@@ -1,12 +1,15 @@
package cmd
import (
"context"
"errors"
"fmt"
"github.com/stakater/Reloader/internal/pkg/constants"
"os"
"strings"
"github.com/stakater/Reloader/internal/pkg/constants"
"github.com/stakater/Reloader/internal/pkg/leadership"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/stakater/Reloader/internal/pkg/controller"
@@ -38,21 +41,34 @@ func NewReloaderCommand() *cobra.Command {
cmd.PersistentFlags().StringVar(&options.IsArgoRollouts, "is-Argo-Rollouts", "false", "Add support for argo rollouts")
cmd.PersistentFlags().StringVar(&options.ReloadStrategy, constants.ReloadStrategyFlag, constants.EnvVarsReloadStrategy, "Specifies the desired reload strategy")
cmd.PersistentFlags().StringVar(&options.ReloadOnCreate, "reload-on-create", "false", "Add support to watch create events")
cmd.PersistentFlags().BoolVar(&options.EnableHA, "enable-ha", false, "Adds support for running multiple replicas via leadership election")
return cmd
}
func validateFlags(*cobra.Command, []string) error {
// Ensure the reload strategy is one of the following...
var validReloadStrategy bool
valid := []string{constants.EnvVarsReloadStrategy, constants.AnnotationsReloadStrategy}
for _, s := range valid {
if s == options.ReloadStrategy {
return nil
validReloadStrategy = true
}
}
err := fmt.Sprintf("%s must be one of: %s", constants.ReloadStrategyFlag, strings.Join(valid, ", "))
return errors.New(err)
if !validReloadStrategy {
err := fmt.Sprintf("%s must be one of: %s", constants.ReloadStrategyFlag, strings.Join(valid, ", "))
return errors.New(err)
}
// Validate that HA options are correct
if options.EnableHA {
if err := validateHAEnvs(); err != nil {
return err
}
}
return nil
}
func configureLogging(logFormat string) error {
@@ -68,6 +84,25 @@ func configureLogging(logFormat string) error {
return nil
}
func validateHAEnvs() error {
podName, podNamespace := getHAEnvs()
if podName == "" {
return fmt.Errorf("%s not set, cannot run in HA mode without %s set", constants.PodNameEnv, constants.PodNameEnv)
}
if podNamespace == "" {
return fmt.Errorf("%s not set, cannot run in HA mode without %s set", constants.PodNamespaceEnv, constants.PodNamespaceEnv)
}
return nil
}
func getHAEnvs() (string, string) {
podName := os.Getenv(constants.PodNameEnv)
podNamespace := os.Getenv(constants.PodNamespaceEnv)
return podName, podNamespace
}
func startReloader(cmd *cobra.Command, args []string) {
err := configureLogging(options.LogFormat)
if err != nil {
@@ -99,6 +134,7 @@ func startReloader(cmd *cobra.Command, args []string) {
collectors := metrics.SetupPrometheusEndpoint()
var controllers []*controller.Controller
for k := range kube.ResourceMap {
if ignoredResourcesList.Contains(k) {
continue
@@ -109,6 +145,12 @@ func startReloader(cmd *cobra.Command, args []string) {
logrus.Fatalf("%s", err)
}
controllers = append(controllers, c)
// If HA is enabled, defer starting the controllers until this instance wins the leader election
if options.EnableHA {
continue
}
// Now let's start the controller
stop := make(chan struct{})
defer close(stop)
@@ -116,8 +158,16 @@ func startReloader(cmd *cobra.Command, args []string) {
go c.Run(1, stop)
}
// Wait forever
select {}
// Run leadership election
if options.EnableHA {
podName, podNamespace := getHAEnvs()
lock := leadership.GetNewLock(clientset.CoordinationV1(), constants.LockName, podName, podNamespace)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go leadership.RunLeaderElection(lock, ctx, cancel, podName, controllers)
}
logrus.Fatal(leadership.Healthz())
}
func getIgnoredNamespacesList(cmd *cobra.Command) (util.List, error) {

View File

@@ -20,3 +20,10 @@ const (
// AnnotationsReloadStrategy instructs Reloader to add pod template annotations to facilitate a restart
AnnotationsReloadStrategy = "annotations"
)
// Leadership election related consts
const (
LockName string = "stakaer-reloader-lock"
PodNameEnv string = "POD_NAME"
PodNamespaceEnv string = "POD_NAMESPACE"
)

View File

@@ -65,6 +65,8 @@ func NewController(
c.queue = queue
c.collectors = collectors
c.recorder = recorder
logrus.Infof("created controller for: %s", resource)
return &c, nil
}

View File

@@ -0,0 +1,110 @@
package leadership
import (
"context"
"net/http"
"sync"
"time"
"github.com/sirupsen/logrus"
"github.com/stakater/Reloader/internal/pkg/controller"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/leaderelection"
"k8s.io/client-go/tools/leaderelection/resourcelock"
coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1"
)
const healthPort string = ":9091"
var (
// Used for liveness probe
m sync.Mutex
healthy bool = true
)
func GetNewLock(client coordinationv1.CoordinationV1Interface, lockName, podname, namespace string) *resourcelock.LeaseLock {
return &resourcelock.LeaseLock{
LeaseMeta: v1.ObjectMeta{
Name: lockName,
Namespace: namespace,
},
Client: client,
LockConfig: resourcelock.ResourceLockConfig{
Identity: podname,
},
}
}
// RunLeaderElection runs leadership election. If an instance of the controller is the leader and stops leading, it will shut down.
func RunLeaderElection(lock *resourcelock.LeaseLock, ctx context.Context, cancel context.CancelFunc, id string, controllers []*controller.Controller) {
// Construct channels for the controllers to use
var stopChannels []chan struct{}
for i := 0; i < len(controllers); i++ {
stop := make(chan struct{})
stopChannels = append(stopChannels, stop)
}
leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
Lock: lock,
ReleaseOnCancel: true,
LeaseDuration: 15 * time.Second,
RenewDeadline: 10 * time.Second,
RetryPeriod: 2 * time.Second,
Callbacks: leaderelection.LeaderCallbacks{
OnStartedLeading: func(c context.Context) {
logrus.Info("became leader, starting controllers")
runControllers(controllers, stopChannels)
},
OnStoppedLeading: func() {
logrus.Info("no longer leader, shutting down")
stopControllers(stopChannels)
cancel()
m.Lock()
defer m.Unlock()
healthy = false
},
OnNewLeader: func(current_id string) {
if current_id == id {
logrus.Info("still the leader!")
return
}
logrus.Infof("new leader is %s", current_id)
},
},
})
}
func runControllers(controllers []*controller.Controller, stopChannels []chan struct{}) {
for i, c := range controllers {
c := c
go c.Run(1, stopChannels[i])
}
}
func stopControllers(stopChannels []chan struct{}) {
for _, c := range stopChannels {
close(c)
}
}
// Healthz serves the liveness probe endpoint. If leadership election is
// enabled and a replica stops leading the liveness probe will fail and the
// kubelet will restart the container.
func Healthz() error {
http.HandleFunc("/live", healthz)
return http.ListenAndServe(healthPort, nil)
}
func healthz(w http.ResponseWriter, req *http.Request) {
m.Lock()
defer m.Unlock()
if healthy {
if i, err := w.Write([]byte("alive")); err != nil {
logrus.Infof("failed to write liveness response, wrote: %d bytes, got err: %s", i, err)
}
return
}
w.WriteHeader(http.StatusInternalServerError)
}

View File

@@ -0,0 +1,213 @@
package leadership
import (
"context"
"fmt"
"net/http"
"net/http/httptest"
"os"
"testing"
"time"
"github.com/sirupsen/logrus"
"github.com/stakater/Reloader/internal/pkg/constants"
"github.com/stakater/Reloader/internal/pkg/controller"
"github.com/stakater/Reloader/internal/pkg/handler"
"github.com/stakater/Reloader/internal/pkg/metrics"
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/internal/pkg/testutil"
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/kube"
)
func TestMain(m *testing.M) {
testutil.CreateNamespace(testutil.Namespace, testutil.Clients.KubernetesClient)
logrus.Infof("Running Testcases")
retCode := m.Run()
testutil.DeleteNamespace(testutil.Namespace, testutil.Clients.KubernetesClient)
os.Exit(retCode)
}
func TestHealthz(t *testing.T) {
request, err := http.NewRequest(http.MethodGet, "/live", nil)
if err != nil {
t.Fatalf(("failed to create request"))
}
response := httptest.NewRecorder()
healthz(response, request)
got := response.Code
want := 200
if got != want {
t.Fatalf("got: %q, want: %q", got, want)
}
// Have the liveness probe serve a 500
healthy = false
request, err = http.NewRequest(http.MethodGet, "/live", nil)
if err != nil {
t.Fatalf(("failed to create request"))
}
response = httptest.NewRecorder()
healthz(response, request)
got = response.Code
want = 500
if got != want {
t.Fatalf("got: %q, want: %q", got, want)
}
}
// TestRunLeaderElection validates that the liveness endpoint serves 500 when
// leadership election fails
func TestRunLeaderElection(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
lock := GetNewLock(testutil.Clients.KubernetesClient.CoordinationV1(), constants.LockName, testutil.Pod, testutil.Namespace)
go RunLeaderElection(lock, ctx, cancel, testutil.Pod, []*controller.Controller{})
// Liveness probe should be serving OK
request, err := http.NewRequest(http.MethodGet, "/live", nil)
if err != nil {
t.Fatalf(("failed to create request"))
}
response := httptest.NewRecorder()
healthz(response, request)
got := response.Code
want := 500
if got != want {
t.Fatalf("got: %q, want: %q", got, want)
}
// Cancel the leader election context, so leadership is released and
// live endpoint serves 500
cancel()
request, err = http.NewRequest(http.MethodGet, "/live", nil)
if err != nil {
t.Fatalf(("failed to create request"))
}
response = httptest.NewRecorder()
healthz(response, request)
got = response.Code
want = 500
if got != want {
t.Fatalf("got: %q, want: %q", got, want)
}
}
// TestRunLeaderElectionWithControllers tests that leadership election works
// with real controllers and that the controllers stop running once the
// context is cancelled.
func TestRunLeaderElectionWithControllers(t *testing.T) {
t.Logf("Creating controller")
var controllers []*controller.Controller
for k := range kube.ResourceMap {
c, err := controller.NewController(testutil.Clients.KubernetesClient, k, testutil.Namespace, []string{}, metrics.NewCollectors())
if err != nil {
logrus.Fatalf("%s", err)
}
controllers = append(controllers, c)
}
time.Sleep(3 * time.Second)
lock := GetNewLock(testutil.Clients.KubernetesClient.CoordinationV1(), fmt.Sprintf("%s-%d", constants.LockName, 1), testutil.Pod, testutil.Namespace)
ctx, cancel := context.WithCancel(context.TODO())
// Start running leadership election, this also starts the controllers
go RunLeaderElection(lock, ctx, cancel, testutil.Pod, controllers)
time.Sleep(3 * time.Second)
// Create a configmap that the deployment below will reference
configmapName := testutil.ConfigmapNamePrefix + "-update-" + testutil.RandSeq(5)
configmapClient, err := testutil.CreateConfigMap(testutil.Clients.KubernetesClient, testutil.Namespace, configmapName, "www.google.com")
if err != nil {
t.Fatalf("Error while creating the configmap %v", err)
}
// Creating deployment
_, err = testutil.CreateDeployment(testutil.Clients.KubernetesClient, configmapName, testutil.Namespace, true)
if err != nil {
t.Fatalf("Error in deployment creation: %v", err)
}
// Updating configmap for first time
updateErr := testutil.UpdateConfigMap(configmapClient, testutil.Namespace, configmapName, "", "www.stakater.com")
if updateErr != nil {
t.Fatalf("Configmap was not updated")
}
time.Sleep(3 * time.Second)
// Verifying deployment update
logrus.Infof("Verifying pod envvars has been created")
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, testutil.Namespace, configmapName, "www.stakater.com")
config := util.Config{
Namespace: testutil.Namespace,
ResourceName: configmapName,
SHAValue: shaData,
Annotation: options.ConfigmapUpdateOnChangeAnnotation,
}
deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs()
updated := testutil.VerifyResourceEnvVarUpdate(testutil.Clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
if !updated {
t.Fatalf("Deployment was not updated")
}
time.Sleep(testutil.SleepDuration)
// Cancel the leader election context, so leadership is released
logrus.Info("shutting down controller from test")
cancel()
time.Sleep(5 * time.Second)
// Updating configmap again
updateErr = testutil.UpdateConfigMap(configmapClient, testutil.Namespace, configmapName, "", "www.stakater.com/new")
if updateErr != nil {
t.Fatalf("Configmap was not updated")
}
// Verifying that the deployment was not updated as leadership has been lost
logrus.Infof("Verifying pod envvars has not been updated")
shaData = testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, testutil.Namespace, configmapName, "www.stakater.com/new")
config = util.Config{
Namespace: testutil.Namespace,
ResourceName: configmapName,
SHAValue: shaData,
Annotation: options.ConfigmapUpdateOnChangeAnnotation,
}
deploymentFuncs = handler.GetDeploymentRollingUpgradeFuncs()
updated = testutil.VerifyResourceEnvVarUpdate(testutil.Clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
if updated {
t.Fatalf("Deployment was updated")
}
// Deleting deployment
err = testutil.DeleteDeployment(testutil.Clients.KubernetesClient, testutil.Namespace, configmapName)
if err != nil {
logrus.Errorf("Error while deleting the deployment %v", err)
}
// Deleting configmap
err = testutil.DeleteConfigMap(testutil.Clients.KubernetesClient, testutil.Namespace, configmapName)
if err != nil {
logrus.Errorf("Error while deleting the configmap %v", err)
}
time.Sleep(testutil.SleepDuration)
}

View File

@@ -25,4 +25,6 @@ var (
ReloadStrategy = constants.EnvVarsReloadStrategy
// ReloadOnCreate Adds support to watch create events
ReloadOnCreate = "false"
// EnableHA adds support for running multiple replicas via leadership election
EnableHA = false
)

View File

@@ -16,6 +16,7 @@ import (
"github.com/stakater/Reloader/internal/pkg/callbacks"
"github.com/stakater/Reloader/internal/pkg/constants"
"github.com/stakater/Reloader/internal/pkg/crypto"
"github.com/stakater/Reloader/internal/pkg/metrics"
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/kube"
@@ -35,6 +36,19 @@ var (
SecretResourceType = "secrets"
)
var (
Clients = kube.GetClients()
Pod = "test-reloader-" + RandSeq(5)
Namespace = "test-reloader-" + RandSeq(5)
ConfigmapNamePrefix = "testconfigmap-reloader"
SecretNamePrefix = "testsecret-reloader"
Data = "dGVzdFNlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI="
NewData = "dGVzdE5ld1NlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI="
UpdatedData = "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy"
Collectors = metrics.NewCollectors()
SleepDuration = 3 * time.Second
)
// CreateNamespace creates namespace for testing
func CreateNamespace(namespace string, client kubernetes.Interface) {
_, err := client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{})