Compare commits

...

22 Commits

| Author | SHA1 | Message | Date |
|---|---|---|---|
| Muneeb Aijaz | 2c7480ac67 | Merge pull request #704 from deschmih/master: feature "reload-on-delete" documented in README.md | 2024-07-08 13:05:43 +05:00 |
| DESCHMIH | 3f49abebb9 | feature "reload-on-delete" documented in README.md | 2024-07-08 10:00:27 +02:00 |
| DESCHMIH | 070355e79d | feature "reload-on-delete" documented in README.md | 2024-07-08 09:47:54 +02:00 |
| stakater-user | 49997ac248 | [skip-ci] Update artifacts | 2024-07-08 07:06:27 +00:00 |
| Muneeb Aijaz | b6894f6de1 | Merge pull request #695 from deschmih/master: feature reload-on-delete implemented, test cases enhanced | 2024-07-08 11:56:38 +05:00 |
| DESCHMIH | cda9fa9bf0 | feature reload-on-delete removed from doc | 2024-07-08 08:49:17 +02:00 |
| DESCHMIH | 074a42d8ce | feature reload-on-delete removed from doc | 2024-07-08 08:28:38 +02:00 |
| deschmih | cbf501fc75 | Merge branch 'master' into master | 2024-07-08 07:56:01 +02:00 |
| stakater-user | 9f5211b730 | [skip-ci] Update artifacts | 2024-06-26 08:16:23 +00:00 |
| renovate[bot] | ab7e4ddca7 | fix(deps): update module github.com/argoproj/argo-rollouts to v1.7.1 (#702) (Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>) | 2024-06-26 13:41:38 +05:30 |
| stakater-user | fc955f72c8 | [skip-ci] Update artifacts | 2024-06-21 16:04:02 +00:00 |
| Muneeb Aijaz | 0bcf8cb1e1 | Merge pull request #686 from jfroy/oci-invalid-version: fix: replace + with _ in Chart.Version | 2024-06-21 20:24:48 +05:00 |
| stakater-user | ac7e965326 | [skip-ci] Update artifacts | 2024-06-21 15:12:42 +00:00 |
| Muneeb Aijaz | 7f5ea8c18f | Merge pull request #690 from alexanderldavis/enable_metrics_by_namespace: feat: add new opt-in metric of reloads by namespace | 2024-06-21 19:53:31 +05:00 |
| stakater-user | b73b04d9ae | [skip-ci] Update artifacts | 2024-06-21 13:50:22 +00:00 |
| deschmih | 2260d72873 | feature reload-on-delete implemented, test cases enhanced | 2024-06-18 09:57:56 +02:00 |
| Alexander Davis | 6cd458b8ed | chore(tests): fix ReloadedByNamespace tests | 2024-06-14 16:24:09 -05:00 |
| Alexander Davis | 09d90532e6 | chore(docs): apply reviewdog grammar suggestion | 2024-06-14 15:54:55 -05:00 |
| Alexander Davis | 220a9d5f68 | fix: wrong conditional on new metric test | 2024-06-14 15:49:22 -05:00 |
| Alexander Davis | 6eeba71273 | chore(docs): add information about new opt-in metric | 2024-06-14 15:30:30 -05:00 |
| Alexander Davis | 3f6e98a9d5 | feat: add new opt-in metric of reloads by namespace | 2024-06-14 15:20:38 -05:00 |
| Jean-Francois Roy | 5d6309b941 | fix: replace + with _ in Chart.Version (When using Flux with the OCI chart, the chart version has a git hash suffix, ex: 1.0.107+20eb5010550b, which is invalid as a metadata label.) | 2024-06-11 11:36:55 -07:00 |
21 changed files with 896 additions and 215 deletions

View File

@@ -327,6 +327,7 @@ helm uninstall {{RELEASE_NAME}} -n {{NAMESPACE}}
| `reloader.ignoreSecrets` | To ignore secrets. Valid values are `true` and `false`. Only one of `ignoreSecrets` and `ignoreConfigMaps` can be set, not both at the same time | boolean | `false` |
| `reloader.ignoreConfigMaps` | To ignore configMaps. Valid values are `true` and `false` | boolean | `false` |
| `reloader.reloadOnCreate` | Enable reload on create events. Valid values are `true` and `false` | boolean | `false` |
| `reloader.reloadOnDelete` | Enable reload on delete events. Valid values are `true` and `false` | boolean | `false` |
| `reloader.syncAfterRestart` | Enable sync after Reloader restarts for **Add** events; works only when `reloadOnCreate` is `true`. Valid values are `true` and `false` | boolean | `false` |
| `reloader.reloadStrategy` | Strategy to trigger resource restart; set to either `default`, `env-vars` or `annotations` | enumeration | `default` |
| `reloader.ignoreNamespaces` | List of comma-separated namespaces to ignore; if multiple are provided, they are combined with the AND operator | string | `""` |
@@ -338,6 +339,7 @@ helm uninstall {{RELEASE_NAME}} -n {{NAMESPACE}}
| `reloader.readOnlyRootFileSystem` | Enforce readOnlyRootFilesystem | boolean | `false` |
| `reloader.legacy.rbac` | | boolean | `false` |
| `reloader.matchLabels` | Pod labels to match | map | `{}` |
| `reloader.enableMetricsByNamespace` | Expose an additional Prometheus counter of reloads by namespace (this metric may have high cardinality in clusters with many namespaces) | boolean | `false` |
#### Deployment Reloader Parameters
@@ -384,14 +386,18 @@ helm uninstall {{RELEASE_NAME}} -n {{NAMESPACE}}
- Reloading of OpenShift (DeploymentConfig) and/or Argo `Rollouts` has to be enabled explicitly, because it might not always be possible to use it on a cluster with restricted permissions
- `isOpenShift`: recent versions of OpenShift (tested on 4.13.3) require the specified user to be in a `uid` range which is dynamically assigned by the namespace. The solution is to unset the `runAsUser` variable via `deployment.securityContext.runAsUser=null` and let OpenShift assign it at install time
- `reloadOnCreate` controls how Reloader handles secrets being added to the cache for the first time. If `reloadOnCreate` is set to true:
1. Configmaps/secrets being added to the cache will cause Reloader to perform a rolling update of the associated workload
1. When applications are deployed for the first time, Reloader will perform a rolling update of the associated workload
1. If you are running Reloader in HA mode all workloads will have a rolling update performed when a new leader is elected
- `reloadOnDelete` controls how Reloader handles secrets being deleted. If `reloadOnDelete` is set to true:
1. Configmaps/secrets being deleted will cause Reloader to perform a rolling update of the associated workload
- `serviceMonitor` will be removed in future releases of Reloader in favour of Pod monitor
- If `reloadOnCreate` is set to false:
1. Updates to configmaps/secrets that occur while there is no leader will not be picked up by the new leader until a subsequent update of the configmap/secret occurs
1. In the worst case the window in which there can be no leader is 15s, as this is the LeaseDuration
- If `reloadOnDelete` is set to false:
1. Deleting configmaps/secrets has no effect on pods that reference these resources.
- By default, `reloadOnCreate`, `reloadOnDelete` and `syncAfterRestart` are all set to false. All need to be enabled explicitly
## Help
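The `reloadOnCreate`/`reloadOnDelete` notes in the README hunk above amount to a simple gate on watch events: updates always trigger a rolling update, while create and delete events are only acted on when their flags are enabled. A minimal standalone sketch of that gating, with simplified types standing in for Reloader's controller and work queue (the names below are illustrative, not the project's API):

```go
package main

import "fmt"

// Event mirrors the three watch events discussed in the README notes above.
type Event int

const (
	Create Event = iota
	Update
	Delete
)

// options stands in for Reloader's --reload-on-create / --reload-on-delete
// flags, both of which default to false and must be enabled explicitly.
type options struct {
	ReloadOnCreate bool
	ReloadOnDelete bool
}

// shouldReload reproduces the gating described above: updates always reload,
// creates and deletes only when the corresponding flag is set.
func shouldReload(o options, e Event) bool {
	switch e {
	case Update:
		return true
	case Create:
		return o.ReloadOnCreate
	case Delete:
		return o.ReloadOnDelete
	}
	return false
}

func main() {
	o := options{ReloadOnCreate: false, ReloadOnDelete: true}
	for _, e := range []Event{Create, Update, Delete} {
		fmt.Printf("event %d -> rolling update: %v\n", e, shouldReload(o, e))
	}
}
```

The real controller additionally checks ignored/selected namespaces and controller initialization before queueing an event, as the `Delete` hunk later in this compare shows.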

View File

@@ -3,8 +3,8 @@
apiVersion: v1
name: reloader
description: Reloader chart that runs on kubernetes
-version: 1.0.108
-appVersion: v1.0.108
+version: 1.0.116
+appVersion: v1.0.116
keywords:
- Reloader
- kubernetes

View File

@@ -22,7 +22,7 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this
{{- define "reloader-labels.chart" -}}
app: {{ template "reloader-fullname" . }}
-chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
release: {{ .Release.Name | quote }}
heritage: {{ .Release.Service | quote }}
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}

View File

@@ -76,7 +76,7 @@ spec:
- image: "{{ .Values.reloader.deployment.image.name }}:{{ .Values.reloader.deployment.image.tag }}"
imagePullPolicy: {{ .Values.reloader.deployment.image.pullPolicy }}
name: {{ template "reloader-fullname" . }}
-{{- if or (.Values.reloader.deployment.env.open) (.Values.reloader.deployment.env.secret) (.Values.reloader.deployment.env.field) (.Values.reloader.deployment.env.existing) (eq .Values.reloader.watchGlobally false) (.Values.reloader.enableHA)}}
+{{- if or (.Values.reloader.deployment.env.open) (.Values.reloader.deployment.env.secret) (.Values.reloader.deployment.env.field) (.Values.reloader.deployment.env.existing) (eq .Values.reloader.watchGlobally false) (.Values.reloader.enableHA) (.Values.reloader.enableMetricsByNamespace)}}
env:
{{- range $name, $value := .Values.reloader.deployment.env.open }}
{{- if not (empty $value) }}
@@ -129,6 +129,10 @@ spec:
fieldRef:
fieldPath: metadata.namespace
{{- end }}
{{- if .Values.reloader.enableMetricsByNamespace }}
- name: METRICS_COUNT_BY_NAMESPACE
value: enabled
{{- end }}
{{- end }}
ports:
@@ -171,7 +175,7 @@ spec:
{{- . | toYaml | nindent 10 }}
{{- end }}
{{- end }}
-{{- if or (.Values.reloader.logFormat) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (.Values.reloader.namespaceSelector) (.Values.reloader.resourceLabelSelector) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) (eq .Values.reloader.isArgoRollouts true) (eq .Values.reloader.reloadOnCreate true) (ne .Values.reloader.reloadStrategy "default") (.Values.reloader.enableHA) (.Values.reloader.autoReloadAll)}}
+{{- if or (.Values.reloader.logFormat) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (.Values.reloader.namespaceSelector) (.Values.reloader.resourceLabelSelector) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) (eq .Values.reloader.isArgoRollouts true) (eq .Values.reloader.reloadOnCreate true) (eq .Values.reloader.reloadOnDelete true) (ne .Values.reloader.reloadStrategy "default") (.Values.reloader.enableHA) (.Values.reloader.autoReloadAll)}}
args:
{{- if .Values.reloader.logFormat }}
- "--log-format={{ .Values.reloader.logFormat }}"
@@ -231,6 +235,9 @@ spec:
{{- if eq .Values.reloader.reloadOnCreate true }}
- "--reload-on-create={{ .Values.reloader.reloadOnCreate }}"
{{- end }}
{{- if eq .Values.reloader.reloadOnDelete true }}
- "--reload-on-delete={{ .Values.reloader.reloadOnDelete }}"
{{- end }}
{{- if eq .Values.reloader.syncAfterRestart true }}
- "--sync-after-restart={{ .Values.reloader.syncAfterRestart }}"
{{- end }}

View File

@@ -18,6 +18,7 @@ reloader:
ignoreSecrets: false
ignoreConfigMaps: false
reloadOnCreate: false
reloadOnDelete: false
syncAfterRestart: false
reloadStrategy: default # Set to default, env-vars or annotations
ignoreNamespaces: "" # Comma separated list of namespaces to ignore
@@ -32,6 +33,8 @@ reloader:
legacy:
rbac: false
matchLabels: {}
# Set to true to expose a prometheus counter of reloads by namespace (this metric may have high cardinality in clusters with many namespaces)
enableMetricsByNamespace: false
deployment:
# If you wish to run multiple replicas set reloader.enableHA = true
replicas: 1
@@ -89,10 +92,10 @@ reloader:
labels:
provider: stakater
group: com.stakater.platform
-version: v1.0.108
+version: v1.0.116
image:
name: ghcr.io/stakater/reloader
-tag: v1.0.108
+tag: v1.0.116
pullPolicy: IfNotPresent
# Support for extra environment variables.
env:

View File

@@ -17,7 +17,7 @@ spec:
app: reloader-reloader
spec:
containers:
-- image: "ghcr.io/stakater/reloader:v1.0.108"
+- image: "ghcr.io/stakater/reloader:v1.0.116"
imagePullPolicy: IfNotPresent
name: reloader-reloader
ports:

View File

@@ -8,7 +8,7 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
-chart: "reloader-1.0.108"
+chart: "reloader-1.0.116"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
@@ -25,7 +25,7 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
-chart: "reloader-1.0.108"
+chart: "reloader-1.0.116"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
@@ -92,7 +92,7 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
-chart: "reloader-1.0.108"
+chart: "reloader-1.0.116"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
@@ -115,13 +115,13 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
-chart: "reloader-1.0.108"
+chart: "reloader-1.0.116"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
group: com.stakater.platform
provider: stakater
-version: v1.0.108
+version: v1.0.116
name: reloader-reloader
namespace: default
spec:
@@ -135,16 +135,16 @@ spec:
metadata:
labels:
app: reloader-reloader
-chart: "reloader-1.0.108"
+chart: "reloader-1.0.116"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
group: com.stakater.platform
provider: stakater
-version: v1.0.108
+version: v1.0.116
spec:
containers:
-- image: "ghcr.io/stakater/reloader:v1.0.108"
+- image: "ghcr.io/stakater/reloader:v1.0.116"
imagePullPolicy: IfNotPresent
name: reloader-reloader
@@ -172,6 +172,13 @@ spec:
securityContext:
{}
resources:
limits:
cpu: 150m
memory: 512Mi
requests:
cpu: 10m
memory: 128Mi
securityContext:
runAsNonRoot: true
runAsUser: 65534

View File

@@ -23,6 +23,8 @@ reloader:
legacy:
rbac: false
matchLabels: {}
# Set to true to expose a prometheus counter of reloads by namespace (this metric may have high cardinality in clusters with many namespaces)
enableMetricsByNamespace: false
deployment:
replicas: 1
nodeSelector:

View File

@@ -60,3 +60,16 @@ When Reloader is unable to reload, `reloader_reload_executed_total{success="fals
reloader_reload_executed_total{success="false"} 15
reloader_reload_executed_total{success="true"} 12
```
### Reloads by Namespace
Reloader can also export a metric to show the number of reloads by namespace. This feature is disabled by default, as it can lead to high cardinality in clusters with many namespaces.
The metric will have both `success` and `namespace` as attributes:
```text
reloader_reload_executed_total{success="false", namespace="some-namespace"} 2
reloader_reload_executed_total{success="true", namespace="some-namespace"} 1
```
To opt in, set the environment variable `METRICS_COUNT_BY_NAMESPACE` to `enabled` or set the Helm value `reloader.enableMetricsByNamespace` to `true`.
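The opt-in flow described here matches the collector changes later in this compare. A self-contained sketch of the same pattern, using the Prometheus Go client as the diff does: a second `CounterVec` carries the extra `namespace` label and is only registered when `METRICS_COUNT_BY_NAMESPACE` is set to `enabled`.

```go
package main

import (
	"net/http"
	"os"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Base counter: reloads by success/failure only (low cardinality).
	reloaded := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: "reloader",
			Name:      "reload_executed_total",
			Help:      "Counter of reloads executed by Reloader.",
		},
		[]string{"success"},
	)
	prometheus.MustRegister(reloaded)

	// Opt-in counter: the namespace label can blow up cardinality in
	// clusters with many namespaces, hence the env-var gate.
	byNamespace := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: "reloader",
			Name:      "reload_executed_total_by_namespace",
			Help:      "Counter of reloads executed by Reloader by namespace.",
		},
		[]string{"success", "namespace"},
	)
	if os.Getenv("METRICS_COUNT_BY_NAMESPACE") == "enabled" {
		prometheus.MustRegister(byNamespace)
	}

	// Incrementing an unregistered CounterVec is harmless; it simply
	// never shows up on the /metrics endpoint.
	reloaded.With(prometheus.Labels{"success": "true"}).Inc()
	byNamespace.With(prometheus.Labels{"success": "true", "namespace": "some-namespace"}).Inc()

	http.Handle("/metrics", promhttp.Handler())
	_ = http.ListenAndServe(":9090", nil) // scrape http://localhost:9090/metrics
}
```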

go.mod (2 changed lines)
View File

@@ -3,7 +3,7 @@ module github.com/stakater/Reloader
go 1.21
require (
-github.com/argoproj/argo-rollouts v1.7.0
+github.com/argoproj/argo-rollouts v1.7.1
github.com/openshift/api v3.9.0+incompatible
github.com/openshift/client-go v0.0.0-20231110140829-a6ca51f6d5ba
github.com/parnurzeal/gorequest v0.2.16

go.sum (2 changed lines)
View File

@@ -2,6 +2,8 @@ github.com/argoproj/argo-rollouts v1.6.6 h1:JCJ0cGAwWkh2xCAHZ1OQmrobysRjCatmG9IZ
github.com/argoproj/argo-rollouts v1.6.6/go.mod h1:X2kTiBaYCSounmw1kmONdIZTwJNzNQYC0SrXUgSw9UI=
github.com/argoproj/argo-rollouts v1.7.0 h1:3Hv8MUwGIWuZe50+yyS/4X21t4jT4shdGvgDe0lG6aw=
github.com/argoproj/argo-rollouts v1.7.0/go.mod h1:Te4HrUELxKiBpK8lgk77o4gTa3mv8pXCd8xdPprKrbs=
github.com/argoproj/argo-rollouts v1.7.1 h1:KWStvHPRrNTbDTclogiYC/s4eKbdS45KkgXiorDzKG0=
github.com/argoproj/argo-rollouts v1.7.1/go.mod h1:Te4HrUELxKiBpK8lgk77o4gTa3mv8pXCd8xdPprKrbs=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=

View File

@@ -51,6 +51,7 @@ func NewReloaderCommand() *cobra.Command {
cmd.PersistentFlags().StringVar(&options.IsArgoRollouts, "is-Argo-Rollouts", "false", "Add support for argo rollouts")
cmd.PersistentFlags().StringVar(&options.ReloadStrategy, constants.ReloadStrategyFlag, constants.EnvVarsReloadStrategy, "Specifies the desired reload strategy")
cmd.PersistentFlags().StringVar(&options.ReloadOnCreate, "reload-on-create", "false", "Add support to watch create events")
cmd.PersistentFlags().StringVar(&options.ReloadOnDelete, "reload-on-delete", "false", "Add support to watch delete events")
cmd.PersistentFlags().BoolVar(&options.EnableHA, "enable-ha", false, "Adds support for running multiple replicas via leadership election")
cmd.PersistentFlags().BoolVar(&options.SyncAfterRestart, "sync-after-restart", false, "Sync add events after reloader restarts")

View File

@@ -178,13 +178,22 @@ func (c *Controller) Update(old interface{}, new interface{}) {
// Delete function to add an object to the queue in case of deleting a resource
func (c *Controller) Delete(old interface{}) {
+    if options.ReloadOnDelete == "true" {
+        if !c.resourceInIgnoredNamespace(old) && c.resourceInSelectedNamespaces(old) && secretControllerInitialized && configmapControllerInitialized {
+            c.queue.Add(handler.ResourceDeleteHandler{
+                Resource:   old,
+                Collectors: c.collectors,
+                Recorder:   c.recorder,
+            })
+        }
+    }
    switch object := old.(type) {
    case *v1.Namespace:
        c.removeSelectedNamespaceFromCache(*object)
        return
    }
    // Todo: Any future delete event can be handled here
}
// Run function for controller which handles the queue

View File

@@ -27,7 +27,7 @@ func (r ResourceCreatedHandler) Handle() error {
return sendUpgradeWebhook(config, options.WebhookUrl)
}
// process resource based on its type
-return doRollingUpgrade(config, r.Collectors, r.Recorder)
+return doRollingUpgrade(config, r.Collectors, r.Recorder, invokeReloadStrategy)
}
return nil
}

View File

@@ -0,0 +1,92 @@
package handler

import (
    "github.com/sirupsen/logrus"
    "github.com/stakater/Reloader/internal/pkg/callbacks"
    "github.com/stakater/Reloader/internal/pkg/constants"
    "github.com/stakater/Reloader/internal/pkg/metrics"
    "github.com/stakater/Reloader/internal/pkg/options"
    "github.com/stakater/Reloader/internal/pkg/testutil"
    "github.com/stakater/Reloader/internal/pkg/util"
    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/client-go/tools/record"
)

// ResourceDeleteHandler contains new objects
type ResourceDeleteHandler struct {
    Resource   interface{}
    Collectors metrics.Collectors
    Recorder   record.EventRecorder
}

// Handle processes resources being deleted
func (r ResourceDeleteHandler) Handle() error {
    if r.Resource == nil {
        logrus.Errorf("Resource delete handler received nil resource")
    } else {
        config, _ := r.GetConfig()
        // Send webhook
        if options.WebhookUrl != "" {
            return sendUpgradeWebhook(config, options.WebhookUrl)
        }
        // process resource based on its type
        return doRollingUpgrade(config, r.Collectors, r.Recorder, invokeDeleteStrategy)
    }
    return nil
}

// GetConfig gets configurations containing SHA, annotations, namespace and resource name
func (r ResourceDeleteHandler) GetConfig() (util.Config, string) {
    var oldSHAData string
    var config util.Config
    if _, ok := r.Resource.(*v1.ConfigMap); ok {
        config = util.GetConfigmapConfig(r.Resource.(*v1.ConfigMap))
    } else if _, ok := r.Resource.(*v1.Secret); ok {
        config = util.GetSecretConfig(r.Resource.(*v1.Secret))
    } else {
        logrus.Warnf("Invalid resource: Resource should be 'Secret' or 'Configmap' but found, %v", r.Resource)
    }
    return config, oldSHAData
}

func invokeDeleteStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) constants.Result {
    if options.ReloadStrategy == constants.AnnotationsReloadStrategy {
        return removePodAnnotations(upgradeFuncs, item, config, autoReload)
    }
    return removeContainerEnvVars(upgradeFuncs, item, config, autoReload)
}

func removePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) constants.Result {
    config.SHAValue = testutil.GetSHAfromEmptyData()
    return updatePodAnnotations(upgradeFuncs, item, config, autoReload)
}

func removeContainerEnvVars(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) constants.Result {
    envVar := getEnvVarName(config.ResourceName, config.Type)
    container := getContainerUsingResource(upgradeFuncs, item, config, autoReload)
    if container == nil {
        return constants.NoContainerFound
    }
    // remove if env var exists
    containers := upgradeFuncs.ContainersFunc(item)
    for i := range containers {
        envs := containers[i].Env
        index := -1
        for j := range envs {
            if envs[j].Name == envVar {
                index = j
                break
            }
        }
        if index != -1 {
            containers[i].Env = append(containers[i].Env[:index], containers[i].Env[index+1:]...)
            return constants.Updated
        }
    }
    return constants.NotUpdated
}

View File

@@ -29,7 +29,7 @@ func (r ResourceUpdatedHandler) Handle() error {
return sendUpgradeWebhook(config, options.WebhookUrl)
}
// process resource based on its type
-return doRollingUpgrade(config, r.Collectors, r.Recorder)
+return doRollingUpgrade(config, r.Collectors, r.Recorder, invokeReloadStrategy)
}
}
return nil

View File

@@ -142,35 +142,35 @@ func sendWebhook(url string) (string, []error) {
return buffer.String(), nil
}
-func doRollingUpgrade(config util.Config, collectors metrics.Collectors, recorder record.EventRecorder) error {
+func doRollingUpgrade(config util.Config, collectors metrics.Collectors, recorder record.EventRecorder, invoke invokeStrategy) error {
clients := kube.GetClients()
-err := rollingUpgrade(clients, config, GetDeploymentRollingUpgradeFuncs(), collectors, recorder)
+err := rollingUpgrade(clients, config, GetDeploymentRollingUpgradeFuncs(), collectors, recorder, invoke)
if err != nil {
return err
}
-err = rollingUpgrade(clients, config, GetCronJobCreateJobFuncs(), collectors, recorder)
+err = rollingUpgrade(clients, config, GetCronJobCreateJobFuncs(), collectors, recorder, invoke)
if err != nil {
return err
}
-err = rollingUpgrade(clients, config, GetDaemonSetRollingUpgradeFuncs(), collectors, recorder)
+err = rollingUpgrade(clients, config, GetDaemonSetRollingUpgradeFuncs(), collectors, recorder, invoke)
if err != nil {
return err
}
-err = rollingUpgrade(clients, config, GetStatefulSetRollingUpgradeFuncs(), collectors, recorder)
+err = rollingUpgrade(clients, config, GetStatefulSetRollingUpgradeFuncs(), collectors, recorder, invoke)
if err != nil {
return err
}
if kube.IsOpenshift {
-err = rollingUpgrade(clients, config, GetDeploymentConfigRollingUpgradeFuncs(), collectors, recorder)
+err = rollingUpgrade(clients, config, GetDeploymentConfigRollingUpgradeFuncs(), collectors, recorder, invoke)
if err != nil {
return err
}
}
if options.IsArgoRollouts == "true" {
-err = rollingUpgrade(clients, config, GetArgoRolloutRollingUpgradeFuncs(), collectors, recorder)
+err = rollingUpgrade(clients, config, GetArgoRolloutRollingUpgradeFuncs(), collectors, recorder, invoke)
if err != nil {
return err
}
@@ -179,17 +179,17 @@ func doRollingUpgrade(config util.Config, collectors metrics.Collectors, recorde
return nil
}
-func rollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder) error {
+func rollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder, strategy invokeStrategy) error {
-err := PerformRollingUpgrade(clients, config, upgradeFuncs, collectors, recorder)
+err := PerformAction(clients, config, upgradeFuncs, collectors, recorder, strategy)
if err != nil {
logrus.Errorf("Rolling upgrade for '%s' failed with error = %v", config.ResourceName, err)
}
return err
}
-// PerformRollingUpgrade upgrades the deployment if there is any change in configmap or secret data
-func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder) error {
+// PerformAction invokes the deployment if there is any change in configmap or secret data
+func PerformAction(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder, strategy invokeStrategy) error {
items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
for _, i := range items {
@@ -210,7 +210,7 @@ func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFunc
reloaderEnabled, _ := strconv.ParseBool(reloaderEnabledValue)
typedAutoAnnotationEnabled, _ := strconv.ParseBool(typedAutoAnnotationEnabledValue)
if reloaderEnabled || typedAutoAnnotationEnabled || reloaderEnabledValue == "" && typedAutoAnnotationEnabledValue == "" && options.AutoReloadAll {
-result = invokeReloadStrategy(upgradeFuncs, i, config, true)
+result = strategy(upgradeFuncs, i, config, true)
}
if result != constants.Updated && annotationValue != "" {
@@ -219,7 +219,7 @@ func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFunc
value = strings.TrimSpace(value)
re := regexp.MustCompile("^" + value + "$")
if re.Match([]byte(config.ResourceName)) {
-result = invokeReloadStrategy(upgradeFuncs, i, config, false)
+result = strategy(upgradeFuncs, i, config, false)
if result == constants.Updated {
break
}
@@ -230,7 +230,7 @@ func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFunc
if result != constants.Updated && searchAnnotationValue == "true" {
matchAnnotationValue := config.ResourceAnnotations[options.SearchMatchAnnotation]
if matchAnnotationValue == "true" {
-result = invokeReloadStrategy(upgradeFuncs, i, config, true)
+result = strategy(upgradeFuncs, i, config, true)
}
}
@@ -245,6 +245,7 @@ func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFunc
message := fmt.Sprintf("Update for '%s' of type '%s' in namespace '%s' failed with error %v", resourceName, upgradeFuncs.ResourceType, config.Namespace, err)
logrus.Errorf(message)
collectors.Reloaded.With(prometheus.Labels{"success": "false"}).Inc()
collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "false", "namespace": config.Namespace}).Inc()
if recorder != nil {
recorder.Event(i, v1.EventTypeWarning, "ReloadFail", message)
}
@@ -254,6 +255,7 @@ func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFunc
message += fmt.Sprintf(", Updated '%s' of type '%s' in namespace '%s'", resourceName, upgradeFuncs.ResourceType, config.Namespace)
logrus.Infof(message)
collectors.Reloaded.With(prometheus.Labels{"success": "true"}).Inc()
collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": config.Namespace}).Inc()
alert_on_reload, ok := os.LookupEnv("ALERT_ON_RELOAD")
if recorder != nil {
recorder.Event(i, v1.EventTypeNormal, "Reloaded", message)
@@ -380,6 +382,8 @@ func getContainerUsingResource(upgradeFuncs callbacks.RollingUpgradeFuncs, item
return container
}
type invokeStrategy func(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) constants.Result
func invokeReloadStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) constants.Result {
if options.ReloadStrategy == constants.AnnotationsReloadStrategy {
return updatePodAnnotations(upgradeFuncs, item, config, autoReload)
@@ -416,6 +420,13 @@ func updatePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item runti
return constants.Updated
}
func getReloaderAnnotationKey() string {
    return fmt.Sprintf("%s/%s",
        constants.ReloaderAnnotationPrefix,
        constants.LastReloadedFromAnnotation,
    )
}
func createReloadedAnnotations(target *util.ReloadSource) (map[string]string, error) {
if target == nil {
return nil, errors.New("target is required")
@@ -426,10 +437,7 @@ func createReloadedAnnotations(target *util.ReloadSource) (map[string]string, er
// Intentionally only storing the last item in order to keep
// the generated annotations as small as possible.
annotations := make(map[string]string)
-lastReloadedResourceName := fmt.Sprintf("%s/%s",
-constants.ReloaderAnnotationPrefix,
-constants.LastReloadedFromAnnotation,
-)
+lastReloadedResourceName := getReloaderAnnotationKey()
lastReloadedResource, err := json.Marshal(target)
if err != nil {
@@ -440,9 +448,13 @@ func createReloadedAnnotations(target *util.ReloadSource) (map[string]string, er
return annotations, nil
}
func getEnvVarName(resourceName string, typeName string) string {
    return constants.EnvVarPrefix + util.ConvertToEnvVarName(resourceName) + "_" + typeName
}
func updateContainerEnvVars(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) constants.Result {
var result constants.Result
-envVar := constants.EnvVarPrefix + util.ConvertToEnvVarName(config.ResourceName) + "_" + config.Type
+envVar := getEnvVarName(config.ResourceName, config.Type)
container := getContainerUsingResource(upgradeFuncs, item, config, autoReload)
if container == nil {
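The `upgrade.go` hunks above thread an `invokeStrategy` function value through `doRollingUpgrade`, so create and update events inject `invokeReloadStrategy` while delete events inject `invokeDeleteStrategy`, and the workload traversal in `PerformAction` is shared between them. A stripped-down sketch of that strategy-injection pattern (types and names simplified for illustration):

```go
package main

import "fmt"

type result string

const (
	updated    result = "Updated"
	notUpdated result = "NotUpdated"
)

// invokeStrategy mirrors the function type introduced by the refactor:
// the same walk over workloads is reused, only the per-item action differs.
type invokeStrategy func(resource string) result

func reloadStrategy(resource string) result {
	fmt.Println("set/update reload marker for", resource)
	return updated
}

func deleteStrategy(resource string) result {
	fmt.Println("remove reload marker for", resource)
	return updated
}

// performAction stands in for PerformAction: one traversal, pluggable action.
func performAction(resources []string, strategy invokeStrategy) {
	for _, r := range resources {
		if strategy(r) == updated {
			fmt.Println("rolling update triggered for workload using", r)
		}
	}
}

func main() {
	performAction([]string{"foo-configmap"}, reloadStrategy) // create/update path
	performAction([]string{"foo-configmap"}, deleteStrategy) // delete path
}
```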

File diff suppressed because it is too large

View File

@@ -1,13 +1,16 @@
package metrics
import (
"net/http"
"os"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"net/http"
)
type Collectors struct {
Reloaded            *prometheus.CounterVec
ReloadedByNamespace *prometheus.CounterVec
}
func NewCollectors() Collectors {
@@ -17,21 +20,40 @@ func NewCollectors() Collectors {
Name: "reload_executed_total",
Help: "Counter of reloads executed by Reloader.",
},
[]string{"success"},
[]string{
"success",
},
)
//set 0 as default value
reloaded.With(prometheus.Labels{"success": "true"}).Add(0)
reloaded.With(prometheus.Labels{"success": "false"}).Add(0)
reloaded_by_namespace := prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: "reloader",
Name: "reload_executed_total_by_namespace",
Help: "Counter of reloads executed by Reloader by namespace.",
},
[]string{
"success",
"namespace",
},
)
return Collectors{
Reloaded:            reloaded,
ReloadedByNamespace: reloaded_by_namespace,
}
}
func SetupPrometheusEndpoint() Collectors {
collectors := NewCollectors()
prometheus.MustRegister(collectors.Reloaded)
if os.Getenv("METRICS_COUNT_BY_NAMESPACE") == "enabled" {
prometheus.MustRegister(collectors.ReloadedByNamespace)
}
http.Handle("/metrics", promhttp.Handler())
return collectors

View File

@@ -32,7 +32,9 @@ var (
// ReloadStrategy Specify the update strategy
ReloadStrategy = constants.EnvVarsReloadStrategy
// ReloadOnCreate Adds support to watch create events
ReloadOnCreate = "false"
ReloadOnCreate = "false"
// ReloadOnDelete Adds support to watch delete events
ReloadOnDelete = "false"
SyncAfterRestart = false
// EnableHA adds support for running multiple replicas via leadership election
EnableHA = false

View File

@@ -934,6 +934,55 @@ func VerifyResourceEnvVarUpdate(clients kube.Clients, config util.Config, envVar
return false
}
// VerifyResourceEnvVarRemoved verifies whether the rolling upgrade happened and all STAKATER_<name>_CONFIGMAP/SECRET env vars are removed
func VerifyResourceEnvVarRemoved(clients kube.Clients, config util.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
    items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
    for _, i := range items {
        containers := upgradeFuncs.ContainersFunc(i)
        accessor, err := meta.Accessor(i)
        if err != nil {
            return false
        }
        annotations := accessor.GetAnnotations()
        // match statefulsets with the correct annotation
        annotationValue := annotations[config.Annotation]
        searchAnnotationValue := annotations[options.AutoSearchAnnotation]
        reloaderEnabledValue := annotations[options.ReloaderAutoAnnotation]
        typedAutoAnnotationEnabledValue := annotations[config.TypedAutoAnnotation]
        reloaderEnabled, err := strconv.ParseBool(reloaderEnabledValue)
        typedAutoAnnotationEnabled, errTyped := strconv.ParseBool(typedAutoAnnotationEnabledValue)
        matches := false
        if err == nil && reloaderEnabled || errTyped == nil && typedAutoAnnotationEnabled {
            matches = true
        } else if annotationValue != "" {
            values := strings.Split(annotationValue, ",")
            for _, value := range values {
                value = strings.Trim(value, " ")
                if value == config.ResourceName {
                    matches = true
                    break
                }
            }
        } else if searchAnnotationValue == "true" {
            if config.ResourceAnnotations[options.SearchMatchAnnotation] == "true" {
                matches = true
            }
        }
        if matches {
            envName := constants.EnvVarPrefix + util.ConvertToEnvVarName(config.ResourceName) + "_" + envVarPostfix
            value := GetResourceSHAFromEnvVar(containers, envName)
            if value == "" {
                return true
            }
        }
    }
    return false
}
// VerifyResourceAnnotationUpdate verifies whether the rolling upgrade happened or not
func VerifyResourceAnnotationUpdate(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
@@ -978,3 +1027,7 @@ func VerifyResourceAnnotationUpdate(clients kube.Clients, config util.Config, up
}
return false
}
func GetSHAfromEmptyData() string {
    return crypto.GenerateSHA("")
}
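`GetSHAfromEmptyData` above supports the annotations reload strategy on delete: `removePodAnnotations` (in the new delete handler) swaps the stored hash for the SHA of empty data, so the pod template still changes and a rollout is triggered even though the source resource is gone. A sketch of the idea; the annotation key and the use of SHA-1 below are illustrative assumptions, not Reloader's exact implementation:

```go
package main

import (
	"crypto/sha1"
	"fmt"
)

// generateSHA stands in for Reloader's crypto.GenerateSHA; SHA-1 is used
// here only for illustration.
func generateSHA(data string) string {
	return fmt.Sprintf("%x", sha1.Sum([]byte(data)))
}

func main() {
	// Hypothetical annotation key, for illustration only.
	const key = "reloader.stakater.com/last-applied-hash"

	podTemplateAnnotations := map[string]string{
		key: generateSHA("old-config-data"),
	}

	// On delete there is no data left to hash, so the strategy stores the
	// hash of *empty* data; the annotation value still changes, which is
	// enough to roll the workload.
	podTemplateAnnotations[key] = generateSHA("")
	fmt.Println(podTemplateAnnotations)
}
```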