Compare commits

...

44 Commits

Author SHA1 Message Date
stakater-user
b7220dac28 [skip-ci] Update artifacts 2023-04-27 10:50:23 +00:00
Hussnain Ahmad
e156cfb167 Merge pull request #443 from stakater/readme-update
Updated broken links in readme
2023-04-27 15:41:45 +05:00
hussnain612
9ff2c349af Updated broken links in readme 2023-04-27 15:10:26 +05:00
stakater-user
09e8e5a4cb [skip-ci] Update artifacts 2023-04-05 07:39:25 +00:00
Muneeb Aijaz
c84cf916be Merge pull request #430 from ctrought/feat-enhancelabelselectors
feat: Configmap/secret label selector & refactor namespace selector
2023-04-05 12:20:01 +05:00
stakater-user
c3c700a7a9 [skip-ci] Update artifacts 2023-04-05 06:40:47 +00:00
Hussnain Ahmad
1bdc540700 Merge pull request #298 from karl-johan-grahn/fix-255
fix(255): push image to ghcr instead of docker hub
2023-04-05 11:32:09 +05:00
Hussnain
ddbe3036af Merge remote-tracking branch 'upstream/master' into fix-255 2023-04-05 11:19:34 +05:00
Hussnain
aab8f66b34 Updated actions dependencies 2023-04-05 11:17:26 +05:00
Hussnain
3e88e06199 Updated image path 2023-04-05 11:04:31 +05:00
Hussnain
32ab2ada09 Updated github actions to push to ghcr and dockerHub 2023-04-05 11:04:18 +05:00
stakater-user
71a2b2347a [skip-ci] Update artifacts 2023-04-05 05:55:18 +00:00
Muneeb Aijaz
ca9715b231 Merge pull request #427 from Whisper40/master
 feat: Implement topologySpreadConstraints to improve redundancy (fix #426)
2023-04-05 10:49:14 +05:00
stakater-user
32bda1a2ed [skip-ci] Update artifacts 2023-04-05 05:44:22 +00:00
Muneeb Aijaz
7f706bc130 Merge pull request #431 from Whisper40/patch-1
 feat: Implement initialDelaySeconds for readinessProbe & livenessPr…
2023-04-05 10:36:29 +05:00
Kévin PEREZ
7d08a6a28f feat: Implement initialDelaySeconds for readinessProbe & livenessProbe (fix #429)
PR respond to this feature request : https://github.com/stakater/Reloader/issues/429
2023-04-03 16:17:41 +02:00
ctrought
9ac351c219 fix: controller label selectors as string
Signed-off-by: Craig Trought <k8s@trought.ca>
2023-04-02 16:47:54 -04:00
ctrought
24e794bb38 fix: skip controller creation for namespace in test
Signed-off-by: Craig Trought <k8s@trought.ca>
2023-04-02 16:47:17 -04:00
ctrought
8e8ce51313 docs: update label selector usage
Signed-off-by: Craig Trought <k8s@trought.ca>
2023-04-02 15:48:12 -04:00
ctrought
8ed0899ff3 fix: use apimachinery labelSelector parser to align with string implementation, supports != operator
Signed-off-by: Craig Trought <k8s@trought.ca>
2023-04-02 15:38:58 -04:00
ctrought
f00448dfbd docs: cleanup labelSelector documentation
Signed-off-by: Craig Trought <k8s@trought.ca>
2023-04-02 15:38:56 -04:00
ctrought
2b619a9243 fix: namespace list/watch permission when using namespaceSelectors
Signed-off-by: Craig Trought <k8s@trought.ca>
2023-04-02 01:31:53 -04:00
ctrought
27c0a9b328 feat: additional labelSelector support
* add option to configure labelSelectors on configmaps & secrets
* support all labelSelectors
* label selector tests
* legacy support for ':' delimited selectors and wildcards

Signed-off-by: Craig Trought <k8s@trought.ca>
2023-03-31 19:08:10 -04:00
ctrought
e39a8f6bcf refactor: use Namespace controller for namespaceSelector
Signed-off-by: Craig Trought <k8s@trought.ca>
2023-03-31 19:08:10 -04:00
stakater-user
3907495a42 [skip-ci] Update artifacts 2023-03-30 19:19:37 +00:00
Rasheed Amir
07889755d9 fix email address (#425) 2023-03-30 21:12:57 +02:00
Kévin PEREZ
49c8f78cff Add topologySpreadConstraints 2023-03-30 09:49:49 +02:00
stakater-user
08ceb6126c [skip-ci] Update artifacts 2023-03-30 05:48:32 +00:00
dhia-gharsallaoui
4b13852de0 fix typo (#423) 2023-03-30 07:27:33 +02:00
stakater-user
197f009fc9 [skip-ci] Update artifacts 2023-03-27 06:54:41 +00:00
Faizan Ahmad
4bc71b145e Merge pull request #421 from stakater/update-go-version-and-dependencies 2023-03-27 08:32:12 +02:00
faizanahmad055
be83553487 Update go version to 1.20.2 and update dependencies
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2023-03-25 21:45:33 +01:00
stakater-user
b193a7b94c [skip-ci] Update artifacts 2023-03-10 23:25:51 +00:00
Faizan Ahmad
ba64c8ff4d Merge pull request #412 from jstewart612/patch-1
[helm] deployment replicas int comparison fix
2023-03-11 00:19:17 +01:00
John Stewart
1c165c86da [helm] make sure comparison for deployment replicas is always doing so against an int 2023-03-10 16:31:09 -05:00
stakater-user
6498f5a536 [skip-ci] Update artifacts 2023-03-10 17:03:18 +00:00
Faizan Ahmad
7745a1ff52 Merge pull request #410 from jordanfelle/patch-1
Fixing eval for enabling HA
2023-03-10 17:54:51 +01:00
jordanfelle
a8ee7068a5 Fixing eval for enabling HA
When set to 1.0

 <gt .Values.reloader.deployment.replicas 1.0>: error calling gt: incompatible types for comparison

But works when set to 1
2023-03-10 06:44:06 -05:00
Karl-Johan Grahn
2cca412425 Merge branch 'master' into fix-255 2022-06-14 09:05:26 +02:00
Karl-Johan Grahn
72ae858c14 fix(255): add back 2022-03-21 22:40:17 +01:00
Karl-Johan Grahn
77e2df9dfb fix(255): checkout does not work as expected 2022-03-21 22:34:45 +01:00
Karl-Johan Grahn
a5bb0392b1 fix(255): add registry var 2022-03-21 21:58:44 +01:00
Karl-Johan Grahn
cb39cf1a03 fix(255): update pr workflow too 2022-03-21 21:57:56 +01:00
Karl-Johan Grahn
3b94615934 fix(255): push image to ghcr instead of docker hub 2022-03-21 21:51:33 +01:00
22 changed files with 427 additions and 165 deletions

View File

@@ -7,9 +7,10 @@ on:
env:
DOCKER_FILE_PATH: Dockerfile
GOLANG_VERSION: 1.20.1
GOLANG_VERSION: 1.20.2
KUBERNETES_VERSION: "1.18.0"
KIND_VERSION: "0.10.0"
REGISTRY: ghcr.io
jobs:
build:
@@ -24,7 +25,7 @@ jobs:
# Setting up helm binary
- name: Set up Helm
uses: azure/setup-helm@v3.4
uses: azure/setup-helm@v3
- name: Set up Go
id: go
@@ -81,23 +82,23 @@ jobs:
echo "##[set-output name=GIT_TAG;]$(echo ${tag})"
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
uses: docker/setup-buildx-action@v2
- name: Login to Registry
uses: docker/login-action@v1
- name: Login to Docker Registry
uses: docker/login-action@v2
with:
username: ${{ secrets.STAKATER_DOCKERHUB_USERNAME }}
password: ${{ secrets.STAKATER_DOCKERHUB_PASSWORD }}
- name: Generate image repository path
- name: Generate image repository path for Docker registry
run: |
echo IMAGE_REPOSITORY=$(echo ${{ github.repository }} | tr '[:upper:]' '[:lower:]') >> $GITHUB_ENV
echo DOCKER_IMAGE_REPOSITORY=$(echo ${{ github.repository }} | tr '[:upper:]' '[:lower:]') >> $GITHUB_ENV
- name: Build and Push Docker Image
uses: docker/build-push-action@v2
- name: Build and Push Docker Image to Docker registry
uses: docker/build-push-action@v4
with:
context: .
file: ${{ env.DOCKER_FILE_PATH }}
@@ -107,28 +108,49 @@ jobs:
cache-to: type=inline
platforms: linux/amd64,linux/arm,linux/arm64
tags: |
${{ env.IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.GIT_TAG }}
${{ env.DOCKER_IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.GIT_TAG }}
labels: |
org.opencontainers.image.source=${{ github.event.repository.clone_url }}
org.opencontainers.image.created=${{ steps.prep.outputs.created }}
org.opencontainers.image.revision=${{ github.sha }}
- name: Login to ghcr registry
uses: docker/login-action@v2
with:
registry: ${{env.REGISTRY}}
username: ${{github.actor}}
password: ${{secrets.GITHUB_TOKEN}}
- name: Generate image repository path for ghcr registry
run: |
echo GHCR_IMAGE_REPOSITORY=${{env.REGISTRY}}/$(echo ${{ github.repository }} | tr '[:upper:]' '[:lower:]') >> $GITHUB_ENV
- name: Build and Push Docker Image to ghcr registry
uses: docker/build-push-action@v4
with:
context: .
file: ${{ env.DOCKER_FILE_PATH }}
pull: true
push: true
build-args: BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
cache-to: type=inline
platforms: linux/amd64,linux/arm,linux/arm64
tags: |
${{ env.GHCR_IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.GIT_TAG }}
labels: |
org.opencontainers.image.source=${{ github.event.repository.clone_url }}
org.opencontainers.image.created=${{ steps.prep.outputs.created }}
org.opencontainers.image.revision=${{ github.sha }}
- name: Comment on PR
uses: mshick/add-pr-comment@v1
uses: mshick/add-pr-comment@v2
if: always()
env:
GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
message: '@${{ github.actor }} Image is available for testing. `docker pull ${{ env.IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.GIT_TAG }}`'
allow-repeats: false
- name: Notify Failure
if: failure()
uses: mshick/add-pr-comment@v1
env:
GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
with:
message: '@${{ github.actor }} Yikes! You better fix it before anyone else finds out! [Build](https://github.com/${{ github.repository }}/commit/${{ github.event.pull_request.head.sha }}/checks) has Failed!'
allow-repeats: false
message-success: '@${{ github.actor }} Image is available for testing. `docker pull ghcr.io/${{ env.IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.GIT_TAG }}`'
message-failure: '@${{ github.actor }} Yikes! You better fix it before anyone else finds out! [Build](https://github.com/${{ github.repository }}/commit/${{ github.event.pull_request.head.sha }}/checks) has Failed!'
allow-repeats: true
- name: Notify Slack
uses: 8398a7/action-slack@v3

View File

@@ -7,10 +7,11 @@ on:
env:
DOCKER_FILE_PATH: Dockerfile
GOLANG_VERSION: 1.20.1
GOLANG_VERSION: 1.20.2
KUBERNETES_VERSION: "1.18.0"
KIND_VERSION: "0.10.0"
HELM_REGISTRY_URL: "https://stakater.github.io/stakater-charts"
REGISTRY: ghcr.io
jobs:
build:
@@ -27,7 +28,7 @@ jobs:
# Setting up helm binary
- name: Set up Helm
uses: azure/setup-helm@v3.4
uses: azure/setup-helm@v3
- name: Set up Go
id: go
@@ -70,7 +71,7 @@ jobs:
- name: Generate Tag
id: generate_tag
uses: anothrNick/github-tag-action@1.36.0
uses: anothrNick/github-tag-action@1.61.0
env:
GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
WITH_V: true
@@ -78,22 +79,23 @@ jobs:
DRY_RUN: true
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
uses: docker/setup-buildx-action@v2
- name: Login to Registry
uses: docker/login-action@v1
- name: Login to Docker Registry
uses: docker/login-action@v2
with:
username: ${{ secrets.STAKATER_DOCKERHUB_USERNAME }}
password: ${{ secrets.STAKATER_DOCKERHUB_PASSWORD }}
- name: Generate image repository path
- name: Generate image repository path for Docker registry
run: |
echo IMAGE_REPOSITORY=$(echo ${{ github.repository }} | tr '[:upper:]' '[:lower:]') >> $GITHUB_ENV
- name: Build and push
uses: docker/build-push-action@v2
echo DOCKER_IMAGE_REPOSITORY=$(echo ${{ github.repository }} | tr '[:upper:]' '[:lower:]') >> $GITHUB_ENV
- name: Build and Push Docker Image to Docker registry
uses: docker/build-push-action@v4
with:
context: .
file: ${{ env.DOCKER_FILE_PATH }}
@@ -103,7 +105,35 @@ jobs:
cache-to: type=inline
platforms: linux/amd64,linux/arm,linux/arm64
tags: |
${{ env.IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.new_tag }}
${{ env.DOCKER_IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.new_tag }}
labels: |
org.opencontainers.image.source=${{ github.event.repository.clone_url }}
org.opencontainers.image.created=${{ steps.prep.outputs.created }}
org.opencontainers.image.revision=${{ github.sha }}
- name: Login to ghcr registry
uses: docker/login-action@v2
with:
registry: ${{env.REGISTRY}}
username: ${{github.actor}}
password: ${{secrets.GITHUB_TOKEN}}
- name: Generate image repository path for ghcr registry
run: |
echo GHCR_IMAGE_REPOSITORY=${{env.REGISTRY}}/$(echo ${{ github.repository }} | tr '[:upper:]' '[:lower:]') >> $GITHUB_ENV
- name: Build and Push Docker Image to ghcr registry
uses: docker/build-push-action@v4
with:
context: .
file: ${{ env.DOCKER_FILE_PATH }}
pull: true
push: true
build-args: BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
cache-to: type=inline
platforms: linux/amd64,linux/arm,linux/arm64
tags: |
${{ env.GHCR_IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.new_tag }}
labels: |
org.opencontainers.image.source=${{ github.event.repository.clone_url }}
org.opencontainers.image.created=${{ steps.prep.outputs.created }}
@@ -116,7 +146,7 @@ jobs:
# Generate tag for operator without "v"
- name: Generate Operator Tag
id: generate_operator_tag
uses: anothrNick/github-tag-action@1.36.0
uses: anothrNick/github-tag-action@1.61.0
env:
GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
WITH_V: false
@@ -165,7 +195,7 @@ jobs:
branch: ${{ github.ref }}
- name: Push Latest Tag
uses: anothrNick/github-tag-action@1.36.0
uses: anothrNick/github-tag-action@1.61.0
env:
GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
WITH_V: true

View File

@@ -6,7 +6,7 @@ on:
- "v*"
env:
GOLANG_VERSION: 1.20.1
GOLANG_VERSION: 1.20.2
jobs:
build:

View File

@@ -2,7 +2,7 @@ ARG BUILDER_IMAGE
ARG BASE_IMAGE
# Build the manager binary
FROM --platform=${BUILDPLATFORM} ${BUILDER_IMAGE:-golang:1.20.1} as builder
FROM --platform=${BUILDPLATFORM} ${BUILDER_IMAGE:-golang:1.20.2} as builder
ARG TARGETOS
ARG TARGETARCH

View File

@@ -9,7 +9,7 @@ ALL_ARCH ?= arm64 arm amd64
BUILDER_IMAGE ?=
BASE_IMAGE ?=
BINARY ?= Reloader
DOCKER_IMAGE ?= stakater/reloader
DOCKER_IMAGE ?= ghcr.io/stakater/reloader
# Default value "dev"
VERSION ?= 0.0.1

View File

@@ -1,13 +1,13 @@
# ![](assets/web/reloader-round-100px.png) RELOADER
[![Go Report Card](https://goreportcard.com/badge/github.com/stakater/reloader?style=flat-square)](https://goreportcard.com/report/github.com/stakater/reloader)
[![Go Doc](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat-square)](http://godoc.org/github.com/stakater/reloader)
[![Go Doc](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat-square)](https://godoc.org/github.com/stakater/reloader)
[![Release](https://img.shields.io/github/release/stakater/reloader.svg?style=flat-square)](https://github.com/stakater/reloader/releases/latest)
[![GitHub tag](https://img.shields.io/github/tag/stakater/reloader.svg?style=flat-square)](https://github.com/stakater/reloader/releases/latest)
[![Docker Pulls](https://img.shields.io/docker/pulls/stakater/reloader.svg?style=flat-square)](https://hub.docker.com/r/stakater/reloader/)
[![Docker Stars](https://img.shields.io/docker/stars/stakater/reloader.svg?style=flat-square)](https://hub.docker.com/r/stakater/reloader/)
[![license](https://img.shields.io/github/license/stakater/reloader.svg?style=flat-square)](LICENSE)
[![Get started with Stakater](https://stakater.github.io/README/stakater-github-banner.png)](http://stakater.com/?utm_source=Reloader&utm_medium=github)
[![Get started with Stakater](https://stakater.github.io/README/stakater-github-banner.png)](https://stakater.com/?utm_source=Reloader&utm_medium=github)
## Problem
@@ -73,7 +73,7 @@ of whether they have the `reloader.stakater.com/match: "true"` annotation or
not.
We can also specify a specific configmap or secret which would trigger rolling upgrade only upon change in our specified configmap or secret, this way, it will not trigger rolling upgrade upon changes in all configmaps or secrets used in a deploymentconfig, deployment, daemonset, statefulset or rollout.
To do this either set the auto annotation to `"false"` (`reloader.stakater.com/auto: "false"`) or remove it altogether, and use annotations mentioned [here](#Configmap) or [here](#Secret)
To do this either set the auto annotation to `"false"` (`reloader.stakater.com/auto: "false"`) or remove it altogether, and use annotations for [Configmap](#Configmap) or [Secret](#Secret)
### Configmap
@@ -144,6 +144,7 @@ spec:
- you may override the secret annotation with the `--secret-annotation` flag
- you may want to prevent watching certain namespaces with the `--namespaces-to-ignore` flag
- you may want to watch only a set of namespaces with certain labels by using the `--namespace-selector` flag
- you may want to watch only a set of secrets/configmaps with certain labels by using the `--resource-label-selector` flag
- you may want to prevent watching certain resources with the `--resources-to-ignore` flag
- you can configure logging in JSON format with the `--log-format=json` option
- you can configure the "reload strategy" with the `--reload-strategy=<strategy-name>` option (details below)
@@ -181,26 +182,52 @@ Reloader can be configured to ignore the resources `secrets` and `configmaps` by
| --resources-to-ignore=configMaps | To ignore configMaps |
| --resources-to-ignore=secrets | To ignore secrets |
`Note`: At one time only one of these resource can be ignored, trying to do it will cause error in Reloader. Workaround for ignoring both resources is by scaling down the reloader pods to `0`.
**Note:** At one time only one of these resource can be ignored, trying to do it will cause error in Reloader. Workaround for ignoring both resources is by scaling down the reloader pods to `0`.
Reloader can be configured to only watch secrets/configmaps with one or more labels using the `--resource-label-selector` parameter. Supported operators are `!, in, notin, ==, =, !=`, if no operator is found the 'exists' operator is inferred (ie. key only). Additional examples of these selectors can be found in the [Kubernetes Docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors).
**Note:** The old `:` delimited key value mappings are deprecated and if provided will be translated to `key=value`. Likewise, if a wildcard value is provided (e.g. `key:*`) it will be translated to the standalone `key` which checks for key existence.
These selectors can be combined together, for example with:
Reloader can be configured to watch only namespaces labeled with (one or more) labels of your choosing by using the `--namespace-selector` parameter, for example:
```
--namespace-selector=reloder:enabled,test:true
--resource-label-selector=reloader=enabled,key-exists,another-label in (value1,value2,value3)
```
Only namespaces labeled like the following namespace YAML will be watched:
Only configmaps or secrets labeled like the following will be watched:
```yaml
kind: ConfigMap
apiVersion: v1
metadata:
...
labels:
reloader: enabled
key-exists: yes
another-label: value1
...
```
Reloader can be configured to only watch namespaces labeled with one or more labels using the `--namespace-selector` parameter. Supported operators are `!, in, notin, ==, =, !=`, if no operator is found the 'exists' operator is inferred (ie. key only). Additional examples of these selectors can be found in the [Kubernetes Docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors).
**Note:** The old `:` delimited key value mappings are deprecated and if provided will be translated to `key=value`. Likewise, if a wildcard value is provided (e.g. `key:*`) it will be translated to the standalone `key` which checks for key existence.
These selectors can be combined together, for example with:
```
--namespace-selector=reloader=enabled,test=true
```
Only namespaces labeled as below would be watched and eligible for reloads:
```yaml
kind: Namespace
apiVersion: v1
metadata:
...
labels:
reloder: enabled
reloader: enabled
test: true
...
```
If you want to select namespace only by the key of the label use ```*``` as the value.
For example, for ```--namespace-selector=select-this:*``` all namespaces with label-key "select-this" will be selected regardless of the labels value
### Vanilla kustomize
@@ -251,13 +278,21 @@ Reloader can be configured to ignore the resources `secrets` and `configmaps` by
| ignoreSecrets | To ignore secrets. Valid value are either `true` or `false` | boolean |
| ignoreConfigMaps | To ignore configMaps. Valid value are either `true` or `false` | boolean |
`Note`: At one time only one of these resource can be ignored, trying to do it will cause error in helm template compilation.
**Note:** At one time only one of these resource can be ignored, trying to do it will cause error in helm template compilation.
Reloader can be configured to watch only namespaces labeled with (one or more) labels of your choosing by using the `namespaceSelector` parameter
Reloader can be configured to only watch namespaces labeled with one or more labels using the `namespaceSelector` parameter
| Parameter | Description | Type |
| ---------------- | -------------------------------------------------------------- | ------- |
| namespaceSelector | list of comma separated key:value namespace | string |
| Parameter | Description | Type |
| ---------------- | ---------------------------------------------------------------------------------- | ------- |
| namespaceSelector | list of comma separated label selectors, if multiple are provided they are ANDed   | string |
Reloader can be configured to only watch configmaps/secrets labeled with one or more labels using the `resourceLabelSelector` parameter
| Parameter | Description | Type |
| ---------------------- | ---------------------------------------------------------------------------------- | ------- |
| resourceLabelSelector | list of comma separated label selectors, if multiple are provided they are ANDed   | string |
**Note:** Both `namespaceSelector` & `resourceLabelSelector` can be used together. If they are then both conditions must be met for the configmap or secret to be eligible to trigger reload events. (e.g. If a configMap matches `resourceLabelSelector` but `namespaceSelector` does not match the namespace the configmap is in, it will be ignored)
You can also set the log format of Reloader to json by setting `logFormat` to `json` in values.yaml and apply the chart
@@ -288,7 +323,7 @@ You can find more documentation [here](docs)
### Have a question?
File a GitHub [issue](https://github.com/stakater/Reloader/issues), or send us an [email](mailto:stakater@gmail.com).
File a GitHub [issue](https://github.com/stakater/Reloader/issues).
### Talk to us on Slack
@@ -318,7 +353,7 @@ PRs are welcome. In general, we follow the "fork-and-pull" Git workflow.
4. **Push** your work back up to your fork
5. Submit a **Pull request** so that we can review your changes
NOTE: Be sure to merge the latest from "upstream" before making a pull request!
**NOTE:** Be sure to merge the latest from "upstream" before making a pull request!
## Changelog
@@ -326,18 +361,17 @@ View our closed [Pull Requests](https://github.com/stakater/Reloader/pulls?q=is%
## License
Apache2 © [Stakater](http://stakater.com)
Apache2 © [Stakater][website]
## About
`Reloader` is maintained by [Stakater][website]. Like it? Please let us know at <hello@stakater.com>
See [our other projects][community]
See [our other projects](https://github.com/stakater)
or contact us in case of professional services and queries on <hello@stakater.com>
[website]: http://stakater.com/
[community]: https://github.com/stakater/
[website]: https://stakater.com
## Acknowledgements
- [ConfigmapController](https://github.com/fabric8io/configmapcontroller); We documented here why we re-created [Reloader](docs/Reloader-vs-ConfigmapController.md)
- [ConfigmapController](https://github.com/fabric8io/configmapcontroller); We documented [here](docs/Reloader-vs-ConfigmapController.md) why we re-created Reloader
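
The README additions above explain that `--namespace-selector` and `--resource-label-selector` can be combined, in which case both must match for a configmap or secret to trigger a reload. As a small sketch (the object names and extra labels are invented, not part of this diff), a Namespace and ConfigMap satisfying both of the README's example selectors could look like this:

```yaml
# Matches --namespace-selector=reloader=enabled,test=true
kind: Namespace
apiVersion: v1
metadata:
  name: demo                # illustrative name
  labels:
    reloader: enabled
    test: "true"
---
# Matches --resource-label-selector=reloader=enabled,key-exists,another-label in (value1,value2,value3)
kind: ConfigMap
apiVersion: v1
metadata:
  name: demo-config         # illustrative name
  namespace: demo
  labels:
    reloader: enabled
    key-exists: "yes"       # a bare key in the selector only checks for existence
    another-label: value1
```

With both selectors satisfied, changes to `demo-config` become eligible to trigger reloads; the deprecated `key:value` and `key:*` forms mentioned in the README are translated to `key=value` and a bare `key` before being parsed.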

View File

@@ -3,8 +3,8 @@
apiVersion: v1
name: reloader
description: Reloader chart that runs on kubernetes
version: v1.0.13
appVersion: v1.0.13
version: v1.0.23
appVersion: v1.0.23
keywords:
- Reloader
- kubernetes

View File

@@ -38,6 +38,8 @@ rules:
- namespaces
verbs:
- get
- list
- watch
{{- end }}
{{- if and (.Capabilities.APIVersions.Has "apps.openshift.io/v1") (.Values.reloader.isOpenshift) }}
- apiGroups:
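
The two verbs added here back the Namespace informer introduced for `namespaceSelector` support (see the `fix: namespace list/watch permission when using namespaceSelectors` commit above). Rendered, the namespaces rule would come out roughly as below; the fields outside this hunk, including the core API group `""`, are assumptions rather than part of the diff:

```yaml
# Sketch of the rendered namespaces rule after this change
- apiGroups:
    - ""          # assumed: namespaces live in the core API group
  resources:
    - namespaces
  verbs:
    - get
    - list
    - watch
```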

View File

@@ -63,6 +63,10 @@ spec:
{{- if .Values.reloader.deployment.tolerations }}
tolerations:
{{ toYaml .Values.reloader.deployment.tolerations | indent 8 }}
{{- end }}
{{- if .Values.reloader.deployment.topologySpreadConstraints }}
topologySpreadConstraints:
{{ toYaml .Values.reloader.deployment.topologySpreadConstraints | indent 8 }}
{{- end }}
{{- if .Values.reloader.deployment.priorityClassName }}
priorityClassName: {{ .Values.reloader.deployment.priorityClassName }}
@@ -137,6 +141,7 @@ spec:
failureThreshold: {{ .Values.reloader.deployment.livenessProbe.failureThreshold | default "5" }}
periodSeconds: {{ .Values.reloader.deployment.livenessProbe.periodSeconds | default "10" }}
successThreshold: {{ .Values.reloader.deployment.livenessProbe.successThreshold | default "1" }}
initialDelaySeconds: {{ .Values.reloader.deployment.livenessProbe.initialDelaySeconds | default "10" }}
readinessProbe:
httpGet:
path: /metrics
@@ -145,6 +150,7 @@ spec:
failureThreshold: {{ .Values.reloader.deployment.readinessProbe.failureThreshold | default "5" }}
periodSeconds: {{ .Values.reloader.deployment.readinessProbe.periodSeconds | default "10" }}
successThreshold: {{ .Values.reloader.deployment.readinessProbe.successThreshold | default "1" }}
initialDelaySeconds: {{ .Values.reloader.deployment.readinessProbe.initialDelaySeconds | default "10" }}
{{- $containerSecurityContext := .Values.reloader.deployment.containerSecurityContext | default dict }}
{{- if .Values.reloader.readOnlyRootFileSystem }}
@@ -159,7 +165,7 @@ spec:
- mountPath: /tmp/
name: tmp-volume
{{- end }}
{{- if or (.Values.reloader.logFormat) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (.Values.reloader.namespaceSelector) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) (eq .Values.reloader.isArgoRollouts true) (eq .Values.reloader.reloadOnCreate true) (ne .Values.reloader.reloadStrategy "default") (.Values.reloader.enableHA)}}
{{- if or (.Values.reloader.logFormat) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (.Values.reloader.namespaceSelector) (.Values.reloader.resourceLabelSelector) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) (eq .Values.reloader.isArgoRollouts true) (eq .Values.reloader.reloadOnCreate true) (ne .Values.reloader.reloadStrategy "default") (.Values.reloader.enableHA)}}
args:
{{- if .Values.reloader.logFormat }}
- "--log-format={{ .Values.reloader.logFormat }}"
@@ -175,7 +181,10 @@ spec:
{{- end }}
{{- if .Values.reloader.namespaceSelector }}
- "--namespace-selector={{ .Values.reloader.namespaceSelector }}"
{{- end }}
{{- end }}
{{- if .Values.reloader.resourceLabelSelector }}
- "--resource-label-selector={{ .Values.reloader.resourceLabelSelector }}"
{{- end }}
{{- if .Values.reloader.custom_annotations }}
{{- if .Values.reloader.custom_annotations.configmap }}
- "--configmap-annotation"
@@ -210,7 +219,7 @@ spec:
{{- if ne .Values.reloader.reloadStrategy "default" }}
- "--reload-strategy={{ .Values.reloader.reloadStrategy }}"
{{- end }}
{{- if or (gt .Values.reloader.deployment.replicas 1.0) (.Values.reloader.enableHA) }}
{{- if or (gt (int .Values.reloader.deployment.replicas) 1) (.Values.reloader.enableHA) }}
- "--enable-ha=true"
{{- end}}
{{- end }}
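
For orientation, here is a rough sketch of the args this updated template would now render when `namespaceSelector`, `resourceLabelSelector`, and more than one replica are configured; the selector strings are invented and other optional flags are omitted:

```yaml
args:
  - "--namespace-selector=reloader=enabled"
  - "--resource-label-selector=team in (payments,billing)"
  - "--enable-ha=true"   # emitted because the replica count is cast with int before the gt comparison
```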

View File

@@ -20,7 +20,8 @@ reloader:
syncAfterRestart: false
reloadStrategy: default # Set to default, env-vars or annotations
ignoreNamespaces: "" # Comma separated list of namespaces to ignore
namespaceSelector: "" # Comma separated list of 'key:value' labels for namespaces selection
namespaceSelector: "" # Comma separated list of k8s label selectors for namespaces selection
resourceLabelSelector: "" # Comma separated list of k8s label selectors for configmap/secret selection
logFormat: "" #json
watchGlobally: true
# Set to true to enable leadership election allowing you to run multiple replicas
@@ -66,14 +67,26 @@ reloader:
# effect: "NoSchedule"
tolerations: []
# Topology spread constraints for pod assignment
# Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
# Example:
# topologySpreadConstraints:
# - maxSkew: 1
# topologyKey: zone
# whenUnsatisfiable: DoNotSchedule
# labelSelector:
# matchLabels:
# app: my-app
topologySpreadConstraints: []
annotations: {}
labels:
provider: stakater
group: com.stakater.platform
version: v1.0.13
version: v1.0.23
image:
name: stakater/reloader
tag: v1.0.13
name: ghcr.io/stakater/reloader
tag: v1.0.23
pullPolicy: IfNotPresent
# Support for extra environment variables.
env:
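
Tying the new values together, a minimal override file for the chart could look like the sketch below; the selector strings are invented, and `replicas` is shown only to illustrate the HA comparison fix:

```yaml
# my-values.yaml (illustrative)
reloader:
  namespaceSelector: "reloader=enabled"       # watch only namespaces carrying this label
  resourceLabelSelector: "reloader=enabled"   # watch only configmaps/secrets carrying this label
  deployment:
    replicas: 2                               # more than one replica now enables --enable-ha
    topologySpreadConstraints:
      - maxSkew: 1
        topologyKey: zone
        whenUnsatisfiable: DoNotSchedule
        labelSelector:
          matchLabels:
            app: reloader-reloader            # the pod label used by this chart's manifests
```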

View File

@@ -9,7 +9,7 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v1.0.13"
chart: "reloader-v1.0.23"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"

View File

@@ -9,7 +9,7 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v1.0.13"
chart: "reloader-v1.0.23"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"

View File

@@ -8,13 +8,13 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v1.0.13"
chart: "reloader-v1.0.23"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
group: com.stakater.platform
provider: stakater
version: v1.0.13
version: v1.0.23
name: reloader-reloader
namespace: default
spec:
@@ -28,16 +28,16 @@ spec:
metadata:
labels:
app: reloader-reloader
chart: "reloader-v1.0.13"
chart: "reloader-v1.0.23"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
group: com.stakater.platform
provider: stakater
version: v1.0.13
version: v1.0.23
spec:
containers:
- image: "stakater/reloader:v1.0.13"
- image: "ghcr.io/stakater/reloader:v1.0.23"
imagePullPolicy: IfNotPresent
name: reloader-reloader
@@ -52,6 +52,7 @@ spec:
failureThreshold: 5
periodSeconds: 10
successThreshold: 1
initialDelaySeconds: 10
readinessProbe:
httpGet:
path: /metrics
@@ -60,6 +61,7 @@ spec:
failureThreshold: 5
periodSeconds: 10
successThreshold: 1
initialDelaySeconds: 10
securityContext:
{}

View File

@@ -8,7 +8,7 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v1.0.13"
chart: "reloader-v1.0.23"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"

View File

@@ -8,7 +8,7 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v1.0.13"
chart: "reloader-v1.0.23"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
@@ -25,7 +25,7 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v1.0.13"
chart: "reloader-v1.0.23"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
@@ -79,7 +79,7 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v1.0.13"
chart: "reloader-v1.0.23"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
@@ -102,13 +102,13 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v1.0.13"
chart: "reloader-v1.0.23"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
group: com.stakater.platform
provider: stakater
version: v1.0.13
version: v1.0.23
name: reloader-reloader
namespace: default
spec:
@@ -122,16 +122,16 @@ spec:
metadata:
labels:
app: reloader-reloader
chart: "reloader-v1.0.13"
chart: "reloader-v1.0.23"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
group: com.stakater.platform
provider: stakater
version: v1.0.13
version: v1.0.23
spec:
containers:
- image: "stakater/reloader:v1.0.13"
- image: "ghcr.io/stakater/reloader:v1.0.23"
imagePullPolicy: IfNotPresent
name: reloader-reloader
@@ -146,6 +146,7 @@ spec:
failureThreshold: 5
periodSeconds: 10
successThreshold: 1
initialDelaySeconds: 10
readinessProbe:
httpGet:
path: /metrics
@@ -154,6 +155,7 @@ spec:
failureThreshold: 5
periodSeconds: 10
successThreshold: 1
initialDelaySeconds: 10
securityContext:
{}

go.mod (14 changed lines)
View File

@@ -10,10 +10,10 @@ require (
github.com/prometheus/client_golang v1.14.0
github.com/sirupsen/logrus v1.9.0
github.com/spf13/cobra v1.6.1
k8s.io/api v0.26.2
k8s.io/apimachinery v0.26.2
k8s.io/client-go v0.26.2
k8s.io/kubectl v0.26.2
k8s.io/api v0.26.3
k8s.io/apimachinery v0.26.3
k8s.io/client-go v0.26.3
k8s.io/kubectl v0.26.3
)
require (
@@ -70,9 +70,9 @@ require (
// Replacements for argo-rollouts
replace (
github.com/go-check/check => github.com/go-check/check v0.0.0-20180628173108-788fd7840127
k8s.io/api v0.0.0 => k8s.io/api v0.26.1
k8s.io/apimachinery v0.0.0 => k8s.io/apimachinery v0.26.1
k8s.io/client-go v0.0.0 => k8s.io/client-go v0.26.1
k8s.io/api v0.0.0 => k8s.io/api v0.26.3
k8s.io/apimachinery v0.0.0 => k8s.io/apimachinery v0.26.3
k8s.io/client-go v0.0.0 => k8s.io/client-go v0.26.3
k8s.io/cloud-provider v0.0.0 => k8s.io/cloud-provider v0.24.2
k8s.io/controller-manager v0.0.0 => k8s.io/controller-manager v0.24.2
k8s.io/cri-api v0.0.0 => k8s.io/cri-api v0.20.5-rc.0

go.sum (16 changed lines)
View File

@@ -554,14 +554,14 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.21.1/go.mod h1:FstGROTmsSHBarKc8bylzXih8BLNYTiS3TZcsoEDg2s=
k8s.io/api v0.26.2 h1:dM3cinp3PGB6asOySalOZxEG4CZ0IAdJsrYZXE/ovGQ=
k8s.io/api v0.26.2/go.mod h1:1kjMQsFE+QHPfskEcVNgL3+Hp88B80uj0QtSOlj8itU=
k8s.io/api v0.26.3 h1:emf74GIQMTik01Aum9dPP0gAypL8JTLl/lHa4V9RFSU=
k8s.io/api v0.26.3/go.mod h1:PXsqwPMXBSBcL1lJ9CYDKy7kIReUydukS5JiRlxC3qE=
k8s.io/apimachinery v0.21.1/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY=
k8s.io/apimachinery v0.26.2 h1:da1u3D5wfR5u2RpLhE/ZtZS2P7QvDgLZTi9wrNZl/tQ=
k8s.io/apimachinery v0.26.2/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I=
k8s.io/apimachinery v0.26.3 h1:dQx6PNETJ7nODU3XPtrwkfuubs6w7sX0M8n61zHIV/k=
k8s.io/apimachinery v0.26.3/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I=
k8s.io/client-go v0.21.1/go.mod h1:/kEw4RgW+3xnBGzvp9IWxKSNA+lXn3A7AuH3gdOAzLs=
k8s.io/client-go v0.26.2 h1:s1WkVujHX3kTp4Zn4yGNFK+dlDXy1bAAkIl+cFAiuYI=
k8s.io/client-go v0.26.2/go.mod h1:u5EjOuSyBa09yqqyY7m3abZeovO/7D/WehVVlZ2qcqU=
k8s.io/client-go v0.26.3 h1:k1UY+KXfkxV2ScEL3gilKcF7761xkYsSD6BC9szIu8s=
k8s.io/client-go v0.26.3/go.mod h1:ZPNu9lm8/dbRIPAgteN30RSXea6vrCpFvq+MateTUuQ=
k8s.io/code-generator v0.21.1/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
@@ -573,8 +573,8 @@ k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE=
k8s.io/kube-openapi v0.0.0-20230202010329-39b3636cbaa3 h1:vV3ZKAUX0nMjTflyfVea98dTfROpIxDaEsQws0FT2Ts=
k8s.io/kube-openapi v0.0.0-20230202010329-39b3636cbaa3/go.mod h1:/BYxry62FuDzmI+i9B+X2pqfySRmSOW2ARmj5Zbqhj0=
k8s.io/kubectl v0.26.2 h1:SMPB4j48eVFxsYluBq3VLyqXtE6b72YnszkbTAtFye4=
k8s.io/kubectl v0.26.2/go.mod h1:KYWOXSwp2BrDn3kPeoU/uKzKtdqvhK1dgZGd0+no4cM=
k8s.io/kubectl v0.26.3 h1:bZ5SgFyeEXw6XTc1Qji0iNdtqAC76lmeIIQULg2wNXM=
k8s.io/kubectl v0.26.3/go.mod h1:02+gv7Qn4dupzN3fi/9OvqqdW+uG/4Zi56vc4Zmsp1g=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20230202215443-34013725500c h1:YVqDar2X7YiQa/DVAXFMDIfGF8uGrHQemlrwRU5NlVI=
k8s.io/utils v0.0.0-20230202215443-34013725500c/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=

View File

@@ -19,6 +19,7 @@ import (
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/kube"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
)
// NewReloaderCommand starts the reloader controller
@@ -35,11 +36,12 @@ func NewReloaderCommand() *cobra.Command {
cmd.PersistentFlags().StringVar(&options.SecretUpdateOnChangeAnnotation, "secret-annotation", "secret.reloader.stakater.com/reload", "annotation to detect changes in secrets, specified by name")
cmd.PersistentFlags().StringVar(&options.ReloaderAutoAnnotation, "auto-annotation", "reloader.stakater.com/auto", "annotation to detect changes in secrets")
cmd.PersistentFlags().StringVar(&options.AutoSearchAnnotation, "auto-search-annotation", "reloader.stakater.com/search", "annotation to detect changes in configmaps or secrets tagged with special match annotation")
cmd.PersistentFlags().StringVar(&options.SearchMatchAnnotation, "search-match-annotation", "reloader.stakater.com/match", "annotation to mark secrets or configmapts to match the search")
cmd.PersistentFlags().StringVar(&options.SearchMatchAnnotation, "search-match-annotation", "reloader.stakater.com/match", "annotation to mark secrets or configmaps to match the search")
cmd.PersistentFlags().StringVar(&options.LogFormat, "log-format", "", "Log format to use (empty string for text, or JSON")
cmd.PersistentFlags().StringSlice("resources-to-ignore", []string{}, "list of resources to ignore (valid options 'configMaps' or 'secrets')")
cmd.PersistentFlags().StringSlice("namespaces-to-ignore", []string{}, "list of namespaces to ignore")
cmd.PersistentFlags().StringSlice("namespace-selector", []string{}, "list of key:vaule namespace labels to include")
cmd.PersistentFlags().StringSlice("namespace-selector", []string{}, "list of key:value labels to filter on for namespaces")
cmd.PersistentFlags().StringSlice("resource-label-selector", []string{}, "list of key:value labels to filter on for configmaps and secrets")
cmd.PersistentFlags().StringVar(&options.IsArgoRollouts, "is-Argo-Rollouts", "false", "Add support for argo rollouts")
cmd.PersistentFlags().StringVar(&options.ReloadStrategy, constants.ReloadStrategyFlag, constants.EnvVarsReloadStrategy, "Specifies the desired reload strategy")
cmd.PersistentFlags().StringVar(&options.ReloadOnCreate, "reload-on-create", "false", "Add support to watch create events")
@@ -140,19 +142,28 @@ func startReloader(cmd *cobra.Command, args []string) {
logrus.Fatal(err)
}
resourceLabelSelector, err := getResourceLabelSelector(cmd)
if err != nil {
logrus.Fatal(err)
}
if len(namespaceLabelSelector) > 0 {
logrus.Warnf("namespace-selector is set, will detect changes in namespaces with these labels: %s.", namespaceLabelSelector)
logrus.Warnf("namespace-selector is set, will only detect changes in namespaces with these labels: %s.", namespaceLabelSelector)
}
if len(resourceLabelSelector) > 0 {
logrus.Warnf("resource-label-selector is set, will only detect changes on resources with these labels: %s.", resourceLabelSelector)
}
collectors := metrics.SetupPrometheusEndpoint()
var controllers []*controller.Controller
for k := range kube.ResourceMap {
if ignoredResourcesList.Contains(k) {
if ignoredResourcesList.Contains(k) || (len(namespaceLabelSelector) == 0 && k == "namespaces") {
continue
}
c, err := controller.NewController(clientset, k, currentNamespace, ignoredNamespacesList, namespaceLabelSelector, collectors)
c, err := controller.NewController(clientset, k, currentNamespace, ignoredNamespacesList, namespaceLabelSelector, resourceLabelSelector, collectors)
if err != nil {
logrus.Fatalf("%s", err)
}
@@ -187,19 +198,72 @@ func getIgnoredNamespacesList(cmd *cobra.Command) (util.List, error) {
return getStringSliceFromFlags(cmd, "namespaces-to-ignore")
}
func getNamespaceLabelSelector(cmd *cobra.Command) (util.Map, error) {
func getNamespaceLabelSelector(cmd *cobra.Command) (string, error) {
slice, err := getStringSliceFromFlags(cmd, "namespace-selector")
if err != nil {
logrus.Fatal(err)
}
var namespaceSelectorMap util.Map = make(util.Map)
for _, kv := range slice {
split := strings.Split(kv, ":")
namespaceSelectorMap[split[0]] = split[1]
for i, kv := range slice {
// Legacy support for ":" as a delimiter and "*" for wildcard.
if strings.Contains(kv, ":") {
split := strings.Split(kv, ":")
if split[1] == "*" {
slice[i] = split[0]
} else {
slice[i] = split[0] + "=" + split[1]
}
}
// Convert wildcard to valid apimachinery operator
if strings.Contains(kv, "=") {
split := strings.Split(kv, "=")
if split[1] == "*" {
slice[i] = split[0]
}
}
}
return namespaceSelectorMap, nil
namespaceLabelSelector := strings.Join(slice[:], ",")
_, err = labels.Parse(namespaceLabelSelector)
if err != nil {
logrus.Fatal(err)
}
return namespaceLabelSelector, nil
}
func getResourceLabelSelector(cmd *cobra.Command) (string, error) {
slice, err := getStringSliceFromFlags(cmd, "resource-label-selector")
if err != nil {
logrus.Fatal(err)
}
for i, kv := range slice {
// Legacy support for ":" as a delimiter and "*" for wildcard.
if strings.Contains(kv, ":") {
split := strings.Split(kv, ":")
if split[1] == "*" {
slice[i] = split[0]
} else {
slice[i] = split[0] + "=" + split[1]
}
}
// Convert wildcard to valid apimachinery operator
if strings.Contains(kv, "=") {
split := strings.Split(kv, "=")
if split[1] == "*" {
slice[i] = split[0]
}
}
}
resourceLabelSelector := strings.Join(slice[:], ",")
_, err = labels.Parse(resourceLabelSelector)
if err != nil {
logrus.Fatal(err)
}
return resourceLabelSelector, nil
}
func getStringSliceFromFlags(cmd *cobra.Command, flag string) ([]string, error) {

View File

@@ -1,7 +1,6 @@
package controller
import (
"context"
"fmt"
"time"
@@ -22,6 +21,7 @@ import (
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubectl/pkg/scheme"
"k8s.io/utils/strings/slices"
)
// Controller for checking events
@@ -35,16 +35,18 @@ type Controller struct {
ignoredNamespaces util.List
collectors metrics.Collectors
recorder record.EventRecorder
namespaceSelector map[string]string
namespaceSelector string
resourceSelector string
}
// controllerInitialized flag determines whether controlled is being initialized
var secretControllerInitialized bool = false
var configmapControllerInitialized bool = false
var selectedNamespacesCache []string
// NewController for initializing a Controller
func NewController(
client kubernetes.Interface, resource string, namespace string, ignoredNamespaces []string, namespaceLabelSelector map[string]string, collectors metrics.Collectors) (*Controller, error) {
client kubernetes.Interface, resource string, namespace string, ignoredNamespaces []string, namespaceLabelSelector string, resourceLabelSelector string, collectors metrics.Collectors) (*Controller, error) {
if options.SyncAfterRestart {
secretControllerInitialized = true
@@ -56,6 +58,7 @@ func NewController(
namespace: namespace,
ignoredNamespaces: ignoredNamespaces,
namespaceSelector: namespaceLabelSelector,
resourceSelector: resourceLabelSelector,
resource: resource,
}
eventBroadcaster := record.NewBroadcaster()
@@ -65,7 +68,18 @@ func NewController(
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: fmt.Sprintf("reloader-%s", resource)})
queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
listWatcher := cache.NewListWatchFromClient(client.CoreV1().RESTClient(), resource, namespace, fields.Everything())
optionsModifier := func(options *metav1.ListOptions) {
if resource == "namespaces" {
options.LabelSelector = c.namespaceSelector
} else if len(c.resourceSelector) > 0 {
options.LabelSelector = c.resourceSelector
} else {
options.FieldSelector = fields.Everything().String()
}
}
listWatcher := cache.NewFilteredListWatchFromClient(client.CoreV1().RESTClient(), resource, namespace, optionsModifier)
indexer, informer := cache.NewIndexerInformer(listWatcher, kube.ResourceMap[resource], 0, cache.ResourceEventHandlerFuncs{
AddFunc: c.Add,
@@ -84,8 +98,15 @@ func NewController(
// Add function to add a new object to the queue in case of creating a resource
func (c *Controller) Add(obj interface{}) {
switch object := obj.(type) {
case *v1.Namespace:
c.addSelectedNamespaceToCache(*object)
return
}
if options.ReloadOnCreate == "true" {
if !c.resourceInIgnoredNamespace(obj) && c.resourceInNamespaceSelector(obj) && secretControllerInitialized && configmapControllerInitialized {
if !c.resourceInIgnoredNamespace(obj) && c.resourceInSelectedNamespaces(obj) && secretControllerInitialized && configmapControllerInitialized {
c.queue.Add(handler.ResourceCreatedHandler{
Resource: obj,
Collectors: c.collectors,
@@ -105,45 +126,47 @@ func (c *Controller) resourceInIgnoredNamespace(raw interface{}) bool {
return false
}
func (c *Controller) resourceInNamespaceSelector(raw interface{}) bool {
func (c *Controller) resourceInSelectedNamespaces(raw interface{}) bool {
if len(c.namespaceSelector) == 0 {
return true
}
switch object := raw.(type) {
case *v1.ConfigMap:
return c.matchLabels(object.ObjectMeta.Namespace)
if slices.Contains(selectedNamespacesCache, object.GetNamespace()) {
return true
}
case *v1.Secret:
return c.matchLabels(object.ObjectMeta.Namespace)
if slices.Contains(selectedNamespacesCache, object.GetNamespace()) {
return true
}
}
return true
return false
}
func (c *Controller) matchLabels(resourceNamespace string) bool {
namespace, err := c.client.CoreV1().Namespaces().Get(context.Background(), resourceNamespace, metav1.GetOptions{})
if err != nil {
logrus.Warn(err)
return false
}
func (c *Controller) addSelectedNamespaceToCache(namespace v1.Namespace) {
selectedNamespacesCache = append(selectedNamespacesCache, namespace.GetName())
logrus.Infof("added namespace to be watched: %s", namespace.GetName())
}
for selectorKey, selectorVal := range c.namespaceSelector {
namespaceLabelVal, namespaceLabelKeyExists := namespace.ObjectMeta.Labels[selectorKey]
if namespaceLabelKeyExists && selectorVal == "*" {
continue
}
if !namespaceLabelKeyExists || selectorVal != namespaceLabelVal {
return false
func (c *Controller) removeSelectedNamespaceFromCache(namespace v1.Namespace) {
for i, v := range selectedNamespacesCache {
if v == namespace.GetName() {
selectedNamespacesCache = append(selectedNamespacesCache[:i], selectedNamespacesCache[i+1:]...)
logrus.Infof("removed namespace from watch: %s", namespace.GetName())
return
}
}
return true
}
// Update function to add an old object and a new object to the queue in case of updating a resource
func (c *Controller) Update(old interface{}, new interface{}) {
if !c.resourceInIgnoredNamespace(new) && c.resourceInNamespaceSelector(new) {
switch new.(type) {
case *v1.Namespace:
return
}
if !c.resourceInIgnoredNamespace(new) && c.resourceInSelectedNamespaces(new) {
c.queue.Add(handler.ResourceUpdatedHandler{
Resource: new,
OldResource: old,
@@ -155,6 +178,12 @@ func (c *Controller) Update(old interface{}, new interface{}) {
// Delete function to add an object to the queue in case of deleting a resource
func (c *Controller) Delete(old interface{}) {
switch object := old.(type) {
case *v1.Namespace:
c.removeSelectedNamespaceFromCache(*object)
return
}
// Todo: Any future delete event can be handled here
}

View File

@@ -45,7 +45,10 @@ func TestMain(m *testing.M) {
logrus.Infof("Creating controller")
for k := range kube.ResourceMap {
c, err := NewController(clients.KubernetesClient, k, namespace, []string{}, map[string]string{}, collectors)
if k == "namespaces" {
continue
}
c, err := NewController(clients.KubernetesClient, k, namespace, []string{}, "", "", collectors)
if err != nil {
logrus.Fatalf("%s", err)
}
@@ -2291,7 +2294,7 @@ func TestController_resourceInNamespaceSelector(t *testing.T) {
queue workqueue.RateLimitingInterface
informer cache.Controller
namespace v1.Namespace
namespaceSelector util.Map
namespaceSelector string
}
type args struct {
raw interface{}
@@ -2305,10 +2308,7 @@ func TestController_resourceInNamespaceSelector(t *testing.T) {
{
name: "TestConfigMapResourceInNamespaceSelector",
fields: fields{
namespaceSelector: util.Map{
"select": "this",
"select2": "this2",
},
namespaceSelector: "select=this,select2=this2",
namespace: v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "selected-namespace",
@@ -2326,10 +2326,7 @@ func TestController_resourceInNamespaceSelector(t *testing.T) {
}, {
name: "TestConfigMapResourceNotInNamespaceSelector",
fields: fields{
namespaceSelector: util.Map{
"select": "this",
"select2": "this2",
},
namespaceSelector: "select=this,select2=this2",
namespace: v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "not-selected-namespace",
@@ -2345,10 +2342,7 @@ func TestController_resourceInNamespaceSelector(t *testing.T) {
{
name: "TestSecretResourceInNamespaceSelector",
fields: fields{
namespaceSelector: util.Map{
"select": "this",
"select2": "this2",
},
namespaceSelector: "select=this,select2=this2",
namespace: v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "selected-namespace",
@@ -2366,10 +2360,7 @@ func TestController_resourceInNamespaceSelector(t *testing.T) {
}, {
name: "TestSecretResourceNotInNamespaceSelector",
fields: fields{
namespaceSelector: util.Map{
"select": "this",
"select2": "this2",
},
namespaceSelector: "select=this,select2=this2",
namespace: v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "not-selected-namespace",
@@ -2382,11 +2373,9 @@ func TestController_resourceInNamespaceSelector(t *testing.T) {
},
want: false,
}, {
name: "TestSecretResourceInNamespaceSelectorWiledcardValue",
name: "TestSecretResourceInNamespaceSelectorKeyExists",
fields: fields{
namespaceSelector: util.Map{
"select": "*",
},
namespaceSelector: "select",
namespace: v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "selected-namespace",
@@ -2400,6 +2389,59 @@ func TestController_resourceInNamespaceSelector(t *testing.T) {
raw: testutil.GetSecret("selected-namespace", "secret", "test"),
},
want: true,
}, {
name: "TestSecretResourceInNamespaceSelectorValueIn",
fields: fields{
namespaceSelector: "select in (select1, select2, select3)",
namespace: v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "selected-namespace",
Labels: map[string]string{
"select": "select2",
},
},
},
},
args: args{
raw: testutil.GetSecret("selected-namespace", "secret", "test"),
},
want: true,
}, {
name: "TestSecretResourceInNamespaceSelectorKeyDoesNotExist",
fields: fields{
namespaceSelector: "!select2",
namespace: v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "selected-namespace",
Labels: map[string]string{
"select": "this",
},
},
},
},
args: args{
raw: testutil.GetSecret("selected-namespace", "secret", "test"),
},
want: true,
}, {
name: "TestSecretResourceInNamespaceSelectorMultipleConditions",
fields: fields{
namespaceSelector: "select,select2=this2,select3!=this4",
namespace: v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "selected-namespace",
Labels: map[string]string{
"select": "this",
"select2": "this2",
"select3": "this3",
},
},
},
},
args: args{
raw: testutil.GetSecret("selected-namespace", "secret", "test"),
},
want: true,
},
}
@@ -2418,9 +2460,21 @@ func TestController_resourceInNamespaceSelector(t *testing.T) {
namespaceSelector: tt.fields.namespaceSelector,
}
if got := c.resourceInNamespaceSelector(tt.args.raw); got != tt.want {
listOptions := metav1.ListOptions{}
listOptions.LabelSelector = tt.fields.namespaceSelector
namespaces, _ := fakeClient.CoreV1().Namespaces().List(context.Background(), listOptions)
for _, ns := range namespaces.Items {
c.addSelectedNamespaceToCache(ns)
}
if got := c.resourceInSelectedNamespaces(tt.args.raw); got != tt.want {
t.Errorf("Controller.resourceInNamespaceSelector() = %v, want %v", got, tt.want)
}
for _, ns := range namespaces.Items {
c.removeSelectedNamespaceFromCache(ns)
}
})
}
}

View File

@@ -119,7 +119,7 @@ func TestRunLeaderElectionWithControllers(t *testing.T) {
t.Logf("Creating controller")
var controllers []*controller.Controller
for k := range kube.ResourceMap {
c, err := controller.NewController(testutil.Clients.KubernetesClient, k, testutil.Namespace, []string{}, map[string]string{}, metrics.NewCollectors())
c, err := controller.NewController(testutil.Clients.KubernetesClient, k, testutil.Namespace, []string{}, "", "", metrics.NewCollectors())
if err != nil {
logrus.Fatalf("%s", err)
}

View File

@@ -9,4 +9,5 @@ import (
var ResourceMap = map[string]runtime.Object{
"configMaps": &v1.ConfigMap{},
"secrets": &v1.Secret{},
"namespaces": &v1.Namespace{},
}